Alioth Code Coverage

vu.rs — 0.00%

1// Copyright 2024 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::fs::File;
16use std::io::ErrorKind;
17use std::iter::zip;
18use std::mem::size_of_val;
19use std::os::fd::{AsFd, AsRawFd};
20use std::path::Path;
21use std::sync::Arc;
22use std::sync::mpsc::Receiver;
23use std::thread::JoinHandle;
24
25use libc::{MAP_ANONYMOUS, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, MAP_SHARED, PROT_NONE, mmap};
26use mio::event::Event;
27use mio::unix::SourceFd;
28use mio::{Interest, Registry, Token};
29use serde::Deserialize;
30use serde_aco::Help;
31use zerocopy::{FromZeros, IntoBytes};
32
33use crate::errors::BoxTrace;
34use crate::fuse::bindings::FuseSetupmappingFlag;
35use crate::fuse::{self, DaxRegion};
36use crate::hv::IoeventFd;
37use crate::mem::mapped::{ArcMemPages, RamBus};
38use crate::mem::{LayoutChanged, MemRegion, MemRegionType};
39use crate::sync::notifier::Notifier;
40use crate::virtio::dev::fs::{FsConfig, FsFeature};
41use crate::virtio::dev::{DevParam, Virtio, WakeEvent};
42use crate::virtio::queue::{QueueReg, VirtQueue};
43use crate::virtio::vu::bindings::{DeviceConfig, FsMap, VuBackMsg, VuFeature};
44use crate::virtio::vu::conn::VuChannel;
45use crate::virtio::vu::frontend::VuFrontend;
46use crate::virtio::vu::{Error, error as vu_error};
47use crate::virtio::worker::mio::{ActiveMio, Mio, VirtioMio};
48use crate::virtio::{DeviceId, IrqSender, Result};
49use crate::{align_up, ffi};
50
/// A virtio-fs device backed by an external vhost-user backend process
/// connected over a UNIX domain socket.
#[derive(Debug)]
pub struct VuFs {
    // Generic vhost-user frontend: owns the session and queue plumbing.
    frontend: VuFrontend,
    // Device config space exposed to the guest; built locally from the
    // mount tag, or fetched from the backend when no tag is given.
    config: Arc<FsConfig>,
    // Reserved guest-visible window for DAX file mappings; `None` when
    // DAX is disabled (dax_window == 0).
    dax_region: Option<ArcMemPages>,
}
57
impl VuFs {
    /// Connects to the vhost-user backend at `param.socket` and builds the
    /// device.
    ///
    /// With `param.dax_window > 0`, the backend-request protocol features
    /// are negotiated (the backend needs a channel to send map/unmap
    /// requests with fds back to us) and an anonymous `PROT_NONE` region
    /// is reserved as the DAX window. With `param.tag == None`, the whole
    /// config space (including the mount tag) is fetched from the backend,
    /// which requires the CONFIG protocol feature.
    pub fn new(param: VuFsParam, name: impl Into<Arc<str>>) -> Result<Self> {
        let mut extra_features = VuFeature::empty();
        if param.dax_window > 0 {
            // DAX mappings are requested by the backend over the
            // backend channel, with file descriptors attached.
            extra_features |= VuFeature::BACKEND_REQ | VuFeature::BACKEND_SEND_FD
        };
        if param.tag.is_none() {
            // No local tag: config space must come from the backend.
            extra_features |= VuFeature::CONFIG;
        }
        let frontend = VuFrontend::new(name, &param.socket, DeviceId::FILE_SYSTEM, extra_features)?;
        let config = if let Some(tag) = param.tag {
            // virtio-fs limits the tag to 36 bytes; an empty tag is invalid.
            assert!(tag.len() <= 36);
            assert_ne!(tag.len(), 0);
            let mut config = FsConfig::new_zeroed();
            config.tag[0..tag.len()].copy_from_slice(tag.as_bytes());
            // One queue is the hiprio queue; the remainder serve requests.
            config.num_request_queues = frontend.num_queues() as u32 - 1;
            if FsFeature::from_bits_retain(frontend.feature()).contains(FsFeature::NOTIFICATION) {
                // The notification queue is not a request queue either.
                config.num_request_queues -= 1;
            }
            config
        } else {
            // Ask the backend for its entire FsConfig.
            let cfg = DeviceConfig {
                offset: 0,
                size: size_of::<FsConfig>() as u32,
                flags: 0,
            };
            let mut config = FsConfig::new_zeroed();
            frontend.session().get_config(&cfg, config.as_mut_bytes())?;
            log::info!("{}: get config: {config:?}", frontend.name());
            config
        };

        let dax_region = if param.dax_window > 0 {
            // Align the window size (presumably to 1 << 12 bytes, i.e.
            // 4 KiB pages — see align_up!) and reserve it with PROT_NONE;
            // real mappings are installed over it on backend requests.
            let size = align_up!(param.dax_window, 12);
            Some(ArcMemPages::from_anonymous(size, Some(PROT_NONE), None)?)
        } else {
            None
        };

        Ok(VuFs {
            frontend,
            config: Arc::new(config),
            dax_region,
        })
    }
}
104
// Construction parameters for a vhost-user fs device. NOTE(review): the
// `///` comments below presumably double as user-facing help text via the
// `Help` derive, so they are deliberately left untouched.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Help)]
pub struct VuFsParam {
    /// Path to the vhost-user UNIX domain socket.
    pub socket: Box<Path>,
    /// Mount tag seen by the guest.
    // When `None`, the tag (and the rest of the config space) is fetched
    // from the backend instead.
    pub tag: Option<String>,
    /// Size of memory region for DAX in bytes.
    /// 0 means no DAX. [default: 0]
    #[serde(default)]
    pub dax_window: usize,
}
116
impl DevParam for VuFsParam {
    type Device = VuFs;

    /// Builds a [`VuFs`] device from these parameters.
    fn build(self, name: impl Into<Arc<str>>) -> Result<Self::Device> {
        VuFs::new(self, name)
    }

    /// Guest memory must be backed by a shareable fd so it can be handed
    /// to the vhost-user backend process.
    fn needs_mem_shared_fd(&self) -> bool {
        true
    }
}
128
129impl Virtio for VuFs {
130 type Config = FsConfig;
131 type Feature = FsFeature;
132
133 fn id(&self) -> DeviceId {
134 DeviceId::FILE_SYSTEM
135 }
136
137 fn name(&self) -> &str {
138 self.frontend.name()
139 }
140
141 fn config(&self) -> Arc<Self::Config> {
142 self.config.clone()
143 }
144
145 fn feature(&self) -> u128 {
146 self.frontend.feature()
147 }
148
149 fn num_queues(&self) -> u16 {
150 self.frontend.num_queues()
151 }
152
153 fn spawn_worker<S, E>(
154 self,
155 event_rx: Receiver<WakeEvent<S, E>>,
156 memory: Arc<RamBus>,
157 queue_regs: Arc<[QueueReg]>,
158 ) -> Result<(JoinHandle<()>, Arc<Notifier>)>
159 where
160 S: IrqSender,
161 E: IoeventFd,
162 {
163 Mio::spawn_worker(self, event_rx, memory, queue_regs)
164 }
165
166 fn ioeventfd_offloaded(&self, q_index: u16) -> Result<bool> {
167 self.frontend.ioeventfd_offloaded(q_index)
168 }
169
170 fn shared_mem_regions(&self) -> Option<Arc<MemRegion>> {
171 let dax_region = self.dax_region.as_ref()?;
172 Some(Arc::new(MemRegion::with_dev_mem(
173 dax_region.clone(),
174 MemRegionType::Hidden,
175 )))
176 }
177
178 fn mem_change_callback(&self) -> Option<Box<dyn LayoutChanged>> {
179 self.frontend.mem_change_callback()
180 }
181}
182
/// Mio worker callbacks. Queue work is delegated to the vhost-user
/// frontend; the extra event source registered here is the backend
/// channel, over which the backend sends DAX map/unmap requests.
impl VirtioMio for VuFs {
    /// Activates the frontend and, if a backend channel exists, switches
    /// it to non-blocking mode and registers it with the poller at token
    /// `num_queues` (one past the queue tokens).
    fn activate<'m, Q, S, E>(
        &mut self,
        feature: u128,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        self.frontend.activate(feature, active_mio)?;
        if let Some(channel) = self.frontend.channel() {
            // Non-blocking so handle_event can drain until WouldBlock.
            channel.conn.set_nonblocking(true)?;
            active_mio.poll.registry().register(
                &mut SourceFd(&channel.conn.as_raw_fd()),
                Token(self.frontend.num_queues() as _),
                Interest::READABLE,
            )?;
        }
        Ok(())
    }

    /// Handles a readiness event on the backend channel: drains backend
    /// messages and services DAX map/unmap requests by (re)mapping pages
    /// inside the reserved DAX window.
    fn handle_event<'a, 'm, Q, S, E>(
        &mut self,
        event: &Event,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        let q_index = event.token().0;
        // Tokens below queues.len() belong to virtqueues; queue events are
        // presumably handled elsewhere (offloaded), so one arriving here is
        // reported as a queue error.
        if q_index < active_mio.queues.len() {
            return vu_error::QueueErr {
                index: q_index as u16,
            }
            .fail()?;
        }

        // Backend requests only make sense when DAX was negotiated: both
        // the reserved window and the backend channel must exist.
        let Some(dax_region) = &self.dax_region else {
            return vu_error::ProtocolFeature {
                feature: VuFeature::BACKEND_REQ,
            }
            .fail()?;
        };
        let Some(channel) = self.frontend.channel() else {
            return vu_error::ProtocolFeature {
                feature: VuFeature::BACKEND_REQ,
            }
            .fail()?;
        };
        // Drain all pending messages; the socket is non-blocking (set in
        // activate), so a WouldBlock ends the loop.
        loop {
            // Up to 8 fds per message, matching the batch size of FsMap.
            let mut fds = [const { None }; 8];
            let msg = channel.recv_msg(&mut fds);
            let (request, size) = match msg {
                Ok(m) => (m.request, m.size),
                Err(Error::System { error, .. }) if error.kind() == ErrorKind::WouldBlock => break,
                Err(e) => return Err(e)?,
            };
            let fs_map: FsMap = channel.recv_payload()?;

            // The payload must be exactly one FsMap.
            if size as usize != size_of_val(&fs_map) {
                return vu_error::PayloadSize {
                    want: size_of_val(&fs_map),
                    got: size,
                }
                .fail()?;
            }
            match VuBackMsg::from(request) {
                VuBackMsg::SHARED_OBJECT_ADD => {
                    // Map each received fd into the DAX window at the
                    // requested cache offset. Entries beyond the last fd
                    // are unused, hence the break on the first None.
                    for (index, fd) in fds.iter().enumerate() {
                        let Some(fd) = fd else {
                            break;
                        };
                        let raw_fd = fd.as_raw_fd();
                        let map_addr = dax_region.addr() + fs_map.cache_offset[index] as usize;
                        log::trace!(
                            "{}: mapping fd {raw_fd} to offset {:#x}",
                            self.name(),
                            fs_map.cache_offset[index]
                        );
                        // SAFETY-relevant: MAP_FIXED overwrites part of the
                        // PROT_NONE reservation; `flags` carries the mmap
                        // protection bits (see VuDaxRegion::map).
                        ffi!(
                            unsafe {
                                mmap(
                                    map_addr as _,
                                    fs_map.len[index] as _,
                                    fs_map.flags[index] as _,
                                    MAP_SHARED | MAP_FIXED,
                                    raw_fd,
                                    fs_map.fd_offset[index] as _,
                                )
                            },
                            MAP_FAILED
                        )?;
                    }
                }
                VuBackMsg::SHARED_OBJECT_REMOVE => {
                    // Tear down mappings by replacing each range with a
                    // fresh anonymous PROT_NONE mapping, restoring the
                    // window to its reserved state.
                    for (len, offset) in zip(fs_map.len, fs_map.cache_offset) {
                        if len == 0 {
                            continue;
                        }
                        log::trace!(
                            "{}: unmapping offset {offset:#x}, size {len:#x}",
                            self.name()
                        );
                        let map_addr = dax_region.addr() + offset as usize;
                        let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED;
                        ffi!(
                            unsafe { mmap(map_addr as _, len as _, PROT_NONE, flags, -1, 0) },
                            MAP_FAILED
                        )?;
                    }
                }
                _ => unimplemented!("{}: unknown request {request:#x}", self.name()),
            }
            // Acknowledge the request with a zero (success) payload.
            channel.reply(VuBackMsg::from(request), &0u64, &[])?;
        }
        Ok(())
    }

    /// Queue notifications are forwarded to the vhost-user frontend.
    fn handle_queue<'m, Q, S, E>(
        &mut self,
        index: u16,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        self.frontend.handle_queue(index, active_mio)
    }

    /// Device reset is forwarded to the vhost-user frontend.
    fn reset(&mut self, registry: &Registry) {
        self.frontend.reset(registry)
    }
}
322
/// DAX region handle that relays FUSE setup/remove-mapping operations to
/// the vhost-user backend over its channel.
#[derive(Debug)]
pub struct VuDaxRegion {
    // Channel to the vhost-user backend carrying fs_map/fs_unmap requests.
    pub channel: Arc<VuChannel>,
}
327
328impl DaxRegion for VuDaxRegion {
329 fn map(
330 &self,
331 m_offset: u64,
332 fd: &File,
333 f_offset: u64,
334 len: u64,
335 flag: FuseSetupmappingFlag,
336 ) -> fuse::Result<()> {
337 let mut fs_map = FsMap::new_zeroed();
338 fs_map.fd_offset[0] = f_offset;
339 fs_map.cache_offset[0] = m_offset;
340
341 let mut prot = 0;
342 if flag.contains(FuseSetupmappingFlag::READ) {
343 prot |= libc::PROT_READ;
344 };
345 if flag.contains(FuseSetupmappingFlag::WRITE) {
346 prot |= libc::PROT_WRITE;
347 }
348 fs_map.flags[0] = prot as _;
349
350 fs_map.len[0] = len;
351 let fds = [fd.as_fd()];
352 self.channel
353 .fs_map(&fs_map, &fds)
354 .box_trace(fuse::error::DaxMapping)
355 }
356
357 fn unmap(&self, m_offset: u64, len: u64) -> fuse::Result<()> {
358 let mut fs_map = FsMap::new_zeroed();
359 fs_map.cache_offset[0] = m_offset;
360 fs_map.len[0] = len;
361 self.channel
362 .fs_unmap(&fs_map)
363 .box_trace(fuse::error::DaxMapping)
364 }
365}
366