Alioth Code Coverage

vmnet.rs — 0.00% coverage

1// Copyright 2025 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::ffi::CStr;
16use std::fmt::Debug;
17use std::io::{self, ErrorKind, Read};
18use std::ptr::null;
19use std::sync::atomic::{AtomicPtr, Ordering};
20use std::sync::mpsc::{Receiver, Sender};
21use std::sync::{Arc, mpsc};
22use std::thread::JoinHandle;
23use std::time::Duration;
24
25use libc::c_void;
26use mio::event::Event;
27use mio::{Interest, Registry, Token};
28use serde::Deserialize;
29use serde_aco::Help;
30use zerocopy::IntoBytes;
31
32use crate::device::net::MacAddr;
33use crate::hv::IoeventFd;
34use crate::mem::mapped::RamBus;
35use crate::sync::notifier::Notifier;
36use crate::sys::block::{_NSConcreteStackBlock, BlockDescriptor, BlockFlag};
37use crate::sys::dispatch::{DispatchQueue, dispatch_queue_create, dispatch_release};
38use crate::sys::vmnet::{
39 InterfaceEvent, OperationMode, VmPktDesc, VmnetInterface, VmnetInterfaceCompletionHandler,
40 VmnetInterfaceEventCallback, VmnetReturn, VmnetStartInterfaceCompletionHandler,
41 vmnet_allocate_mac_address_key, vmnet_enable_isolation_key, vmnet_interface_set_event_callback,
42 vmnet_mac_address_key, vmnet_mtu_key, vmnet_operation_mode_key, vmnet_read,
43 vmnet_start_interface, vmnet_stop_interface, vmnet_write,
44};
45use crate::sys::xpc::{
46 XpcObject, xpc_bool_create, xpc_dictionary_create, xpc_dictionary_get_string,
47 xpc_dictionary_get_uint64, xpc_uint64_create,
48};
49use crate::virtio::dev::net::{NetConfig, NetFeature, VirtioNetHdr};
50use crate::virtio::dev::{DevParam, DeviceId, Result, Virtio, WakeEvent};
51use crate::virtio::queue::{DescChain, QueueReg, Status, VirtQueue};
52use crate::virtio::worker::mio::{ActiveMio, Mio, VirtioMio};
53use crate::virtio::{FEATURE_BUILT_IN, IrqSender};
54
/// Virtio network device backed by the macOS vmnet framework.
#[derive(Debug)]
pub struct Net {
    // Device name used in log messages.
    name: Arc<str>,
    // Virtio-net config (MTU, MAC, max_queue_pairs) built from the vmnet
    // interface parameters at startup.
    config: Arc<NetConfig>,
    // Virtio-net feature bits offered to the guest.
    feature: NetFeature,
    // Dispatch queue on which vmnet delivers callbacks; released in `Drop`.
    dispatch_queue: AtomicPtr<DispatchQueue>,
    // Handle of the started vmnet interface; stopped in `Drop`.
    interface: AtomicPtr<VmnetInterface>,
    // Signaled from the vmnet event callback when RX packets are available;
    // polled by the mio worker.
    rx_notifier: Notifier,
}
64
65fn check_ret(ret: VmnetReturn) -> Result<(), io::Error> {
66 if ret == VmnetReturn::SUCCESS {
67 return Ok(());
68 }
69 let kind = match ret {
70 VmnetReturn::MEM_FAILURE => ErrorKind::OutOfMemory,
71 VmnetReturn::INVALID_ARGUMENT => ErrorKind::InvalidInput,
72 VmnetReturn::INVALID_ACCESS => ErrorKind::PermissionDenied,
73 _ => ErrorKind::Other,
74 };
75 Err(io::Error::new(kind, format!("{ret:?}")))
76}
77
impl Net {
    /// Starts a vmnet interface in shared (NAT) mode and wraps it in a
    /// virtio-net device.
    ///
    /// If `param.mac` is `None`, vmnet is asked to allocate a MAC address;
    /// otherwise the caller-supplied address overrides whatever vmnet
    /// reports. Interface startup is asynchronous: a completion handler
    /// (an Objective-C block) runs on a private dispatch queue and sends
    /// the resulting `NetConfig` back over an mpsc channel, which this
    /// function awaits with a 5-second timeout.
    pub fn new(param: NetVmnetParam, name: impl Into<Arc<str>>) -> Result<Self> {
        let allocate_mac = param.mac.is_none();
        // XPC dictionary describing the interface: shared mode, optional
        // MAC allocation, isolation disabled.
        let keys = unsafe {
            [
                vmnet_operation_mode_key,
                vmnet_allocate_mac_address_key,
                vmnet_enable_isolation_key,
            ]
        };
        let vals = [
            unsafe { xpc_uint64_create(OperationMode::SHARED.raw() as u64) } as *const _,
            unsafe { xpc_bool_create(allocate_mac) } as *const _,
            unsafe { xpc_bool_create(false) } as *const _,
        ];
        let desc = unsafe { xpc_dictionary_create(keys.as_ptr(), vals.as_ptr(), 3) };
        let dispatch_queue = unsafe { dispatch_queue_create(c"virtio-net".as_ptr(), null()) };
        let (sender, receiver) = mpsc::channel::<Result<NetConfig>>();

        // Objective-C block layout: the required block header followed by
        // the captured state (here, a raw pointer to the channel sender).
        #[repr(C)]
        struct HandlerBlock {
            block: VmnetStartInterfaceCompletionHandler,
            sender: *const Sender<Result<NetConfig>>,
        }

        // Parses the completion status and the interface-parameter XPC
        // dictionary (MTU and MAC string) into a `NetConfig`.
        fn do_handler_invoke(ret: VmnetReturn, obj: *const XpcObject) -> Result<NetConfig> {
            check_ret(ret)?;
            let mtu = unsafe { xpc_dictionary_get_uint64(obj, vmnet_mtu_key) } as u16;
            let mac_addr = unsafe { xpc_dictionary_get_string(obj, vmnet_mac_address_key) };
            // No MAC in the reply (e.g. allocation was not requested):
            // leave the default MAC in place.
            if mac_addr.is_null() {
                return Ok(NetConfig {
                    mtu,
                    max_queue_pairs: 1,
                    ..Default::default()
                });
            }
            let Ok(mac_addr) = unsafe { CStr::from_ptr(mac_addr) }.to_str() else {
                let e = io::Error::new(ErrorKind::InvalidData, "Invalid mac address string");
                return Err(e.into());
            };
            match mac_addr.parse() {
                Ok(mac) => Ok(NetConfig {
                    mtu,
                    max_queue_pairs: 1,
                    mac,
                    ..Default::default()
                }),
                Err(e) => {
                    let msg = format!("Invalid mac address: {e:?}");
                    Err(io::Error::new(ErrorKind::InvalidData, msg).into())
                }
            }
        }

        // Block entry point invoked by vmnet on the dispatch queue; `this`
        // is the block itself, from which the captured sender is recovered.
        extern "C" fn handler_invoke(this: *mut c_void, ret: VmnetReturn, obj: *const XpcObject) {
            let this = unsafe { &*(this as *mut HandlerBlock) };
            let sender = unsafe { &*this.sender };

            let config = do_handler_invoke(ret, obj);
            if let Err(e) = sender.send(config) {
                log::error!("Failed to send config: {e:?}");
            }
        }

        static BLOCK_DESC: BlockDescriptor = BlockDescriptor {
            reserved: 0,
            size: size_of::<HandlerBlock>() as _,
        };
        let handler = HandlerBlock {
            block: VmnetStartInterfaceCompletionHandler {
                isa: unsafe { _NSConcreteStackBlock },
                flags: BlockFlag::HAS_STRET,
                reserved: 0,
                invoke: handler_invoke,
                descriptor: &BLOCK_DESC as *const _,
            },
            sender: &sender as *const _,
        };
        let interface = unsafe { vmnet_start_interface(desc, dispatch_queue, &handler.block) };
        // NOTE(review): `handler` and `sender` live on this stack frame. If
        // the 5s timeout below fires and vmnet invokes the completion block
        // afterwards, it would dereference freed stack memory — confirm
        // vmnet's lifetime guarantees or move the block/sender to the heap.
        let mut config = match receiver.recv_timeout(Duration::from_secs(5)) {
            Ok(Ok(config)) => Ok(config),
            Ok(Err(e)) => Err(e),
            Err(_) => Err(io::Error::other("failed to start vmnet interface").into()),
        }?;

        // A caller-provided MAC takes precedence over the vmnet-reported one.
        if let Some(mac) = param.mac {
            config.mac = mac;
        }

        Ok(Net {
            name: name.into(),
            config: Arc::new(config),
            feature: NetFeature::MAC | NetFeature::MTU,
            dispatch_queue: AtomicPtr::new(dispatch_queue),
            interface: AtomicPtr::new(interface),
            rx_notifier: Notifier::new()?,
        })
    }
}
177
impl Drop for Net {
    /// Stops the vmnet interface and releases the dispatch queue.
    ///
    /// Stopping is asynchronous: a completion block sends the final status
    /// over an mpsc channel, awaited with a 1-second timeout. All failures
    /// are logged rather than propagated (drop cannot fail).
    fn drop(&mut self) {
        let interface = self.interface.load(Ordering::Acquire);
        let dispatch_queue = self.dispatch_queue.load(Ordering::Acquire);

        let (sender, receiver) = mpsc::channel::<VmnetReturn>();

        // Objective-C block layout: block header plus the captured sender.
        #[repr(C)]
        struct HandlerBlock {
            block: VmnetInterfaceCompletionHandler,
            sender: *const Sender<VmnetReturn>,
        }

        // Invoked by vmnet on the dispatch queue once the interface has
        // actually stopped; forwards the status to the waiting channel.
        extern "C" fn handler_invoke(this: *mut c_void, ret: VmnetReturn) {
            let this = unsafe { &*(this as *mut HandlerBlock) };
            let sender = unsafe { &*this.sender };

            if let Err(e) = sender.send(ret) {
                log::error!("Failed to send ret {ret:x?}: {e:?}");
            }
        }

        static BLOCK_DESC: BlockDescriptor = BlockDescriptor {
            reserved: 0,
            size: size_of::<HandlerBlock>() as _,
        };
        let handler = HandlerBlock {
            block: VmnetInterfaceCompletionHandler {
                isa: unsafe { _NSConcreteStackBlock },
                flags: BlockFlag::HAS_STRET,
                reserved: 0,
                invoke: handler_invoke,
                descriptor: &BLOCK_DESC as *const _,
            },
            sender: &sender as *const _,
        };
        let ret = unsafe { vmnet_stop_interface(interface, dispatch_queue, &handler.block) };
        if let Err(e) = check_ret(ret) {
            log::error!("{}: failed to stop interface: {e:?}", self.name);
            // NOTE(review): this early return skips dispatch_release below,
            // leaking the queue on the synchronous-failure path — confirm
            // whether releasing here would be safe.
            return;
        }
        // NOTE(review): as in `Net::new`, if this times out and the block
        // runs later it would touch this (freed) stack frame — verify
        // vmnet's completion guarantees.
        match receiver.recv_timeout(Duration::from_secs(1)) {
            Ok(ret) => {
                if let Err(e) = check_ret(ret) {
                    log::error!("{}: failed to stop interface: {e:?}", self.name);
                }
            }
            Err(e) => log::error!(
                "{}: failed to receive stop interface response: {e:?}",
                self.name
            ),
        }
        unsafe { dispatch_release(dispatch_queue) };
    }
}
233
234impl Virtio for Net {
235 type Config = NetConfig;
236 type Feature = NetFeature;
237
238 fn id(&self) -> DeviceId {
239 DeviceId::NET
240 }
241
242 fn name(&self) -> &str {
243 &self.name
244 }
245
246 fn num_queues(&self) -> u16 {
247 let data_queues = self.config.max_queue_pairs << 1;
248 if self.feature.contains(NetFeature::CTRL_VQ) {
249 data_queues + 1
250 } else {
251 data_queues
252 }
253 }
254
255 fn config(&self) -> Arc<NetConfig> {
256 self.config.clone()
257 }
258
259 fn feature(&self) -> u128 {
260 self.feature.bits() | FEATURE_BUILT_IN
261 }
262
263 fn spawn_worker<S, E>(
264 self,
265 event_rx: Receiver<WakeEvent<S, E>>,
266 memory: Arc<RamBus>,
267 queue_regs: Arc<[QueueReg]>,
268 ) -> Result<(JoinHandle<()>, Arc<Notifier>)>
269 where
270 S: IrqSender,
271 E: IoeventFd,
272 {
273 Mio::spawn_worker(self, event_rx, memory, queue_regs)
274 }
275}
276
impl VirtioMio for Net {
    /// Device reset: detaches the vmnet event callback and deregisters the
    /// RX notifier from the poller.
    fn reset(&mut self, registry: &Registry) {
        let interface = self.interface.load(Ordering::Acquire);

        // Clearing the callback: null queue and null handler.
        let ret = unsafe {
            vmnet_interface_set_event_callback(
                interface,
                InterfaceEvent::PACKETS_AVAILABLE,
                null(),
                null(),
            )
        };
        if let Err(err) = check_ret(ret) {
            log::error!("{}: failed to reset event callback: {}", self.name, err);
        }

        // Deregistration may fail if the notifier was never registered;
        // that is not an error during reset.
        let _ = registry.deregister(&mut self.rx_notifier);
    }

    /// Driver activation: registers the RX notifier with the poller and
    /// installs a vmnet event callback that signals it whenever packets
    /// become available.
    fn activate<'m, Q, S, E>(
        &mut self,
        _feature: u128,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        let registry = active_mio.poll.registry();
        // Token(0) identifies the RX notifier in `handle_event`.
        registry.register(&mut self.rx_notifier, Token(0), Interest::READABLE)?;

        let interface = self.interface.load(Ordering::Acquire);
        let dispatch_queue = self.dispatch_queue.load(Ordering::Acquire);

        // Objective-C block layout: block header plus a raw pointer to the
        // device's notifier.
        #[repr(C)]
        struct CallbackBlock {
            block: VmnetInterfaceEventCallback,
            notifier: *const Notifier,
        }

        // Invoked by vmnet on the dispatch queue when packets arrive;
        // wakes the mio worker via the notifier.
        extern "C" fn callback_invoke(this: *mut c_void, _: InterfaceEvent, _: *const XpcObject) {
            let this = unsafe { &*(this as *mut CallbackBlock) };
            let notifier = unsafe { &*this.notifier };

            if let Err(e) = notifier.notify() {
                log::error!("Failed to notify: {e:?}");
            }
        }

        static BLOCK_DESC: BlockDescriptor = BlockDescriptor {
            reserved: 0,
            size: size_of::<CallbackBlock>() as _,
        };
        // NOTE(review): `callback` is a stack block whose pointee outlives
        // this frame only if vmnet copies the block on registration —
        // presumably it does (standard Blocks runtime behavior); confirm.
        // The captured `notifier` pointer targets `self.rx_notifier`, which
        // lives as long as the device.
        let callback = CallbackBlock {
            block: VmnetInterfaceEventCallback {
                isa: unsafe { _NSConcreteStackBlock },
                flags: BlockFlag::HAS_STRET,
                reserved: 0,
                invoke: callback_invoke,
                descriptor: &BLOCK_DESC as *const _,
            },
            notifier: &self.rx_notifier as *const Notifier,
        };

        let ret = unsafe {
            vmnet_interface_set_event_callback(
                interface,
                InterfaceEvent::PACKETS_AVAILABLE,
                dispatch_queue,
                &callback.block,
            )
        };
        check_ret(ret)?;
        Ok(())
    }

    /// Poller event: the RX notifier fired, so drain available packets
    /// into the corresponding RX queue.
    fn handle_event<'a, 'm, Q, S, E>(
        &mut self,
        event: &Event,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        let token = event.token().0;
        let irq_sender = active_mio.irq_sender;
        if event.is_readable() {
            // Token k maps to RX queue index 2k (RX queues are the even
            // indices; token 0 was registered in `activate`).
            let index = (token as u16) << 1;
            let Some(Some(queue)) = active_mio.queues.get_mut(index as usize) else {
                log::error!("{}: cannot find rx queue {index}", self.name);
                return Ok(());
            };
            let interface = self.interface.load(Ordering::Acquire);
            queue.handle_desc(index, irq_sender, read_from_vmnet(interface))?;
        }
        Ok(())
    }

    /// Guest queue notification: even indices are RX (read from vmnet),
    /// odd indices are TX (write to vmnet).
    fn handle_queue<'m, Q, S, E>(
        &mut self,
        index: u16,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        let Some(Some(queue)) = active_mio.queues.get_mut(index as usize) else {
            log::error!("{}: invalid queue index {index}", self.name);
            return Ok(());
        };
        let irq_sender = active_mio.irq_sender;
        // Queue index max_queue_pairs*2 would be the control queue, which
        // is not implemented (CTRL_VQ is never offered in `Net::new`).
        if index == self.config.max_queue_pairs * 2 {
            unimplemented!()
        }
        let interface = self.interface.load(Ordering::Acquire);
        if index & 1 == 0 {
            queue.handle_desc(index, irq_sender, read_from_vmnet(interface))
        } else {
            queue.handle_desc(index, irq_sender, write_to_vmnet(interface))
        }
    }
}
404
/// Returns a descriptor-chain handler that reads one packet from the vmnet
/// interface into the chain's writable buffers and prepends a
/// `VirtioNetHdr`.
fn read_from_vmnet(interface: *mut VmnetInterface) -> impl FnMut(&mut DescChain) -> Result<Status> {
    move |chain: &mut DescChain| {
        // Build iovecs over the writable buffers, skipping the space
        // reserved for the virtio-net header: vmnet writes only the raw
        // frame. The iovecs must stay valid across the vmnet_read call.
        let mut iov = trim_desc_chain(chain.writable.iter().map(|b| &**b));
        let size = iov.iter().map(|s| s.iov_len).sum();
        let mut packets = VmPktDesc {
            vm_pkt_size: size,
            vm_pkt_iov: iov.as_mut_ptr(),
            vm_pkt_iovcnt: iov.len() as u32,
            vm_flags: 0,
        };
        // Ask for at most one packet.
        let mut pktcnt = 1;
        let ret = unsafe { vmnet_read(interface, &mut packets, &mut pktcnt) };
        check_ret(ret)?;

        // No packet pending: stop draining this queue for now.
        if pktcnt == 0 {
            return Ok(Status::Break);
        }

        // Fill in the virtio-net header at the start of the chain.
        let hdr = VirtioNetHdr {
            num_buffers: 1,
            ..Default::default()
        };
        let _ = hdr.as_bytes().read_vectored(&mut chain.writable);

        // vmnet updated vm_pkt_size to the actual frame length; the guest
        // sees header + frame.
        Ok(Status::Done {
            len: (packets.vm_pkt_size + size_of::<VirtioNetHdr>()) as u32,
        })
    }
}
434
/// Returns a descriptor-chain handler that transmits one packet from the
/// chain's readable buffers to the vmnet interface.
fn write_to_vmnet(interface: *mut VmnetInterface) -> impl FnMut(&mut DescChain) -> Result<Status> {
    move |chain: &mut DescChain| {
        // Build iovecs over the readable buffers with the leading
        // `VirtioNetHdr` stripped: vmnet expects only the raw frame. The
        // iovecs must stay valid across the vmnet_write call.
        let mut iov = trim_desc_chain(chain.readable.iter().map(|b| &**b));
        let size = iov.iter().map(|s| s.iov_len).sum();
        let mut packets = VmPktDesc {
            vm_pkt_size: size,
            vm_pkt_iov: iov.as_mut_ptr(),
            vm_pkt_iovcnt: iov.len() as u32,
            vm_flags: 0,
        };
        let mut pktcnt = 1;
        let ret = unsafe { vmnet_write(interface, &mut packets, &mut pktcnt) };
        check_ret(ret)?;

        // Nothing was written: stop processing this queue for now.
        if pktcnt == 0 {
            return Ok(Status::Break);
        }
        // TX chains have no device-written bytes, so the used length is 0.
        Ok(Status::Done { len: 0 })
    }
}
455
/// User-supplied parameters for a vmnet-backed virtio-net device.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Help)]
pub struct NetVmnetParam {
    /// MAC address of the virtual NIC, e.g. 06:3a:76:53:da:3d.
    pub mac: Option<MacAddr>,
}
461
impl DevParam for NetVmnetParam {
    type Device = Net;

    /// Builds the device by starting a vmnet interface with these params.
    fn build(self, name: impl Into<Arc<str>>) -> Result<Self::Device> {
        Net::new(self, name)
    }
}
469
470fn trim_desc_chain<'m>(bufs: impl Iterator<Item = &'m [u8]>) -> Vec<libc::iovec> {
471 let mut iov = Vec::new();
472 let mut trim_len = size_of::<VirtioNetHdr>();
473
474 for buf in bufs {
475 let b = if trim_len > 0 {
476 if let Some((_, tail)) = buf.split_at_checked(trim_len)
477 && !tail.is_empty()
478 {
479 trim_len = 0;
480 tail
481 } else {
482 trim_len -= buf.len();
483 continue;
484 }
485 } else {
486 buf
487 };
488 iov.push(libc::iovec {
489 iov_base: b.as_ptr() as *mut c_void,
490 iov_len: b.len(),
491 });
492 }
493 iov
494}
495