Alioth Code Coverage

frontend.rs — 0.00% line coverage

1// Copyright 2025 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::os::fd::{AsFd, AsRawFd, FromRawFd, OwnedFd};
16use std::path::Path;
17use std::sync::Arc;
18use std::sync::atomic::Ordering;
19use std::sync::mpsc::Receiver;
20use std::thread::JoinHandle;
21
22use mio::event::Event;
23use mio::unix::SourceFd;
24use mio::{Interest, Registry, Token};
25use zerocopy::IntoBytes;
26
27use crate::errors::BoxTrace;
28use crate::hv::IoeventFd;
29use crate::mem::emulated::{Action, Mmio};
30use crate::mem::mapped::{ArcMemPages, RamBus};
31use crate::mem::{LayoutChanged, MemRegion};
32use crate::sync::notifier::Notifier;
33use crate::virtio::dev::{DevParam, Virtio, WakeEvent};
34use crate::virtio::queue::{QueueReg, VirtQueue};
35use crate::virtio::vu::bindings::{
36 DeviceConfig, MemoryRegion, MemorySingleRegion, VirtqAddr, VirtqState, VuFeature,
37};
38use crate::virtio::vu::conn::{VuChannel, VuSession};
39use crate::virtio::vu::error as vu_error;
40use crate::virtio::worker::mio::{ActiveMio, Mio, VirtioMio};
41use crate::virtio::{DevStatus, DeviceId, IrqSender, Result, VirtioFeature, error};
42use crate::{bitflags, ffi, mem};
43
// Frontend-local feature-bit type required by the `Virtio` trait. No bits are
// defined here: all device feature negotiation is delegated to the vhost-user
// backend (see `VuFrontend::feature`), so the set is intentionally empty.
bitflags! {
    pub struct VuDevFeature(u128) { }
}
47
/// Callback state used to keep the vhost-user backend's view of guest memory
/// in sync with the VMM's RAM layout (see the `LayoutChanged` impl below).
#[derive(Debug)]
pub struct UpdateVuMem {
    // Device name, used only as a prefix in trace logs.
    pub name: Arc<str>,
    // Session over which ADD/REM_MEM_REG messages are sent to the backend.
    pub session: Arc<VuSession>,
}
53
54impl LayoutChanged for UpdateVuMem {
55 fn ram_added(&self, gpa: u64, pages: &ArcMemPages) -> mem::Result<()> {
56 let Some((fd, offset)) = pages.fd() else {
57 return Ok(());
58 };
59 let region = MemorySingleRegion {
60 _padding: 0,
61 region: MemoryRegion {
62 gpa: gpa as _,
63 size: pages.size() as _,
64 hva: pages.addr() as _,
65 mmap_offset: offset,
66 },
67 };
68 let ret = self.session.add_mem_region(&region, fd);
69 ret.box_trace(mem::error::ChangeLayout)?;
70 log::trace!("{}: add memory region: {:x?}", self.name, region.region);
71 Ok(())
72 }
73
74 fn ram_removed(&self, gpa: u64, pages: &ArcMemPages) -> mem::Result<()> {
75 let Some((_, offset)) = pages.fd() else {
76 return Ok(());
77 };
78 let region = MemorySingleRegion {
79 _padding: 0,
80 region: MemoryRegion {
81 gpa: gpa as _,
82 size: pages.size() as _,
83 hva: pages.addr() as _,
84 mmap_offset: offset,
85 },
86 };
87 let ret = self.session.remove_mem_region(&region);
88 ret.box_trace(mem::error::ChangeLayout)?;
89 log::trace!("{}: remove memory region: {:x?}", self.name, region.region);
90 Ok(())
91 }
92}
93
/// MMIO adapter that proxies virtio config-space accesses to the vhost-user
/// backend via GET_CONFIG/SET_CONFIG messages (see the `Mmio` impl below).
#[derive(Debug)]
pub struct VuDevConfig {
    session: Arc<VuSession>,
}
98
99impl Mmio for VuDevConfig {
100 fn size(&self) -> u64 {
101 256
102 }
103
104 fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {
105 let req = DeviceConfig {
106 offset: offset as u32,
107 size: size as u32,
108 flags: 0,
109 };
110 let mut ret = 0u64;
111 let buf = &mut ret.as_mut_bytes()[..size as usize];
112 self.session
113 .get_config(&req, buf)
114 .box_trace(mem::error::Mmio)?;
115 Ok(ret)
116 }
117
118 fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<Action> {
119 let req = DeviceConfig {
120 offset: offset as u32,
121 size: size as u32,
122 flags: 0,
123 };
124 let buf = &val.as_bytes()[..size as usize];
125 self.session
126 .set_config(&req, buf)
127 .box_trace(mem::error::Mmio)?;
128 Ok(Action::None)
129 }
130}
131
/// A virtio device frontend whose data path is serviced by an external
/// vhost-user backend process reached over a Unix socket.
#[derive(Debug)]
pub struct VuFrontend {
    // Device name, used as a prefix in log messages.
    name: Arc<str>,
    // vhost-user control connection to the backend.
    session: Arc<VuSession>,
    // Backend-request channel; present only when BACKEND_REQ was negotiated
    // in `new()`.
    channel: Option<VuChannel>,
    // Virtio device type reported to the guest.
    id: DeviceId,
    // vhost-user protocol features negotiated with the backend.
    vu_feature: VuFeature,
    // Raw device feature bits obtained from the backend's get_features.
    device_feature: u64,
    // Queue count reported by the backend's get_queue_num.
    num_queues: u16,
    // One eventfd per queue, registered with the backend as the per-queue
    // error fd and polled by the worker (see `activate`).
    err_fds: Box<[OwnedFd]>,
}
143
impl VuFrontend {
    /// Connects to the vhost-user backend listening at `socket` and performs
    /// the initial handshake: device-feature check, protocol-feature
    /// negotiation, queue count query, optional backend channel creation,
    /// per-queue error fd setup, and finally SET_OWNER.
    ///
    /// `extra_feat` lists protocol features the specific device additionally
    /// requires; missing required features produce an error.
    pub fn new<P>(
        name: impl Into<Arc<str>>,
        socket: P,
        id: DeviceId,
        extra_feat: VuFeature,
    ) -> Result<Self>
    where
        P: AsRef<Path>,
    {
        let name = name.into();
        let session = Arc::new(VuSession::new(socket)?);

        // The backend must support the vhost-user protocol feature bit and
        // virtio VERSION_1; bail out otherwise.
        let device_feature = session.get_features()?;
        let feat = VirtioFeature::from_bits_retain(device_feature as u128);
        log::trace!("{name}: get device feature: {feat:x?}");
        let need_feat = VirtioFeature::VHOST_PROTOCOL | VirtioFeature::VERSION_1;
        if !feat.contains(need_feat) {
            return vu_error::DeviceFeature {
                feature: need_feat.bits(),
            }
            .fail()?;
        }

        // Required protocol features: multiqueue, REPLY_ACK, dynamic memory
        // slots, plus whatever the caller asked for. The error reports only
        // the bits that are actually missing.
        let protocol_feat = VuFeature::from_bits_retain(session.get_protocol_features()?);
        log::trace!("{name}: get protocol feature: {protocol_feat:x?}");
        let need_feat =
            VuFeature::MQ | VuFeature::REPLY_ACK | VuFeature::CONFIGURE_MEM_SLOTS | extra_feat;
        if !protocol_feat.contains(need_feat) {
            return vu_error::ProtocolFeature {
                feature: need_feat & !protocol_feat,
            }
            .fail()?;
        }

        // STATUS is optional: take it when offered so device status can be
        // forwarded to the backend (see activate()/reset()).
        let mut vu_feature = need_feat;
        if protocol_feat.contains(VuFeature::STATUS) {
            vu_feature |= VuFeature::STATUS
        };
        session.set_protocol_features(&vu_feature.bits())?;
        log::trace!("{name}: set protocol feature: {vu_feature:x?}");

        let num_queues = session.get_queue_num()? as u16;
        log::trace!("{name}: get queue number: {num_queues}");

        // A backend-request channel is only created when the caller required
        // BACKEND_REQ via extra_feat (it is not requested by default above).
        let channel = if vu_feature.contains(VuFeature::BACKEND_REQ) {
            Some(session.create_channel()?)
        } else {
            None
        };

        // One non-blocking eventfd per queue, handed to the backend as the
        // queue's error fd; the worker polls these in activate().
        let mut err_fds = vec![];
        for index in 0..num_queues {
            let raw_fd = ffi!(unsafe { libc::eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK) })?;
            // SAFETY-relevant: raw_fd was just created by eventfd() and is
            // owned by no one else, so OwnedFd may take ownership.
            let fd = unsafe { OwnedFd::from_raw_fd(raw_fd) };
            session.set_virtq_err(&(index as u64), fd.as_fd())?;
            log::trace!("{name}: queue-{index}: set error fd: {}", fd.as_raw_fd());
            err_fds.push(fd);
        }

        session.set_owner()?;
        log::trace!("{name}: set owner");

        Ok(VuFrontend {
            name,
            session,
            channel,
            id,
            vu_feature,
            device_feature,
            num_queues,
            err_fds: err_fds.into(),
        })
    }

    /// The vhost-user control session to the backend.
    pub fn session(&self) -> &VuSession {
        &self.session
    }

    /// The backend-request channel, if BACKEND_REQ was negotiated.
    pub fn channel(&self) -> Option<&VuChannel> {
        self.channel.as_ref()
    }
}
227
228impl Virtio for VuFrontend {
229 type Config = VuDevConfig;
230 type Feature = VuDevFeature;
231
232 fn id(&self) -> DeviceId {
233 self.id
234 }
235
236 fn name(&self) -> &str {
237 &self.name
238 }
239
240 fn num_queues(&self) -> u16 {
241 self.num_queues
242 }
243
244 fn config(&self) -> Arc<Self::Config> {
245 assert!(self.vu_feature.contains(VuFeature::CONFIG));
246 Arc::new(VuDevConfig {
247 session: self.session.clone(),
248 })
249 }
250
251 fn feature(&self) -> u128 {
252 self.device_feature as u128
253 }
254
255 fn spawn_worker<S, E>(
256 self,
257 event_rx: Receiver<WakeEvent<S, E>>,
258 memory: Arc<RamBus>,
259 queue_regs: Arc<[QueueReg]>,
260 ) -> Result<(JoinHandle<()>, Arc<Notifier>)>
261 where
262 S: IrqSender,
263 E: IoeventFd,
264 {
265 Mio::spawn_worker(self, event_rx, memory, queue_regs)
266 }
267
268 fn ioeventfd_offloaded(&self, q_index: u16) -> Result<bool> {
269 if q_index < self.num_queues {
270 Ok(true)
271 } else {
272 error::InvalidQueueIndex { index: q_index }.fail()
273 }
274 }
275
276 fn shared_mem_regions(&self) -> Option<Arc<MemRegion>> {
277 None
278 }
279
280 fn mem_change_callback(&self) -> Option<Box<dyn LayoutChanged>> {
281 Some(Box::new(UpdateVuMem {
282 name: self.name.clone(),
283 session: self.session.clone(),
284 }))
285 }
286}
287
impl VirtioMio for VuFrontend {
    /// Brings the device up by programming the backend: driver features,
    /// per-queue kick/call fds, queue size/base/addresses, queue enable,
    /// error-fd polling, and (optionally) device status. The message order
    /// follows the vhost-user handshake and should not be rearranged.
    fn activate<'m, Q, S, E>(
        &mut self,
        feature: u128,
        active_mio: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        let name = &*self.name;
        // VHOST_PROTOCOL is always or-ed in; the guest-negotiated bits are
        // truncated to the u64 the vhost-user wire format carries.
        self.session
            .set_features(&((feature | VirtioFeature::VHOST_PROTOCOL.bits()) as u64))?;
        log::trace!("{name}: set driver feature: {feature:x?}");

        // Hand each queue's ioeventfd to the backend as its kick fd, so
        // guest notifications bypass this process entirely.
        for (index, fd) in active_mio.ioeventfds.iter().enumerate() {
            self.session.set_virtq_kick(&(index as u64), fd.as_fd())?;
            let raw_fd = fd.as_fd().as_raw_fd();
            log::trace!("{name}: queue-{index}: set kick fd: {raw_fd}");
        }

        for (index, queue) in active_mio.queues.iter().enumerate() {
            let Some(queue) = queue else {
                log::trace!("{name}: queue-{index} is disabled");
                continue;
            };
            let reg = queue.reg();

            // Give the backend the interrupt (call) fd; errors here are
            // deliberately ignored — presumably the irq sender may have no
            // per-queue irqfd in some configurations (TODO confirm).
            let _ = active_mio.irq_sender.queue_irqfd(index as _, |fd| {
                self.session.set_virtq_call(&(index as u64), fd)?;
                log::trace!("{name}: queue-{index}: set call fd: {}", fd.as_raw_fd());
                Ok(())
            });

            let virtq_num = VirtqState {
                index: index as _,
                val: reg.size.load(Ordering::Acquire) as _,
            };
            self.session.set_virtq_num(&virtq_num)?;
            log::trace!("{name}: queue-{index}: set size: {}", virtq_num.val);

            // Start processing from descriptor index 0.
            let virtq_base = VirtqState {
                index: index as _,
                val: 0,
            };
            self.session.set_virtq_base(&virtq_base)?;
            log::trace!("{name}: queue-{index}: set base: {}", virtq_base.val);

            // The backend expects host virtual addresses, so translate the
            // guest-physical ring addresses through the memory bus.
            let mem = active_mio.mem;
            let virtq_addr = VirtqAddr {
                index: index as _,
                flags: 0,
                desc_hva: mem.translate(reg.desc.load(Ordering::Acquire) as _)? as _,
                used_hva: mem.translate(reg.device.load(Ordering::Acquire) as _)? as _,
                avail_hva: mem.translate(reg.driver.load(Ordering::Acquire) as _)? as _,
                log_guest_addr: 0,
            };
            self.session.set_virtq_addr(&virtq_addr)?;
            log::trace!("{name}: queue-{index}: set addr: {virtq_addr:x?}");

            let virtq_enable = VirtqState {
                index: index as _,
                val: 1,
            };
            self.session.set_virtq_enable(&virtq_enable)?;
            log::trace!("{name}: queue-{index}: set enabled: {}", virtq_enable.val);
        }

        // Poll the per-queue error fds; the Token is the queue index.
        for (index, fd) in self.err_fds.iter().enumerate() {
            active_mio.poll.registry().register(
                &mut SourceFd(&fd.as_raw_fd()),
                Token(index),
                Interest::READABLE,
            )?;
        }

        // 0xf = ACKNOWLEDGE | DRIVER | DRIVER_OK | FEATURES_OK — tell the
        // backend the driver is fully up, when STATUS was negotiated.
        if self.vu_feature.contains(VuFeature::STATUS) {
            let dev_status = DevStatus::from_bits_retain(0xf);
            self.session.set_status(&(dev_status.bits() as u64))?;
            log::trace!("{name}: set status: {dev_status:x?}");
        }
        Ok(())
    }

    // No events are expected here: the only registered fds are queue error
    // fds, which the worker is assumed to route elsewhere — TODO confirm
    // against the Mio worker's dispatch logic.
    fn handle_event<'a, 'm, Q, S, E>(
        &mut self,
        _: &Event,
        _: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        unreachable!()
    }

    // Queue notifications go straight to the backend via the kick fds set in
    // activate(), so this path must never be taken.
    fn handle_queue<'m, Q, S, E>(
        &mut self,
        index: u16,
        _: &mut ActiveMio<'_, '_, 'm, Q, S, E>,
    ) -> Result<()>
    where
        Q: VirtQueue<'m>,
        S: IrqSender,
        E: IoeventFd,
    {
        unreachable!(
            "{}: queue {index} notification should go to vhost-user backend",
            self.name
        )
    }

    /// Tears the device down: disables every queue, clears device status
    /// when STATUS was negotiated, and deregisters the polled fds. Failures
    /// are logged rather than propagated so teardown always completes.
    fn reset(&mut self, registry: &Registry) {
        let name = &*self.name;
        for index in 0..self.num_queues {
            let disable = VirtqState {
                index: index as _,
                val: 0,
            };
            if let Err(e) = self.session.set_virtq_enable(&disable) {
                log::error!("{name}: failed to disable queue-{index}: {e:?}")
            }
        }
        if self.vu_feature.contains(VuFeature::STATUS)
            && let Err(e) = self.session.set_status(&0)
        {
            log::error!("{name}: failed to reset device status: {e:?}");
        }
        for (index, fd) in self.err_fds.iter().enumerate() {
            if let Err(e) = registry.deregister(&mut SourceFd(&fd.as_raw_fd())) {
                log::error!("{name}: queue-{index}: failed to deregister error fd: {e:?}");
            }
        }
        if let Some(channel) = &self.channel {
            let channel_fd = channel.conn.as_fd();
            if let Err(e) = registry.deregister(&mut SourceFd(&channel_fd.as_raw_fd())) {
                log::error!("{name}: failed to deregister backend channel fd: {e:?}")
            }
        }
    }
}
431
/// Construction parameters for a generic vhost-user frontend device.
pub struct VuFrontendParam {
    // Path to the backend's Unix socket.
    pub socket: Box<Path>,
    // Virtio device type to expose to the guest.
    pub id: DeviceId,
}
436
437impl DevParam for VuFrontendParam {
438 type Device = VuFrontend;
439
440 fn build(self, name: impl Into<Arc<str>>) -> Result<Self::Device> {
441 VuFrontend::new(name, self.socket, self.id, VuFeature::CONFIG)
442 }
443
444 fn needs_mem_shared_fd(&self) -> bool {
445 true
446 }
447}
448