Alioth Code Coverage

vm.rs: 0.00%

// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(target_arch = "aarch64")]
#[path = "vm_aarch64.rs"]
mod aarch64;
#[cfg(target_arch = "x86_64")]
#[path = "vm_x86_64/vm_x86_64.rs"]
mod x86_64;

#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::CpuidResult;
use std::collections::HashMap;
use std::fmt::{self, Display, Formatter};
use std::io::ErrorKind;
use std::num::NonZero;
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, FromRawFd, OwnedFd};
use std::os::unix::thread::JoinHandleExt;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};
use std::thread::JoinHandle;

use libc::{EFD_CLOEXEC, EFD_NONBLOCK, SIGRTMIN, eventfd, write};
use parking_lot::{Mutex, RwLock};
use snafu::ResultExt;

#[cfg(target_arch = "x86_64")]
use crate::arch::cpuid::CpuidIn;
#[cfg(target_arch = "x86_64")]
use crate::arch::sev::{SevPolicy, SnpPageType, SnpPolicy};
#[cfg(target_arch = "x86_64")]
use crate::arch::tdx::TdAttr;
use crate::ffi;
use crate::hv::kvm::vcpu::KvmVcpu;
use crate::hv::kvm::{KvmError, check_extension, kvm_error};
use crate::hv::{
    Error, IoeventFd, IoeventFdRegistry, IrqFd, IrqSender, Kvm, MemMapOption, MsiSender, Result,
    Vm, VmConfig, VmMemory, error,
};
#[cfg(target_arch = "x86_64")]
use crate::sys::kvm::KVM_IRQCHIP_IOAPIC;
#[cfg(target_arch = "aarch64")]
use crate::sys::kvm::KvmMsiFlag;
use crate::sys::kvm::{
    KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQ_ROUTING_MSI, KvmCap, KvmEnableCap, KvmEncRegion, KvmIoEventFd,
    KvmIoEventFdFlag, KvmIrqRouting, KvmIrqRoutingEntry, KvmIrqRoutingIrqchip, KvmIrqRoutingMsi,
    KvmIrqfd, KvmIrqfdFlag, KvmMemFlag, KvmMemoryAttribute, KvmMemoryAttributes, KvmMsi,
    KvmUserspaceMemoryRegion, KvmUserspaceMemoryRegion2, kvm_create_vm, kvm_enable_cap,
    kvm_get_vcpu_mmap_size, kvm_ioeventfd, kvm_irqfd, kvm_memory_encrypt_reg_region,
    kvm_memory_encrypt_unreg_region, kvm_set_gsi_routing, kvm_set_memory_attributes,
    kvm_set_user_memory_region, kvm_set_user_memory_region2, kvm_signal_msi,
};

#[cfg(target_arch = "aarch64")]
use self::aarch64::{VmArch, translate_msi_addr};
#[cfg(target_arch = "x86_64")]
use self::x86_64::{VmArch, translate_msi_addr};

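/// Shared state behind every KVM VM object: the VM file descriptor plus the
/// ioeventfd, MSI-routing, and IRQ-pin bookkeeping used by the types below.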
#[derive(Debug)]
pub struct VmInner {
    pub fd: OwnedFd,
    pub vcpu_mmap_size: usize,
    memfd: Option<OwnedFd>,
    ioeventfds: Mutex<HashMap<i32, KvmIoEventFd>>,
    msi_table: RwLock<HashMap<u32, KvmMsiEntryData>>,
    next_msi_gsi: AtomicU32,
    pin_map: AtomicU32,
    #[allow(dead_code)]
    arch: VmArch,
}

impl VmInner {
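    /// Rebuilds the GSI routing table from the wired IRQ pins and all unmasked
    /// MSI entries, then submits it via KVM_SET_GSI_ROUTING.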
    fn update_routing_table(&self, table: &HashMap<u32, KvmMsiEntryData>) -> Result<(), KvmError> {
        let mut entries = [KvmIrqRoutingEntry::default(); MAX_GSI_ROUTES];
        let mut index = 0;
        let pin_map = self.pin_map.load(Ordering::Acquire);
        #[cfg(target_arch = "x86_64")]
        let (irqchip, max_pin) = (KVM_IRQCHIP_IOAPIC, 24);
        #[cfg(target_arch = "aarch64")]
        let (irqchip, max_pin) = (0, 32);
        for pin in 0..max_pin {
            if pin_map & (1 << pin) == 0 {
                continue;
            }
            entries[index].gsi = pin;
            entries[index].type_ = KVM_IRQ_ROUTING_IRQCHIP;
            entries[index].routing.irqchip = KvmIrqRoutingIrqchip { irqchip, pin };
            index += 1;
        }
        for (gsi, entry) in table.iter() {
            if entry.masked {
                continue;
            }
            entries[index].gsi = *gsi;
            entries[index].type_ = KVM_IRQ_ROUTING_MSI;
            #[cfg(target_arch = "aarch64")]
            {
                entries[index].flags = KvmMsiFlag::VALID_DEVID;
            }
            let (lo, hi) = translate_msi_addr(entry.addr_lo, entry.addr_hi);
            entries[index].routing.msi = KvmIrqRoutingMsi {
                address_hi: hi,
                address_lo: lo,
                data: entry.data,
                #[cfg(target_arch = "aarch64")]
                devid: entry.devid,
                #[cfg(not(target_arch = "aarch64"))]
                devid: 0,
            };
            index += 1;
        }
        let irq_routing = KvmIrqRouting {
            nr: index as u32,
            _flags: 0,
            entries,
        };
        log::trace!("{self}: updating GSI routing table to {irq_routing:#x?}");
        unsafe { kvm_set_gsi_routing(&self.fd, &irq_routing) }.context(kvm_error::GsiRouting)?;
        Ok(())
    }

    pub fn check_extension(&self, id: KvmCap) -> Result<NonZero<i32>> {
        check_extension(&self.fd, id)
    }

    pub fn enable_cap(&self, cap: KvmCap, arg0: u64) -> Result<(), KvmError> {
        let request = KvmEnableCap {
            cap,
            args: [arg0, 0, 0, 0],
            flags: 0,
            pad: [0; 64],
        };
        unsafe { kvm_enable_cap(&self.fd, &request) }.context(kvm_error::EnableCap { cap })?;
        Ok(())
    }
}

impl Display for VmInner {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "kvm-{}", self.fd.as_raw_fd())
    }
}

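/// Next free slot id plus a map from (gpa, size) to the KVM slot it occupies.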
type MemSlots = (u32, HashMap<(u64, u64), u32>);

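/// Registers guest memory with KVM via KVM_SET_USER_MEMORY_REGION, or
/// KVM_SET_USER_MEMORY_REGION2 when the VM has a guest memfd.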
#[derive(Debug)]
pub struct KvmMemory {
    slots: Mutex<MemSlots>,
    vm: Arc<VmInner>,
}

impl KvmMemory {
    pub fn new(vm: &KvmVm) -> Self {
        KvmMemory {
            slots: Mutex::new((0, HashMap::new())),
            vm: vm.vm.clone(),
        }
    }

    fn unmap(&self, slot: u32, gpa: u64, size: u64) -> Result<()> {
        let flags = KvmMemFlag::empty();
        let region = KvmUserspaceMemoryRegion {
            slot,
            guest_phys_addr: gpa,
            memory_size: 0,
            userspace_addr: 0,
            flags,
        };
        unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
            .context(error::GuestUnmap { gpa, size })?;
        log::trace!(
            "{}: slot-{slot}: unmapped: {gpa:#018x}, size={size:#x}",
            self.vm
        );
        Ok(())
    }
}

impl VmMemory for KvmMemory {
    fn mem_map(&self, gpa: u64, size: u64, hva: usize, option: MemMapOption) -> Result<(), Error> {
        let mut flags = KvmMemFlag::empty();
        if !option.read || !option.exec {
            return kvm_error::MmapOption { option }.fail()?;
        }
        if !option.write {
            flags |= KvmMemFlag::READONLY;
        }
        if option.log_dirty {
            flags |= KvmMemFlag::LOG_DIRTY_PAGES;
        }
        let (slot_id, slots) = &mut *self.slots.lock();
        if let Some(memfd) = &self.vm.memfd {
            flags |= KvmMemFlag::GUEST_MEMFD;
            let region = KvmUserspaceMemoryRegion2 {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
                guest_memfd: memfd.as_raw_fd() as _,
                guest_memfd_offset: gpa,
                ..Default::default()
            };
            unsafe { kvm_set_user_memory_region2(&self.vm.fd, &region) }
        } else {
            let region = KvmUserspaceMemoryRegion {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
            };
            unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
        }
        .context(error::GuestMap { hva, gpa, size })?;
        slots.insert((gpa, size), *slot_id);
        log::trace!(
            "{}: slot-{slot_id}: mapped: {gpa:#018x} -> {hva:#018x}, size = {size:#x}",
            self.vm
        );
        *slot_id += 1;
        Ok(())
    }

    fn unmap(&self, gpa: u64, size: u64) -> Result<(), Error> {
        let (_, slots) = &mut *self.slots.lock();
        let Some(slot) = slots.remove(&(gpa, size)) else {
            return Err(ErrorKind::NotFound.into()).context(error::GuestUnmap { gpa, size });
        };
        self.unmap(slot, gpa, size)
    }

    fn register_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_reg_region(&self.vm.fd, &region) }
            .context(error::MemEncrypt)?;
        Ok(())
    }

    fn deregister_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_unreg_region(&self.vm.fd, &region) }
            .context(error::MemEncrypt)?;
        Ok(())
    }

    fn mark_private_memory(&self, gpa: u64, size: u64, private: bool) -> Result<()> {
        let attr = KvmMemoryAttributes {
            address: gpa,
            size,
            attributes: if private {
                KvmMemoryAttribute::PRIVATE
            } else {
                KvmMemoryAttribute::empty()
            },
            flags: 0,
        };
        unsafe { kvm_set_memory_attributes(&self.vm.fd, &attr) }.context(error::MemEncrypt)?;
        Ok(())
    }

    fn reset(&self) -> Result<()> {
        let (slot_id, slots) = &mut *self.slots.lock();
        for ((gpa, size), slot) in slots.drain() {
            self.unmap(slot, gpa, size)?;
        }
        *slot_id = 0;
        Ok(())
    }
}
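
/// Injects interrupts on a fixed irqchip pin by writing to an eventfd that has
/// been bound to the pin with KVM_IRQFD.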
#[derive(Debug)]
pub struct KvmIrqSender {
    pin: u8,
    vm: Arc<VmInner>,
    event_fd: OwnedFd,
}

impl Drop for KvmIrqSender {
    fn drop(&mut self) {
        let pin_flag = 1 << (self.pin as u32);
        self.vm.pin_map.fetch_and(!pin_flag, Ordering::AcqRel);
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.pin as u32,
            flags: KvmIrqfdFlag::DEASSIGN,
            ..Default::default()
        };
        if let Err(e) = unsafe { kvm_irqfd(&self.vm.fd, &request) } {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}

impl IrqSender for KvmIrqSender {
    fn send(&self) -> Result<(), Error> {
        ffi!(unsafe { write(self.event_fd.as_raw_fd(), &1u64 as *const _ as _, 8) })
            .context(error::SendInterrupt)?;
        Ok(())
    }
}

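/// Cached MSI routing data for one GSI; `dirty` marks changes made while the
/// entry was masked, to be flushed when it is unmasked.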
#[derive(Debug, Default)]
pub(crate) struct KvmMsiEntryData {
    addr_lo: u32,
    addr_hi: u32,
    data: u32,
    masked: bool,
    dirty: bool,
    #[cfg(target_arch = "aarch64")]
    devid: u32,
}

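/// An MSI backed by an eventfd: the route lives in `msi_table` under `gsi`, and
/// the fd is assigned or de-assigned with KVM_IRQFD as the entry is unmasked or
/// masked.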
#[derive(Debug)]
pub struct KvmIrqFd {
    event_fd: OwnedFd,
    vm: Arc<VmInner>,
    gsi: u32,
}

impl Drop for KvmIrqFd {
    fn drop(&mut self) {
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.remove(&self.gsi) else {
            log::error!(
                "{}: cannot find gsi {:#x} in the gsi table",
                self.vm,
                self.gsi,
            );
            return;
        };
        if entry.masked {
            return;
        }
        if let Err(e) = self.deassign_irqfd() {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}

impl AsFd for KvmIrqFd {
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.event_fd.as_fd()
    }
}

impl KvmIrqFd {
    fn assign_irqfd(&self) -> Result<()> {
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.gsi,
            ..Default::default()
        };
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;
        log::debug!(
            "{}: assigned: gsi {:#x} -> irqfd {:#x}",
            self.vm,
            self.gsi,
            self.event_fd.as_raw_fd()
        );
        Ok(())
    }

    fn deassign_irqfd(&self) -> Result<()> {
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.gsi,
            flags: KvmIrqfdFlag::DEASSIGN,
            ..Default::default()
        };
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;
        log::debug!(
            "{}: de-assigned: gsi {:#x} -> irqfd {:#x}",
            self.vm,
            self.gsi,
            self.event_fd.as_raw_fd()
        );
        Ok(())
    }
}

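/// Generates the getter/setter pair for one 32-bit field of the MSI entry; the
/// setter defers the routing-table update while the entry is masked.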
macro_rules! impl_irqfd_method {
    ($field:ident, $get:ident, $set:ident) => {
        fn $get(&self) -> u32 {
            let table = self.vm.msi_table.read();
            let Some(entry) = table.get(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            entry.$field
        }
        fn $set(&self, val: u32) -> Result<()> {
            let mut table = self.vm.msi_table.write();
            let Some(entry) = table.get_mut(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            if entry.$field == val {
                return Ok(());
            }
            entry.$field = val;

            if !entry.masked {
                self.vm.update_routing_table(&table)?;
            } else {
                entry.dirty = true;
            }
            Ok(())
        }
    };
}

impl IrqFd for KvmIrqFd {
    impl_irqfd_method!(addr_lo, get_addr_lo, set_addr_lo);

    impl_irqfd_method!(addr_hi, get_addr_hi, set_addr_hi);

    impl_irqfd_method!(data, get_data, set_data);

    fn get_masked(&self) -> bool {
        let table = self.vm.msi_table.read();
        let Some(entry) = table.get(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        entry.masked
    }

    fn set_masked(&self, val: bool) -> Result<bool> {
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.get_mut(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        if entry.masked == val {
            return Ok(false);
        }
        entry.masked = val;
        if !val {
            if entry.dirty {
                self.vm.update_routing_table(&table)?;
            }
            self.assign_irqfd()?;
        } else {
            self.deassign_irqfd()?;
        }
        Ok(true)
    }
}

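/// Maximum number of entries submitted in a single KVM_SET_GSI_ROUTING call.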
const MAX_GSI_ROUTES: usize = 256;

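/// Delivers MSIs with KVM_SIGNAL_MSI and hands out GSI-backed irqfds.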
#[derive(Debug)]
pub struct KvmMsiSender {
    vm: Arc<VmInner>,
    #[cfg(target_arch = "aarch64")]
    devid: u32,
}

impl MsiSender for KvmMsiSender {
    type IrqFd = KvmIrqFd;

    fn send(&self, addr: u64, data: u32) -> Result<()> {
        let (lo, hi) = translate_msi_addr(addr as u32, (addr >> 32) as u32);
        let kvm_msi = KvmMsi {
            address_lo: lo,
            address_hi: hi,
            data,
            #[cfg(target_arch = "aarch64")]
            devid: self.devid,
            #[cfg(target_arch = "aarch64")]
            flags: KvmMsiFlag::VALID_DEVID,
            ..Default::default()
        };
        unsafe { kvm_signal_msi(&self.vm.fd, &kvm_msi) }.context(error::SendInterrupt)?;
        Ok(())
    }

    fn create_irqfd(&self) -> Result<Self::IrqFd> {
        let event_fd = unsafe {
            OwnedFd::from_raw_fd(
                ffi!(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)).context(error::IrqFd)?,
            )
        };
        let mut table = self.vm.msi_table.write();
        let mut allocated_gsi = None;
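        // Probe the MSI GSI range for a free entry; if a candidate GSI is already
        // taken, put the previous entry back and try the next one.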
        for _ in 0..(MAX_GSI_ROUTES - 24) {
            let gsi = self.vm.next_msi_gsi.fetch_add(1, Ordering::AcqRel)
                % (MAX_GSI_ROUTES as u32 - 24)
                + 24;
            let new_entry = KvmMsiEntryData {
                #[cfg(target_arch = "aarch64")]
                devid: self.devid,
                masked: true,
                ..Default::default()
            };
            if let Some(e) = table.insert(gsi, new_entry) {
                table.insert(gsi, e);
            } else {
                allocated_gsi = Some(gsi);
                break;
            }
        }
        let Some(gsi) = allocated_gsi else {
            return kvm_error::AllocateGsi.fail()?;
        };
        log::debug!(
            "{}: allocated: gsi {gsi:#x} -> irqfd {:#x}",
            self.vm,
            event_fd.as_raw_fd()
        );
        let entry = KvmIrqFd {
            vm: self.vm.clone(),
            event_fd,
            gsi,
        };
        Ok(entry)
    }
}

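/// An eventfd suitable for registration as a KVM ioeventfd.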
#[derive(Debug)]
pub struct KvmIoeventFd {
    fd: OwnedFd,
}

impl AsFd for KvmIoeventFd {
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}

impl IoeventFd for KvmIoeventFd {}

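/// Creates eventfds and binds them to guest physical addresses with
/// KVM_IOEVENTFD, so guest writes to those addresses signal the eventfd
/// instead of exiting to userspace.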
#[derive(Debug)]
pub struct KvmIoeventFdRegistry {
    vm: Arc<VmInner>,
}

impl IoeventFdRegistry for KvmIoeventFdRegistry {
    type IoeventFd = KvmIoeventFd;

    fn create(&self) -> Result<Self::IoeventFd> {
        let fd =
            ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) }).context(error::IoeventFd)?;
        Ok(KvmIoeventFd {
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
        })
    }

    fn register(&self, fd: &Self::IoeventFd, gpa: u64, len: u8, data: Option<u64>) -> Result<()> {
        let mut request = KvmIoEventFd {
            addr: gpa,
            len: len as u32,
            fd: fd.as_fd().as_raw_fd(),
            ..Default::default()
        };
        if let Some(data) = data {
            request.datamatch = data;
            request.flags |= KvmIoEventFdFlag::DATA_MATCH;
        }
        unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        let mut fds = self.vm.ioeventfds.lock();
        fds.insert(request.fd, request);
        Ok(())
    }

    fn deregister(&self, fd: &Self::IoeventFd) -> Result<()> {
        let mut fds = self.vm.ioeventfds.lock();
        if let Some(mut request) = fds.remove(&fd.as_fd().as_raw_fd()) {
            request.flags |= KvmIoEventFdFlag::DEASSIGN;
            unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        }
        Ok(())
    }
}

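/// A KVM-backed virtual machine implementing the generic `Vm` trait.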
pub struct KvmVm {
    pub vm: Arc<VmInner>,
    memory_created: bool,
}

impl KvmVm {
    pub fn new(kvm: &Kvm, config: &VmConfig) -> Result<Self> {
        let vcpu_mmap_size =
            unsafe { kvm_get_vcpu_mmap_size(&kvm.fd) }.context(error::CreateVm)? as usize;
        let kvm_vm_type = Self::determine_vm_type(config);
        let vm_fd = unsafe { kvm_create_vm(&kvm.fd, kvm_vm_type) }.context(error::CreateVm)?;
        let fd = unsafe { OwnedFd::from_raw_fd(vm_fd) };
        let arch = VmArch::new(kvm, config)?;
        let memfd = Self::create_guest_memfd(config, &fd)?;
        let kvm_vm = KvmVm {
            vm: Arc::new(VmInner {
                fd,
                vcpu_mmap_size,
                memfd,
                ioeventfds: Mutex::new(HashMap::new()),
                msi_table: RwLock::new(HashMap::new()),
                next_msi_gsi: AtomicU32::new(0),
                pin_map: AtomicU32::new(0),
                arch,
            }),
            memory_created: false,
        };
        kvm_vm.init(config)?;
        Ok(kvm_vm)
    }
}

impl Vm for KvmVm {
    #[cfg(target_arch = "aarch64")]
    type GicV2 = aarch64::KvmGicV2;
    #[cfg(target_arch = "aarch64")]
    type GicV2m = aarch64::KvmGicV2m;
    #[cfg(target_arch = "aarch64")]
    type GicV3 = aarch64::KvmGicV3;
    type IoeventFdRegistry = KvmIoeventFdRegistry;
    type IrqSender = KvmIrqSender;
    #[cfg(target_arch = "aarch64")]
    type Its = aarch64::KvmIts;
    type Memory = KvmMemory;
    type MsiSender = KvmMsiSender;
    type Vcpu = KvmVcpu;

    fn create_vcpu(&self, index: u16, identity: u64) -> Result<Self::Vcpu, Error> {
        KvmVcpu::new(self, index, identity)
    }

    fn stop_vcpu<T>(&self, _identity: u64, handle: &JoinHandle<T>) -> Result<(), Error> {
        ffi!(unsafe { libc::pthread_kill(handle.as_pthread_t() as _, SIGRTMIN()) })
            .context(error::StopVcpu)?;
        Ok(())
    }

    fn create_vm_memory(&mut self) -> Result<Self::Memory, Error> {
        if self.memory_created {
            error::MemoryCreated.fail()
        } else {
            let kvm_memory = KvmMemory::new(self);
            self.memory_created = true;
            Ok(kvm_memory)
        }
    }

    fn create_irq_sender(&self, pin: u8) -> Result<Self::IrqSender, Error> {
        let pin_flag = 1 << pin;
        if self.vm.pin_map.fetch_or(pin_flag, Ordering::AcqRel) & pin_flag == pin_flag {
            return Err(std::io::ErrorKind::AlreadyExists.into()).context(error::CreateIrq { pin });
        }
        self.vm.check_extension(KvmCap::IRQFD)?;
        let event_fd = ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) })
            .context(error::CreateIrq { pin })?;
        let request = KvmIrqfd {
            fd: event_fd as u32,
            gsi: pin as u32,
            ..Default::default()
        };
        self.vm.update_routing_table(&self.vm.msi_table.read())?;
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::CreateIrq { pin })?;
        Ok(KvmIrqSender {
            pin,
            vm: self.vm.clone(),
            event_fd: unsafe { OwnedFd::from_raw_fd(event_fd) },
        })
    }

    fn create_msi_sender(
        &self,
        #[cfg(target_arch = "aarch64")] devid: u32,
    ) -> Result<Self::MsiSender> {
        self.vm.check_extension(KvmCap::SIGNAL_MSI)?;
        Ok(KvmMsiSender {
            vm: self.vm.clone(),
            #[cfg(target_arch = "aarch64")]
            devid,
        })
    }

    fn create_ioeventfd_registry(&self) -> Result<Self::IoeventFdRegistry> {
        Ok(KvmIoeventFdRegistry {
            vm: self.vm.clone(),
        })
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_start(&self, policy: SevPolicy) -> Result<(), Error> {
        KvmVm::sev_launch_start(self, policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_data(&self, range: &mut [u8]) -> Result<(), Error> {
        KvmVm::sev_launch_update_data(self, range)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_vmsa(&self) -> Result<(), Error> {
        KvmVm::sev_launch_update_vmsa(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_measure(&self) -> Result<Vec<u8>, Error> {
        KvmVm::sev_launch_measure(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_finish(&self) -> Result<(), Error> {
        KvmVm::sev_launch_finish(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_start(&self, policy: SnpPolicy) -> Result<()> {
        KvmVm::snp_launch_start(self, policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_update(&self, range: &mut [u8], gpa: u64, type_: SnpPageType) -> Result<()> {
        KvmVm::snp_launch_update(self, range, gpa, type_)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_finish(&self) -> Result<()> {
        KvmVm::snp_launch_finish(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn tdx_init_vm(&self, attr: TdAttr, cpuids: &HashMap<CpuidIn, CpuidResult>) -> Result<()> {
        KvmVm::tdx_init_vm(self, attr, cpuids)
    }

    #[cfg(target_arch = "x86_64")]
    fn tdx_finalize_vm(&self) -> Result<()> {
        KvmVm::tdx_finalize_vm(self)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2(&self, distributor_base: u64, cpu_interface_base: u64) -> Result<Self::GicV2> {
        aarch64::KvmGicV2::new(self, distributor_base, cpu_interface_base)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2m(&self, _base: u64) -> Result<Self::GicV2m> {
        Err(std::io::ErrorKind::Unsupported.into()).context(error::CreateDevice)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v3(
        &self,
        distributor_base: u64,
        redistributor_base: u64,
        redistributor_count: u16,
    ) -> Result<Self::GicV3> {
        aarch64::KvmGicV3::new(
            self,
            distributor_base,
            redistributor_base,
            redistributor_count,
        )
    }

    #[cfg(target_arch = "aarch64")]
    fn create_its(&self, base: u64) -> Result<Self::Its> {
        aarch64::KvmIts::new(self, base)
    }
}

#[cfg(test)]
#[path = "vm_test.rs"]
mod tests;