// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
15
#[cfg(target_arch = "aarch64")]16
#[path = "vm_aarch64.rs"]17
mod aarch64;18
#[cfg(target_arch = "x86_64")]19
#[path = "vm_x86_64/vm_x86_64.rs"]20
mod x86_64;21
22
#[cfg(target_arch = "x86_64")]23
use std::arch::x86_64::CpuidResult;24
use std::collections::HashMap;25
use std::fmt::{self, Display, Formatter};26
use std::io::ErrorKind;27
use std::num::NonZero;28
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, FromRawFd, OwnedFd};29
use std::os::unix::thread::JoinHandleExt;30
use std::sync::Arc;31
use std::sync::atomic::{AtomicU32, Ordering};32
use std::thread::JoinHandle;33
34
#[cfg(not(target_arch = "x86_64"))]35
use libc::write;36
use libc::{EFD_CLOEXEC, EFD_NONBLOCK, SIGRTMIN, eventfd};37
use parking_lot::{Mutex, RwLock};38
use snafu::ResultExt;39
40
#[cfg(target_arch = "x86_64")]41
use crate::arch::cpuid::CpuidIn;42
#[cfg(target_arch = "x86_64")]43
use crate::arch::sev::{SevPolicy, SnpPageType, SnpPolicy};44
#[cfg(target_arch = "x86_64")]45
use crate::arch::tdx::TdAttr;46
use crate::ffi;47
#[cfg(not(target_arch = "x86_64"))]48
use crate::hv::IrqSender;49
use crate::hv::kvm::vcpu::KvmVcpu;50
use crate::hv::kvm::{KvmError, check_extension, kvm_error};51
use crate::hv::{52
Error, IoeventFd, IoeventFdRegistry, IrqFd, Kvm, MemMapOption, MsiSender, Result, Vm, VmConfig,53
VmMemory, error,54
};55
#[cfg(target_arch = "x86_64")]56
use crate::sys::kvm::KVM_IRQCHIP_IOAPIC;57
#[cfg(target_arch = "aarch64")]58
use crate::sys::kvm::KvmMsiFlag;59
use crate::sys::kvm::{60
KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQ_ROUTING_MSI, KvmCap, KvmEnableCap, KvmEncRegion, KvmIoEventFd,61
KvmIoEventFdFlag, KvmIrqRouting, KvmIrqRoutingEntry, KvmIrqRoutingIrqchip, KvmIrqRoutingMsi,62
KvmIrqfd, KvmIrqfdFlag, KvmMemFlag, KvmMemoryAttribute, KvmMemoryAttributes, KvmMsi,63
KvmUserspaceMemoryRegion, KvmUserspaceMemoryRegion2, kvm_create_vm, kvm_enable_cap,64
kvm_get_vcpu_mmap_size, kvm_ioeventfd, kvm_irqfd, kvm_memory_encrypt_reg_region,65
kvm_memory_encrypt_unreg_region, kvm_set_gsi_routing, kvm_set_memory_attributes,66
kvm_set_user_memory_region, kvm_set_user_memory_region2, kvm_signal_msi,67
};68
69
#[cfg(target_arch = "aarch64")]70
use self::aarch64::{VmArch, translate_msi_addr};71
#[cfg(target_arch = "x86_64")]72
use self::x86_64::{VmArch, translate_msi_addr};73
74
/// State of a KVM virtual machine that is shared (via `Arc`) between the
/// `KvmVm` handle, its memory object, and all interrupt/ioeventfd helpers.
#[derive(Debug)]
pub struct VmInner {
    /// VM file descriptor returned by `KVM_CREATE_VM`.
    pub fd: OwnedFd,
    /// Size of the per-vCPU shared mmap region, from `KVM_GET_VCPU_MMAP_SIZE`.
    pub vcpu_mmap_size: usize,
    /// Guest memfd used for `KVM_MEM_GUEST_MEMFD` mappings, when configured.
    memfd: Option<OwnedFd>,
    /// Registered ioeventfds, keyed by the raw eventfd number.
    ioeventfds: Mutex<HashMap<i32, KvmIoEventFd>>,
    /// MSI routing entries, keyed by GSI number.
    msi_table: RwLock<HashMap<u32, KvmMsiEntryData>>,
    /// Next candidate GSI for MSI allocation; wraps within the MSI range.
    next_msi_gsi: AtomicU32,
    /// Bitmap of legacy irqchip pins currently claimed by `KvmIrqSender`s.
    pin_map: AtomicU32,
    #[allow(dead_code)]
    arch: VmArch,
}
87
impl VmInner {
    /// Rebuilds the complete KVM GSI routing table from the claimed irqchip
    /// pins (`pin_map`) and the unmasked MSI entries in `table`, then submits
    /// it via `KVM_SET_GSI_ROUTING`. KVM replaces the whole table atomically.
    fn update_routing_table(&self, table: &HashMap<u32, KvmMsiEntryData>) -> Result<(), KvmError> {
        let mut entries = [KvmIrqRoutingEntry::default(); MAX_GSI_ROUTES];
        let mut index = 0;
        let pin_map = self.pin_map.load(Ordering::Acquire);
        // Legacy irqchip: the IOAPIC (24 pins) on x86_64; chip 0 with 32 pins
        // on aarch64.
        #[cfg(target_arch = "x86_64")]
        let (irqchip, max_pin) = (KVM_IRQCHIP_IOAPIC, 24);
        #[cfg(target_arch = "aarch64")]
        let (irqchip, max_pin) = (0, 32);
        for pin in 0..max_pin {
            if pin_map & (1 << pin) == 0 {
                continue;
            }
            // Identity-map each claimed pin: GSI n -> irqchip pin n.
            entries[index].gsi = pin;
            entries[index].type_ = KVM_IRQ_ROUTING_IRQCHIP;
            entries[index].routing.irqchip = KvmIrqRoutingIrqchip { irqchip, pin };
            index += 1;
        }
        // NOTE(review): this assumes pins + unmasked MSI entries never exceed
        // MAX_GSI_ROUTES; the `entries[index]` access would panic otherwise.
        for (gsi, entry) in table.iter() {
            // Masked MSIs are simply omitted from the table.
            if entry.masked {
                continue;
            }
            entries[index].gsi = *gsi;
            entries[index].type_ = KVM_IRQ_ROUTING_MSI;
            #[cfg(target_arch = "aarch64")]
            {
                // On aarch64 the devid field below is only honored when this
                // flag marks it as valid.
                entries[index].flags = KvmMsiFlag::VALID_DEVID;
            }
            let (lo, hi) = translate_msi_addr(entry.addr_lo, entry.addr_hi);
            entries[index].routing.msi = KvmIrqRoutingMsi {
                address_hi: hi,
                address_lo: lo,
                data: entry.data,
                #[cfg(target_arch = "aarch64")]
                devid: entry.devid,
                #[cfg(not(target_arch = "aarch64"))]
                devid: 0,
            };
            index += 1;
        }
        let irq_routing = KvmIrqRouting {
            nr: index as u32,
            _flags: 0,
            entries,
        };
        log::trace!("{self}: updating GSI routing table to {irq_routing:#x?}");
        unsafe { kvm_set_gsi_routing(&self.fd, &irq_routing) }.context(kvm_error::GsiRouting)?;
        Ok(())
    }

    /// Queries `KVM_CHECK_EXTENSION` on the VM fd; `Ok` carries the nonzero
    /// value KVM returned for the capability.
    pub fn check_extension(&self, id: KvmCap) -> Result<NonZero<i32>> {
        check_extension(&self.fd, id)
    }

    /// Enables a KVM capability on the VM, passing `arg0` as the only
    /// capability argument.
    pub fn enable_cap(&self, cap: KvmCap, arg0: u64) -> Result<(), KvmError> {
        let request = KvmEnableCap {
            cap,
            args: [arg0, 0, 0, 0],
            flags: 0,
            pad: [0; 64],
        };
        unsafe { kvm_enable_cap(&self.fd, &request) }.context(kvm_error::EnableCap { cap })?;
        Ok(())
    }
}
153
impl Display for VmInner {154
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {155
write!(f, "kvm-{}", self.fd.as_raw_fd())156
}157
}158
159
/// The next free slot id, plus a map from `(gpa, size)` to the slot id that
/// region occupies.
type MemSlots = (u32, HashMap<(u64, u64), u32>);
161
/// Guest memory manager for a KVM VM; tracks which memory slots are in use.
#[derive(Debug)]
pub struct KvmMemory {
    // Slot bookkeeping; see `MemSlots`.
    slots: Mutex<MemSlots>,
    vm: Arc<VmInner>,
}
167
impl KvmMemory {
    /// Creates a memory manager sharing the VM's inner state, starting with
    /// no slots mapped.
    pub fn new(vm: &KvmVm) -> Self {
        KvmMemory {
            slots: Mutex::new((0, HashMap::new())),
            vm: vm.vm.clone(),
        }
    }

    /// Deletes memory slot `slot` by re-registering it with `memory_size` 0,
    /// which is how KVM removes a slot. `gpa`/`size` are only used for error
    /// context and logging.
    fn unmap(&self, slot: u32, gpa: u64, size: u64) -> Result<()> {
        let flags = KvmMemFlag::empty();
        let region = KvmUserspaceMemoryRegion {
            slot,
            guest_phys_addr: gpa,
            memory_size: 0,
            userspace_addr: 0,
            flags,
        };
        unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
            .context(error::GuestUnmap { gpa, size })?;
        log::trace!(
            "{}: slot-{slot}: unmapped: {gpa:#018x}, size={size:#x}",
            self.vm
        );
        Ok(())
    }
}
194
impl VmMemory for KvmMemory {
    /// Maps host memory at `hva` into the guest at `gpa` using the next free
    /// slot. Uses `KVM_SET_USER_MEMORY_REGION2` with the guest memfd when the
    /// VM has one, and the legacy ioctl otherwise.
    fn mem_map(&self, gpa: u64, size: u64, hva: usize, option: MemMapOption) -> Result<(), Error> {
        let mut flags = KvmMemFlag::empty();
        // KVM slots cannot express non-readable or non-executable mappings.
        if !option.read || !option.exec {
            return kvm_error::MmapOption { option }.fail()?;
        }
        if !option.write {
            flags |= KvmMemFlag::READONLY;
        }
        if option.log_dirty {
            flags |= KvmMemFlag::LOG_DIRTY_PAGES;
        }
        let (slot_id, slots) = &mut *self.slots.lock();
        if let Some(memfd) = &self.vm.memfd {
            flags |= KvmMemFlag::GUEST_MEMFD;
            let region = KvmUserspaceMemoryRegion2 {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
                guest_memfd: memfd.as_raw_fd() as _,
                // The memfd is laid out to mirror guest physical addresses,
                // so the offset equals the gpa.
                guest_memfd_offset: gpa,
                ..Default::default()
            };
            unsafe { kvm_set_user_memory_region2(&self.vm.fd, &region) }
        } else {
            let region = KvmUserspaceMemoryRegion {
                slot: *slot_id,
                guest_phys_addr: gpa as _,
                memory_size: size as _,
                userspace_addr: hva as _,
                flags,
            };
            unsafe { kvm_set_user_memory_region(&self.vm.fd, &region) }
        }
        .context(error::GuestMap { hva, gpa, size })?;
        slots.insert((gpa, size), *slot_id);
        log::trace!(
            "{}: slot-{slot_id}: mapped: {gpa:#018x} -> {hva:#018x}, size = {size:#x}",
            self.vm
        );
        // Slot ids are never reused until reset(); just count upward.
        *slot_id += 1;
        Ok(())
    }

    /// Unmaps the slot previously mapped at exactly `(gpa, size)`; fails with
    /// NotFound if no such mapping exists.
    fn unmap(&self, gpa: u64, size: u64) -> Result<(), Error> {
        let (_, slots) = &mut *self.slots.lock();
        let Some(slot) = slots.remove(&(gpa, size)) else {
            return Err(ErrorKind::NotFound.into()).context(error::GuestUnmap { gpa, size });
        };
        self.unmap(slot, gpa, size)
    }

    /// Registers a host memory range for memory encryption
    /// (`KVM_MEMORY_ENCRYPT_REG_REGION`).
    fn register_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_reg_region(&self.vm.fd, &region) }
            .context(error::MemEncrypt)?;
        Ok(())
    }

    /// Deregisters a previously registered encrypted host memory range.
    fn deregister_encrypted_range(&self, range: &[u8]) -> Result<()> {
        let region = KvmEncRegion {
            addr: range.as_ptr() as u64,
            size: range.len() as u64,
        };
        unsafe { kvm_memory_encrypt_unreg_region(&self.vm.fd, &region) }
            .context(error::MemEncrypt)?;
        Ok(())
    }

    /// Marks a guest physical range as private (guest-memfd backed) or
    /// shared via `KVM_SET_MEMORY_ATTRIBUTES`.
    fn mark_private_memory(&self, gpa: u64, size: u64, private: bool) -> Result<()> {
        let attr = KvmMemoryAttributes {
            address: gpa,
            size,
            attributes: if private {
                KvmMemoryAttribute::PRIVATE
            } else {
                KvmMemoryAttribute::empty()
            },
            flags: 0,
        };
        unsafe { kvm_set_memory_attributes(&self.vm.fd, &attr) }.context(error::MemEncrypt)?;
        Ok(())
    }

    /// Removes every mapped slot and restarts slot numbering from 0.
    fn reset(&self) -> Result<()> {
        let (slot_id, slots) = &mut *self.slots.lock();
        for ((gpa, size), slot) in slots.drain() {
            self.unmap(slot, gpa, size)?;
        }
        *slot_id = 0;
        Ok(())
    }
}
/// A level/pin-based interrupt source backed by an irqfd bound to a legacy
/// irqchip pin. Not used on x86_64 (which drives the IOAPIC differently).
#[cfg(not(target_arch = "x86_64"))]
#[derive(Debug)]
pub struct KvmIrqSender {
    /// The irqchip pin this sender triggers.
    pin: u8,
    vm: Arc<VmInner>,
    /// Writing 1 to this eventfd injects the interrupt via KVM's irqfd.
    event_fd: OwnedFd,
}
300
#[cfg(not(target_arch = "x86_64"))]
impl Drop for KvmIrqSender {
    /// Releases the claimed pin bit and detaches the irqfd from KVM.
    /// Errors are logged rather than propagated since Drop cannot fail.
    fn drop(&mut self) {
        let pin_flag = 1 << (self.pin as u32);
        // Clear this pin's bit so the pin can be claimed again.
        self.vm.pin_map.fetch_and(!pin_flag, Ordering::AcqRel);
        let request = KvmIrqfd {
            fd: self.event_fd.as_raw_fd() as u32,
            gsi: self.pin as u32,
            flags: KvmIrqfdFlag::DEASSIGN,
            ..Default::default()
        };
        if let Err(e) = unsafe { kvm_irqfd(&self.vm.fd, &request) } {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}
321
#[cfg(not(target_arch = "x86_64"))]
impl IrqSender for KvmIrqSender {
    /// Injects the interrupt by writing the 8-byte value 1 to the eventfd
    /// that KVM's irqfd is listening on.
    fn send(&self) -> Result<(), Error> {
        ffi!(unsafe { write(self.event_fd.as_raw_fd(), &1u64 as *const _ as _, 8) })
            .context(error::SendInterrupt)?;
        Ok(())
    }
}
330
/// One MSI routing entry as tracked in `VmInner::msi_table`, mirroring what
/// will be programmed into the KVM GSI routing table.
#[derive(Debug, Default)]
pub(crate) struct KvmMsiEntryData {
    // MSI address, split into low and high 32-bit halves.
    addr_lo: u32,
    addr_hi: u32,
    // MSI data payload.
    data: u32,
    // Masked entries are omitted from the routing table.
    masked: bool,
    // Set when the entry changed while masked; forces a routing-table
    // refresh on unmask.
    dirty: bool,
    // Device id required for MSI routing on aarch64.
    #[cfg(target_arch = "aarch64")]
    devid: u32,
}
341
/// An eventfd bound to an MSI GSI via KVM's irqfd mechanism; dropping it
/// removes the GSI's entry from the MSI table.
#[derive(Debug)]
pub struct KvmIrqFd {
    event_fd: OwnedFd,
    vm: Arc<VmInner>,
    /// The GSI this irqfd is (or will be) attached to.
    gsi: u32,
}
348
impl Drop for KvmIrqFd {
    /// Removes this GSI's entry from the MSI table and, if the entry was
    /// active (unmasked), detaches the irqfd from KVM. Errors are logged
    /// since Drop cannot fail.
    fn drop(&mut self) {
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.remove(&self.gsi) else {
            log::error!(
                "{}: cannot find gsi {:#x} in the gsi table",
                self.vm,
                self.gsi,
            );
            return;
        };
        // A masked entry was never assigned to KVM, so nothing to detach.
        if entry.masked {
            return;
        }
        if let Err(e) = self.deassign_irqfd() {
            log::error!(
                "{}: removing irqfd {:#x}: {e}",
                self.vm,
                self.event_fd.as_raw_fd(),
            )
        }
    }
}
372
impl AsFd for KvmIrqFd {373
fn as_fd(&self) -> BorrowedFd<'_> {374
self.event_fd.as_fd()375
}376
}377
378
impl KvmIrqFd {379
fn assign_irqfd(&self) -> Result<()> {380
let request = KvmIrqfd {381
fd: self.event_fd.as_raw_fd() as u32,382
gsi: self.gsi,383
..Default::default()384
};385
unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;386
log::debug!(387
"{}: assigned: gsi {:#x} -> irqfd {:#x}",388
self.vm,389
self.gsi,390
self.event_fd.as_raw_fd()391
);392
Ok(())393
}394
395
fn deassign_irqfd(&self) -> Result<()> {396
let request = KvmIrqfd {397
fd: self.event_fd.as_raw_fd() as u32,398
gsi: self.gsi,399
flags: KvmIrqfdFlag::DEASSIGN,400
..Default::default()401
};402
unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::IrqFd)?;403
log::debug!(404
"{}: de-assigned: gsi {:#x} -> irqfd {:#x}",405
self.vm,406
self.gsi,407
self.event_fd.as_raw_fd()408
);409
Ok(())410
}411
}412
413
/// Generates the getter/setter pair for one field of the MSI entry backing a
/// `KvmIrqFd` (`addr_lo`, `addr_hi`, `data`). The setter only pushes a new
/// routing table to KVM when the entry is unmasked; otherwise it marks the
/// entry dirty so the refresh happens on unmask.
macro_rules! impl_irqfd_method {
    ($field:ident, $get:ident, $set:ident) => {
        fn $get(&self) -> u32 {
            let table = self.vm.msi_table.read();
            // The entry is inserted at creation and removed only in Drop,
            // so it must exist for a live KvmIrqFd.
            let Some(entry) = table.get(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            entry.$field
        }
        fn $set(&self, val: u32) -> Result<()> {
            let mut table = self.vm.msi_table.write();
            let Some(entry) = table.get_mut(&self.gsi) else {
                unreachable!("cannot find gsi {}", self.gsi);
            };
            // No-op writes skip the (expensive) routing-table update.
            if entry.$field == val {
                return Ok(());
            }
            entry.$field = val;

            if !entry.masked {
                self.vm.update_routing_table(&table)?;
            } else {
                // Defer the KVM update until the entry is unmasked.
                entry.dirty = true;
            }
            Ok(())
        }
    };
}
442
impl IrqFd for KvmIrqFd {
    impl_irqfd_method!(addr_lo, get_addr_lo, set_addr_lo);

    impl_irqfd_method!(addr_hi, get_addr_hi, set_addr_hi);

    impl_irqfd_method!(data, get_data, set_data);

    /// Returns whether this MSI entry is currently masked.
    fn get_masked(&self) -> bool {
        let table = self.vm.msi_table.read();
        let Some(entry) = table.get(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        entry.masked
    }

    /// Masks or unmasks the MSI. Returns `Ok(true)` if the state changed.
    /// On unmask, a deferred (dirty) routing update is flushed before the
    /// irqfd is re-assigned; on mask, the irqfd is detached from KVM.
    fn set_masked(&self, val: bool) -> Result<bool> {
        let mut table = self.vm.msi_table.write();
        let Some(entry) = table.get_mut(&self.gsi) else {
            unreachable!("{}: cannot find gsi {:#x}", self.vm, self.gsi);
        };
        if entry.masked == val {
            return Ok(false);
        }
        entry.masked = val;
        if !val {
            if entry.dirty {
                self.vm.update_routing_table(&table)?;
            }
            self.assign_irqfd()?;
        } else {
            self.deassign_irqfd()?;
        }
        Ok(true)
    }
}
478
/// Capacity of the GSI routing table submitted to KVM; also bounds MSI GSI
/// allocation (GSIs 24..MAX_GSI_ROUTES are used for MSIs).
const MAX_GSI_ROUTES: usize = 256;
480
/// Sends MSIs into the VM, either directly (`KVM_SIGNAL_MSI`) or via irqfds
/// it creates.
#[derive(Debug)]
pub struct KvmMsiSender {
    vm: Arc<VmInner>,
    /// Device id attached to every MSI from this sender (aarch64 only).
    #[cfg(target_arch = "aarch64")]
    devid: u32,
}
487
impl MsiSender for KvmMsiSender {
    type IrqFd = KvmIrqFd;

    /// Injects one MSI directly with `KVM_SIGNAL_MSI`, translating the
    /// address through the arch-specific hook first.
    fn send(&self, addr: u64, data: u32) -> Result<()> {
        let (lo, hi) = translate_msi_addr(addr as u32, (addr >> 32) as u32);
        let kvm_msi = KvmMsi {
            address_lo: lo,
            address_hi: hi,
            data,
            #[cfg(target_arch = "aarch64")]
            devid: self.devid,
            #[cfg(target_arch = "aarch64")]
            flags: KvmMsiFlag::VALID_DEVID,
            ..Default::default()
        };
        unsafe { kvm_signal_msi(&self.vm.fd, &kvm_msi) }.context(error::SendInterrupt)?;
        Ok(())
    }

    /// Allocates a free GSI in the MSI range (24..MAX_GSI_ROUTES), records a
    /// masked placeholder entry for it, and returns a `KvmIrqFd` wrapping a
    /// fresh eventfd. The irqfd is not attached to KVM until it is unmasked.
    fn create_irqfd(&self) -> Result<Self::IrqFd> {
        let event_fd = unsafe {
            OwnedFd::from_raw_fd(
                ffi!(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)).context(error::IrqFd)?,
            )
        };
        let mut table = self.vm.msi_table.write();
        let mut allocated_gsi = None;
        // Scan at most the whole MSI range once, starting from the rotating
        // next_msi_gsi cursor.
        for _ in 0..(MAX_GSI_ROUTES - 24) {
            let gsi = self.vm.next_msi_gsi.fetch_add(1, Ordering::AcqRel)
                % (MAX_GSI_ROUTES as u32 - 24)
                + 24;
            let new_entry = KvmMsiEntryData {
                #[cfg(target_arch = "aarch64")]
                devid: self.devid,
                // Start masked; set_masked(false) activates the route.
                masked: true,
                ..Default::default()
            };
            if let Some(e) = table.insert(gsi, new_entry) {
                // GSI already taken: restore the previous entry and retry.
                table.insert(gsi, e);
            } else {
                allocated_gsi = Some(gsi);
                break;
            }
        }
        let Some(gsi) = allocated_gsi else {
            return kvm_error::AllocateGsi.fail()?;
        };
        log::debug!(
            "{}: allocated: gsi {gsi:#x} -> irqfd {:#x}",
            self.vm,
            event_fd.as_raw_fd()
        );
        let entry = KvmIrqFd {
            vm: self.vm.clone(),
            event_fd,
            gsi,
        };
        Ok(entry)
    }
}
548
/// An eventfd that KVM signals when the guest writes a registered
/// guest-physical address (see `KvmIoeventFdRegistry`).
#[derive(Debug)]
pub struct KvmIoeventFd {
    fd: OwnedFd,
}

impl AsFd for KvmIoeventFd {
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}

// Marker impl: the trait's requirements are satisfied by AsFd above.
impl IoeventFd for KvmIoeventFd {}
561
/// Creates ioeventfds and (de)registers them with KVM via `KVM_IOEVENTFD`.
#[derive(Debug)]
pub struct KvmIoeventFdRegistry {
    vm: Arc<VmInner>,
}
566
impl IoeventFdRegistry for KvmIoeventFdRegistry {
    type IoeventFd = KvmIoeventFd;

    /// Creates a fresh non-blocking, close-on-exec eventfd.
    fn create(&self) -> Result<Self::IoeventFd> {
        let fd =
            ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) }).context(error::IoeventFd)?;
        Ok(KvmIoeventFd {
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
        })
    }

    /// Asks KVM to signal `fd` when the guest writes `len` bytes at `gpa`;
    /// with `data`, only writes of that exact value trigger the eventfd.
    /// The request is remembered so `deregister` can replay it with DEASSIGN.
    fn register(&self, fd: &Self::IoeventFd, gpa: u64, len: u8, data: Option<u64>) -> Result<()> {
        let mut request = KvmIoEventFd {
            addr: gpa,
            len: len as u32,
            fd: fd.as_fd().as_raw_fd(),
            ..Default::default()
        };
        if let Some(data) = data {
            request.datamatch = data;
            request.flags |= KvmIoEventFdFlag::DATA_MATCH;
        }
        unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        let mut fds = self.vm.ioeventfds.lock();
        fds.insert(request.fd, request);
        Ok(())
    }

    /// Port-I/O ioeventfds are not supported by this backend yet.
    #[cfg(target_arch = "x86_64")]
    fn register_port(
        &self,
        _fd: &Self::IoeventFd,
        _port: u16,
        _len: u8,
        _data: Option<u64>,
    ) -> Result<()> {
        unimplemented!()
    }

    /// Detaches the ioeventfd by re-issuing its recorded registration with
    /// the DEASSIGN flag; a no-op if the fd was never registered.
    fn deregister(&self, fd: &Self::IoeventFd) -> Result<()> {
        let mut fds = self.vm.ioeventfds.lock();
        if let Some(mut request) = fds.remove(&fd.as_fd().as_raw_fd()) {
            request.flags |= KvmIoEventFdFlag::DEASSIGN;
            unsafe { kvm_ioeventfd(&self.vm.fd, &request) }.context(error::IoeventFd)?;
        }
        Ok(())
    }
}
615
/// Handle to a KVM virtual machine.
pub struct KvmVm {
    pub vm: Arc<VmInner>,
    // Guards against creating more than one KvmMemory for this VM.
    memory_created: bool,
}
620
impl KvmVm {
    /// Creates a new KVM VM: queries the per-vCPU mmap size, issues
    /// `KVM_CREATE_VM` with a type derived from `config`, builds the
    /// arch-specific state, optionally creates a guest memfd, and finally
    /// runs config-dependent initialization.
    ///
    /// `determine_vm_type`, `create_guest_memfd`, and `init` are provided by
    /// the arch-specific module included at the top of this file.
    pub fn new(kvm: &Kvm, config: &VmConfig) -> Result<Self> {
        let vcpu_mmap_size =
            unsafe { kvm_get_vcpu_mmap_size(&kvm.fd) }.context(error::CreateVm)? as usize;
        let kvm_vm_type = Self::determine_vm_type(config);
        let vm_fd = unsafe { kvm_create_vm(&kvm.fd, kvm_vm_type) }.context(error::CreateVm)?;
        // Take ownership of the raw fd immediately so it cannot leak on the
        // error paths below.
        let fd = unsafe { OwnedFd::from_raw_fd(vm_fd) };
        let arch = VmArch::new(kvm, config)?;
        let memfd = Self::create_guest_memfd(config, &fd)?;
        let kvm_vm = KvmVm {
            vm: Arc::new(VmInner {
                fd,
                vcpu_mmap_size,
                memfd,
                ioeventfds: Mutex::new(HashMap::new()),
                msi_table: RwLock::new(HashMap::new()),
                next_msi_gsi: AtomicU32::new(0),
                pin_map: AtomicU32::new(0),
                arch,
            }),
            memory_created: false,
        };
        kvm_vm.init(config)?;
        Ok(kvm_vm)
    }
}
647
impl Vm for KvmVm {
    #[cfg(target_arch = "aarch64")]
    type GicV2 = aarch64::KvmGicV2;
    #[cfg(target_arch = "aarch64")]
    type GicV2m = aarch64::KvmGicV2m;
    #[cfg(target_arch = "aarch64")]
    type GicV3 = aarch64::KvmGicV3;
    type IoeventFdRegistry = KvmIoeventFdRegistry;
    #[cfg(not(target_arch = "x86_64"))]
    type IrqSender = KvmIrqSender;
    #[cfg(target_arch = "aarch64")]
    type Its = aarch64::KvmIts;
    type Memory = KvmMemory;
    type MsiSender = KvmMsiSender;
    type Vcpu = KvmVcpu;

    fn create_vcpu(&self, index: u16, identity: u64) -> Result<Self::Vcpu, Error> {
        KvmVcpu::new(self, index, identity)
    }

    /// Interrupts a running vCPU thread with SIGRTMIN to kick it out of the
    /// kernel (the vCPU loop is expected to observe the signal and stop).
    fn stop_vcpu<T>(&self, _identity: u64, handle: &JoinHandle<T>) -> Result<(), Error> {
        ffi!(unsafe { libc::pthread_kill(handle.as_pthread_t() as _, SIGRTMIN()) })
            .context(error::StopVcpu)?;
        Ok(())
    }

    /// Creates the VM's single memory object; a second call fails with
    /// MemoryCreated.
    fn create_vm_memory(&mut self) -> Result<Self::Memory, Error> {
        if self.memory_created {
            error::MemoryCreated.fail()
        } else {
            let kvm_memory = KvmMemory::new(self);
            self.memory_created = true;
            Ok(kvm_memory)
        }
    }

    /// Claims irqchip pin `pin`, publishes it in the GSI routing table, and
    /// attaches a fresh eventfd to it via irqfd.
    #[cfg(not(target_arch = "x86_64"))]
    fn create_irq_sender(&self, pin: u8) -> Result<Self::IrqSender, Error> {
        let pin_flag = 1 << pin;
        // Atomically claim the pin; fail if another sender already owns it.
        if self.vm.pin_map.fetch_or(pin_flag, Ordering::AcqRel) & pin_flag == pin_flag {
            return Err(std::io::ErrorKind::AlreadyExists.into()).context(error::CreateIrq { pin });
        }
        self.vm.check_extension(KvmCap::IRQFD)?;
        let event_fd = ffi!(unsafe { eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK) })
            .context(error::CreateIrq { pin })?;
        // NOTE(review): on the error paths below, `event_fd` is still a raw
        // fd and would leak, and the pin_map bit claimed above is never
        // cleared — consider wrapping in OwnedFd right away and undoing the
        // fetch_or on failure.
        let request = KvmIrqfd {
            fd: event_fd as u32,
            gsi: pin as u32,
            ..Default::default()
        };
        // Install the routing entry for the pin before attaching the irqfd.
        self.vm.update_routing_table(&self.vm.msi_table.read())?;
        unsafe { kvm_irqfd(&self.vm.fd, &request) }.context(error::CreateIrq { pin })?;
        Ok(KvmIrqSender {
            pin,
            vm: self.vm.clone(),
            event_fd: unsafe { OwnedFd::from_raw_fd(event_fd) },
        })
    }

    /// Creates an MSI sender after verifying KVM supports `KVM_SIGNAL_MSI`.
    fn create_msi_sender(
        &self,
        #[cfg(target_arch = "aarch64")] devid: u32,
    ) -> Result<Self::MsiSender> {
        self.vm.check_extension(KvmCap::SIGNAL_MSI)?;
        Ok(KvmMsiSender {
            vm: self.vm.clone(),
            #[cfg(target_arch = "aarch64")]
            devid,
        })
    }

    fn create_ioeventfd_registry(&self) -> Result<Self::IoeventFdRegistry> {
        Ok(KvmIoeventFdRegistry {
            vm: self.vm.clone(),
        })
    }

    // The SEV/SNP/TDX trait methods below delegate to inherent methods of
    // KvmVm defined in the x86_64 arch module.

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_start(&self, policy: SevPolicy) -> Result<(), Error> {
        KvmVm::sev_launch_start(self, policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_data(&self, range: &mut [u8]) -> Result<(), Error> {
        KvmVm::sev_launch_update_data(self, range)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_update_vmsa(&self) -> Result<(), Error> {
        KvmVm::sev_launch_update_vmsa(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_measure(&self) -> Result<Vec<u8>, Error> {
        KvmVm::sev_launch_measure(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn sev_launch_finish(&self) -> Result<(), Error> {
        KvmVm::sev_launch_finish(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_start(&self, policy: SnpPolicy) -> Result<()> {
        KvmVm::snp_launch_start(self, policy)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_update(&self, range: &mut [u8], gpa: u64, type_: SnpPageType) -> Result<()> {
        KvmVm::snp_launch_update(self, range, gpa, type_)
    }

    #[cfg(target_arch = "x86_64")]
    fn snp_launch_finish(&self) -> Result<()> {
        KvmVm::snp_launch_finish(self)
    }

    #[cfg(target_arch = "x86_64")]
    fn tdx_init_vm(&self, attr: TdAttr, cpuids: &HashMap<CpuidIn, CpuidResult>) -> Result<()> {
        KvmVm::tdx_init_vm(self, attr, cpuids)
    }

    #[cfg(target_arch = "x86_64")]
    fn tdx_finalize_vm(&self) -> Result<()> {
        KvmVm::tdx_finalize_vm(self)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2(&self, distributor_base: u64, cpu_interface_base: u64) -> Result<Self::GicV2> {
        aarch64::KvmGicV2::new(self, distributor_base, cpu_interface_base)
    }

    /// GICv2m is not implemented for the KVM backend.
    #[cfg(target_arch = "aarch64")]
    fn create_gic_v2m(&self, _base: u64) -> Result<Self::GicV2m> {
        Err(std::io::ErrorKind::Unsupported.into()).context(error::CreateDevice)
    }

    #[cfg(target_arch = "aarch64")]
    fn create_gic_v3(
        &self,
        distributor_base: u64,
        redistributor_base: u64,
        redistributor_count: u16,
    ) -> Result<Self::GicV3> {
        aarch64::KvmGicV3::new(
            self,
            distributor_base,
            redistributor_base,
            redistributor_count,
        )
    }

    #[cfg(target_arch = "aarch64")]
    fn create_its(&self, base: u64) -> Result<Self::Its> {
        aarch64::KvmIts::new(self, base)
    }
}
805
#[cfg(test)]806
#[path = "vm_test.rs"]807
mod tests;808