// pci.rs
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
15
use std::io::ErrorKind;
use std::marker::PhantomData;
use std::mem::size_of;
use std::os::fd::{AsFd, AsRawFd, BorrowedFd};
use std::sync::Arc;
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::mpsc::Sender;

use alioth_macros::Layout;
use parking_lot::{Mutex, RwLock};
use zerocopy::{FromZeros, Immutable, IntoBytes};

use crate::device::Pause;
use crate::hv::{self, IoeventFd, IoeventFdRegistry, IrqFd, MsiSender};
use crate::mem::emulated::{Action, Mmio};
use crate::mem::{MemRange, MemRegion, MemRegionCallback, MemRegionEntry};
use crate::pci::cap::{
    MsixCap, MsixCapMmio, MsixCapOffset, MsixMsgCtrl, MsixTableEntry, MsixTableMmio,
    MsixTableMmioEntry, PciCap, PciCapHdr, PciCapId, PciCapList,
};
use crate::pci::config::{
    BAR_MEM32, BAR_MEM64, BAR_PREFETCHABLE, CommonHeader, DeviceHeader, EmulatedConfig, HeaderType,
    PciConfig, PciConfigArea,
};
use crate::pci::{self, Pci, PciBar};
use crate::sync::notifier::Notifier;
use crate::utils::{get_atomic_high32, get_atomic_low32, set_atomic_high32, set_atomic_low32};
use crate::virtio::dev::{Register, StartParam, VirtioDevice, WakeEvent};
use crate::virtio::queue::QueueReg;
use crate::virtio::{DevStatus, DeviceId, IrqSender, Result, error};
use crate::{consts, impl_mmio_for_zerocopy, mem};
47
/// Sentinel the guest writes to detach an interrupt source from any
/// MSI-X vector (VIRTIO_MSI_NO_VECTOR in the virtio spec).
const VIRTIO_MSI_NO_VECTOR: u16 = 0xffff;
49
/// MSI-X vector numbers the guest has assigned to virtio interrupt sources.
#[derive(Debug)]
struct VirtioPciMsixVector {
    // Vector for configuration-change interrupts;
    // VIRTIO_MSI_NO_VECTOR when unassigned.
    config: AtomicU16,
    // Per-queue vectors, indexed by queue number.
    queues: Vec<AtomicU16>,
}
55
/// Delivers virtio interrupts to the guest as MSI-X messages.
#[derive(Debug)]
pub struct PciIrqSender<S>
where
    S: MsiSender,
{
    // Guest-programmed vector assignments for config and queue events.
    msix_vector: VirtioPciMsixVector,
    // MSI-X table backing store, shared with the BAR 0 MMIO emulation.
    msix_table: Arc<MsixTableMmio<S::IrqFd>>,
    // Hypervisor handle used to inject the MSI messages.
    msi_sender: S,
}
65
impl<S> PciIrqSender<S>66
where67
S: MsiSender,68
{69
fn send(&self, vector: u16) {70
let entries = self.msix_table.entries.read();71
let Some(entry) = entries.get(vector as usize) else {72
log::error!("invalid config vector: {vector:x}");73
return;74
};75
if entry.get_masked() {76
log::info!("{vector} is masked");77
return;78
}79
let data = entry.get_data();80
let addr = ((entry.get_addr_hi() as u64) << 32) | (entry.get_addr_lo() as u64);81
if let Err(e) = self.msi_sender.send(addr, data) {82
log::error!("send msi data = {data:#x} to {addr:#x}: {e}")83
} else {84
log::trace!("send msi data = {data:#x} to {addr:#x}: done")85
}86
}87
88
fn get_irqfd<F, T>(&self, vector: u16, f: F) -> Result<T>89
where90
F: FnOnce(BorrowedFd) -> Result<T>,91
{92
let mut entries = self.msix_table.entries.write();93
let Some(entry) = entries.get_mut(vector as usize) else {94
return error::InvalidMsixVector { vector }.fail();95
};96
match &*entry {97
MsixTableMmioEntry::Entry(e) => {98
let irqfd = self.msi_sender.create_irqfd()?;99
irqfd.set_addr_hi(e.addr_hi)?;100
irqfd.set_addr_lo(e.addr_lo)?;101
irqfd.set_data(e.data)?;102
irqfd.set_masked(e.control.masked())?;103
let r = f(irqfd.as_fd())?;104
*entry = MsixTableMmioEntry::IrqFd(irqfd);105
Ok(r)106
}107
MsixTableMmioEntry::IrqFd(fd) => f(fd.as_fd()),108
}109
}110
}111
112
impl<S> IrqSender for PciIrqSender<S>113
where114
S: MsiSender,115
{116
fn config_irq(&self) {117
let vector = self.msix_vector.config.load(Ordering::Acquire);118
if vector != VIRTIO_MSI_NO_VECTOR {119
self.send(vector)120
}121
}122
123
fn queue_irq(&self, idx: u16) {124
let Some(vector) = self.msix_vector.queues.get(idx as usize) else {125
log::error!("invalid queue index: {idx}");126
return;127
};128
let vector = vector.load(Ordering::Acquire);129
if vector != VIRTIO_MSI_NO_VECTOR {130
self.send(vector);131
}132
}133
134
fn config_irqfd<F, T>(&self, f: F) -> Result<T>135
where136
F: FnOnce(BorrowedFd) -> Result<T>,137
{138
self.get_irqfd(self.msix_vector.config.load(Ordering::Acquire), f)139
}140
141
fn queue_irqfd<F, T>(&self, idx: u16, f: F) -> Result<T>142
where143
F: FnOnce(BorrowedFd) -> Result<T>,144
{145
let Some(vector) = self.msix_vector.queues.get(idx as usize) else {146
return error::InvalidQueueIndex { index: idx }.fail();147
};148
self.get_irqfd(vector.load(Ordering::Acquire), f)149
}150
}151
152
/// Layout of the virtio-pci common configuration structure.
///
/// NOTE(review): the `Layout` derive generates the `LAYOUT_*` offset
/// constants consumed by the MMIO read/write handlers below, so field
/// order and widths must not change. Presumably this mirrors the spec's
/// `virtio_pci_common_cfg` — verify against the virtio specification.
#[repr(C, align(4))]
#[derive(Layout)]
pub struct VirtioCommonCfg {
    device_feature_select: u32,
    device_feature: u32,
    driver_feature_select: u32,
    driver_feature: u32,
    config_msix_vector: u16,
    num_queues: u16,
    device_status: u8,
    config_generation: u8,
    queue_select: u16,
    queue_size: u16,
    queue_msix_vector: u16,
    queue_enable: u16,
    queue_notify_off: u16,
    queue_desc_lo: u32,
    queue_desc_hi: u32,
    queue_driver_lo: u32,
    queue_driver_hi: u32,
    queue_device_lo: u32,
    queue_device_hi: u32,
    queue_notify_data: u16,
    queue_reset: u16,
}
178
/// Overall layout of the virtio register block inside BAR 0: the common
/// configuration, the ISR status word, then per-queue notify registers.
#[derive(Layout)]
#[repr(C, align(4))]
pub struct VirtioPciRegister {
    common: VirtioCommonCfg,
    isr_status: u32,
    // Variable-length tail: one u32 notify register per queue.
    // PhantomData keeps the struct Sized while the Layout derive still
    // records the tail's offset (OFFSET_QUEUE_NOTIFY).
    queue_notify: PhantomData<[u32]>,
}
186
/// MMIO emulation state for the virtio register region of BAR 0.
#[derive(Debug)]
pub struct VirtioPciRegisterMmio<M, E>
where
    M: MsiSender,
    E: IoeventFd,
{
    name: Arc<str>,
    // Live register values, shared with the device worker.
    reg: Register,
    queues: Arc<[QueueReg]>,
    irq_sender: Arc<PciIrqSender<M>>,
    // Present only when the hypervisor supports ioeventfd notification;
    // None means queue notifications arrive via vm exits.
    ioeventfds: Option<Arc<[E]>>,
    // Channel for delivering wake events to the device worker.
    event_tx: Sender<WakeEvent<PciIrqSender<M>, E>>,
    notifier: Arc<Notifier>,
}
201
impl<M, E> VirtioPciRegisterMmio<M, E>202
where203
M: MsiSender,204
E: IoeventFd,205
{206
fn wake_up_dev(&self, event: WakeEvent<PciIrqSender<M>, E>) {207
let is_start = matches!(event, WakeEvent::Start { .. });208
if let Err(e) = self.event_tx.send(event) {209
log::error!("{}: failed to send event: {e}", self.name);210
return;211
}212
if is_start {213
return;214
}215
if let Err(e) = self.notifier.notify() {216
log::error!("{}: failed to wake up device: {e}", self.name);217
}218
}219
220
fn reset(&self) {221
let config_msix = &self.irq_sender.msix_vector.config;222
config_msix.store(VIRTIO_MSI_NO_VECTOR, Ordering::Release);223
for q_vector in self.irq_sender.msix_vector.queues.iter() {224
q_vector.store(VIRTIO_MSI_NO_VECTOR, Ordering::Release);225
}226
self.irq_sender.msix_table.reset();227
for q in self.queues.iter() {228
q.enabled.store(false, Ordering::Release);229
}230
}231
232
fn msix_change_allowed(&self, old: u16) -> bool {233
let entries = self.irq_sender.msix_table.entries.read();234
let Some(entry) = entries.get(old as usize) else {235
return true;236
};237
if let MsixTableMmioEntry::IrqFd(fd) = entry {238
log::error!(239
"{}: MSI-X vector {old:#x} was assigned to irqfd {:#x}",240
self.name,241
fd.as_fd().as_raw_fd(),242
);243
false244
} else {245
true246
}247
}248
}249
250
impl<M, E> Mmio for VirtioPciRegisterMmio<M, E>251
where252
M: MsiSender,253
E: IoeventFd,254
{255
fn size(&self) -> u64 {256
(size_of::<VirtioPciRegister>() + size_of::<u32>() * self.queues.len()) as u64257
}258
259
fn read(&self, offset: u64, size: u8) -> mem::Result<u64> {260
let reg = &self.reg;261
let ret = match (offset as usize, size as usize) {262
VirtioCommonCfg::LAYOUT_DEVICE_FEATURE_SELECT => {263
reg.device_feature_sel.load(Ordering::Acquire) as u64264
}265
VirtioCommonCfg::LAYOUT_DEVICE_FEATURE => {266
let sel = reg.device_feature_sel.load(Ordering::Acquire);267
if let Some(feature) = reg.device_feature.get(sel as usize) {268
*feature as u64269
} else {270
0271
}272
}273
VirtioCommonCfg::LAYOUT_DRIVER_FEATURE_SELECT => {274
reg.driver_feature_sel.load(Ordering::Acquire) as u64275
}276
VirtioCommonCfg::LAYOUT_DRIVER_FEATURE => {277
let sel = reg.driver_feature_sel.load(Ordering::Acquire);278
if let Some(feature) = reg.driver_feature.get(sel as usize) {279
feature.load(Ordering::Acquire) as u64280
} else {281
0282
}283
}284
VirtioCommonCfg::LAYOUT_CONFIG_MSIX_VECTOR => {285
self.irq_sender.msix_vector.config.load(Ordering::Acquire) as u64286
}287
VirtioCommonCfg::LAYOUT_NUM_QUEUES => self.queues.len() as u64,288
VirtioCommonCfg::LAYOUT_DEVICE_STATUS => reg.status.load(Ordering::Acquire) as u64,289
VirtioCommonCfg::LAYOUT_CONFIG_GENERATION => {290
0 // TODO: support device config change at runtime291
}292
VirtioCommonCfg::LAYOUT_QUEUE_SELECT => reg.queue_sel.load(Ordering::Acquire) as u64,293
VirtioCommonCfg::LAYOUT_QUEUE_SIZE => {294
let q_sel = reg.queue_sel.load(Ordering::Acquire) as usize;295
if let Some(q) = self.queues.get(q_sel) {296
q.size.load(Ordering::Acquire) as u64297
} else {298
0299
}300
}301
VirtioCommonCfg::LAYOUT_QUEUE_MSIX_VECTOR => {302
let q_sel = reg.queue_sel.load(Ordering::Acquire) as usize;303
if let Some(msix_vector) = self.irq_sender.msix_vector.queues.get(q_sel) {304
msix_vector.load(Ordering::Acquire) as u64305
} else {306
VIRTIO_MSI_NO_VECTOR as u64307
}308
}309
VirtioCommonCfg::LAYOUT_QUEUE_ENABLE => {310
let q_sel = reg.queue_sel.load(Ordering::Acquire) as usize;311
if let Some(q) = self.queues.get(q_sel) {312
q.enabled.load(Ordering::Acquire) as u64313
} else {314
0315
}316
}317
VirtioCommonCfg::LAYOUT_QUEUE_NOTIFY_OFF => {318
reg.queue_sel.load(Ordering::Acquire) as u64319
}320
VirtioCommonCfg::LAYOUT_QUEUE_DESC_LO => {321
let q_sel = reg.queue_sel.load(Ordering::Relaxed);322
if let Some(q) = self.queues.get(q_sel as usize) {323
get_atomic_low32(&q.desc) as u64324
} else {325
0326
}327
}328
VirtioCommonCfg::LAYOUT_QUEUE_DESC_HI => {329
let q_sel = reg.queue_sel.load(Ordering::Relaxed);330
if let Some(q) = self.queues.get(q_sel as usize) {331
get_atomic_high32(&q.desc) as u64332
} else {333
0334
}335
}336
VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_LO => {337
let q_sel = reg.queue_sel.load(Ordering::Relaxed);338
if let Some(q) = self.queues.get(q_sel as usize) {339
get_atomic_high32(&q.driver) as u64340
} else {341
0342
}343
}344
VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_HI => {345
let q_sel = reg.queue_sel.load(Ordering::Relaxed);346
if let Some(q) = self.queues.get(q_sel as usize) {347
get_atomic_high32(&q.driver) as u64348
} else {349
0350
}351
}352
VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_LO => {353
let q_sel = reg.queue_sel.load(Ordering::Relaxed);354
if let Some(q) = self.queues.get(q_sel as usize) {355
get_atomic_high32(&q.device) as u64356
} else {357
0358
}359
}360
VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_HI => {361
let q_sel = reg.queue_sel.load(Ordering::Relaxed);362
if let Some(q) = self.queues.get(q_sel as usize) {363
get_atomic_high32(&q.device) as u64364
} else {365
0366
}367
}368
VirtioCommonCfg::LAYOUT_QUEUE_NOTIFY_DATA => {369
todo!()370
}371
VirtioCommonCfg::LAYOUT_QUEUE_RESET => {372
todo!()373
}374
_ => {375
log::error!(376
"{}: read invalid register: offset = {offset:#x}, size = {size}",377
self.name378
);379
0380
}381
};382
Ok(ret)383
}384
385
fn write(&self, offset: u64, size: u8, val: u64) -> mem::Result<Action> {386
let reg = &self.reg;387
match (offset as usize, size as usize) {388
VirtioCommonCfg::LAYOUT_DEVICE_FEATURE_SELECT => {389
reg.device_feature_sel.store(val as u8, Ordering::Release);390
}391
VirtioCommonCfg::LAYOUT_DRIVER_FEATURE_SELECT => {392
reg.driver_feature_sel.store(val as u8, Ordering::Release);393
}394
VirtioCommonCfg::LAYOUT_DRIVER_FEATURE => {395
let sel = reg.driver_feature_sel.load(Ordering::Acquire);396
if let Some(feature) = reg.driver_feature.get(sel as usize) {397
feature.store(val as u32, Ordering::Release);398
} else if val != 0 {399
log::error!("{}: unknown feature {val:#x} for sel {sel}", self.name);400
}401
}402
VirtioCommonCfg::LAYOUT_CONFIG_MSIX_VECTOR => {403
let config_msix = &self.irq_sender.msix_vector.config;404
let old = config_msix.load(Ordering::Acquire);405
if self.msix_change_allowed(old) {406
config_msix.store(val as u16, Ordering::Release);407
log::trace!(408
"{}: config MSI-X vector update: {old:#x} -> {val:#x}",409
self.name410
);411
} else {412
log::error!(413
"{}: cannot change config MSI-X vector from {old:#x} to {val:#x}",414
self.name415
)416
}417
}418
VirtioCommonCfg::LAYOUT_DEVICE_STATUS => {419
let status = DevStatus::from_bits_truncate(val as u8);420
let old = reg.status.swap(status.bits(), Ordering::AcqRel);421
let old = DevStatus::from_bits_retain(old);422
if (old ^ status).contains(DevStatus::DRIVER_OK) {423
let event = if status.contains(DevStatus::DRIVER_OK) {424
let mut feature = 0;425
for (i, v) in reg.driver_feature.iter().enumerate() {426
feature |= (v.load(Ordering::Acquire) as u128) << (i << 5);427
}428
let param = StartParam {429
feature,430
irq_sender: self.irq_sender.clone(),431
ioeventfds: self.ioeventfds.clone(),432
};433
WakeEvent::Start { param }434
} else {435
self.reset();436
WakeEvent::Reset437
};438
self.wake_up_dev(event);439
}440
}441
VirtioCommonCfg::LAYOUT_QUEUE_SELECT => {442
reg.queue_sel.store(val as u16, Ordering::Relaxed);443
if self.queues.get(val as usize).is_none() {444
log::error!("{}: unknown queue index {val}", self.name)445
}446
}447
VirtioCommonCfg::LAYOUT_QUEUE_SIZE => {448
let q_sel = reg.queue_sel.load(Ordering::Relaxed) as usize;449
if let Some(q) = self.queues.get(q_sel) {450
// TODO: validate queue size451
q.size.store(val as u16, Ordering::Release);452
}453
}454
VirtioCommonCfg::LAYOUT_QUEUE_MSIX_VECTOR => {455
let q_sel = reg.queue_sel.load(Ordering::Relaxed) as usize;456
if let Some(msix_vector) = self.irq_sender.msix_vector.queues.get(q_sel) {457
let old = msix_vector.load(Ordering::Acquire);458
if self.msix_change_allowed(old) {459
msix_vector.store(val as u16, Ordering::Release);460
log::trace!(461
"{}: queue {q_sel} MSI-X vector update: {old:#x} -> {val:#x}",462
self.name463
);464
} else {465
log::error!(466
"{}: cannot change queue {q_sel} MSI-X vector from {old:#x} to {val:#x}",467
self.name468
)469
}470
}471
}472
VirtioCommonCfg::LAYOUT_QUEUE_ENABLE => {473
let q_sel = reg.queue_sel.load(Ordering::Relaxed);474
if let Some(q) = self.queues.get(q_sel as usize) {475
q.enabled.store(val != 0, Ordering::Release);476
};477
}478
VirtioCommonCfg::LAYOUT_QUEUE_DESC_LO => {479
let q_sel = reg.queue_sel.load(Ordering::Relaxed);480
if let Some(q) = self.queues.get(q_sel as usize) {481
set_atomic_low32(&q.desc, val as u32)482
}483
}484
VirtioCommonCfg::LAYOUT_QUEUE_DESC_HI => {485
let q_sel = reg.queue_sel.load(Ordering::Relaxed);486
if let Some(q) = self.queues.get(q_sel as usize) {487
set_atomic_high32(&q.desc, val as u32)488
}489
}490
VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_LO => {491
let q_sel = reg.queue_sel.load(Ordering::Relaxed);492
if let Some(q) = self.queues.get(q_sel as usize) {493
set_atomic_low32(&q.driver, val as u32)494
}495
}496
VirtioCommonCfg::LAYOUT_QUEUE_DRIVER_HI => {497
let q_sel = reg.queue_sel.load(Ordering::Relaxed);498
if let Some(q) = self.queues.get(q_sel as usize) {499
set_atomic_high32(&q.driver, val as u32)500
}501
}502
VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_LO => {503
let q_sel = reg.queue_sel.load(Ordering::Relaxed);504
if let Some(q) = self.queues.get(q_sel as usize) {505
set_atomic_low32(&q.device, val as u32)506
}507
}508
VirtioCommonCfg::LAYOUT_QUEUE_DEVICE_HI => {509
let q_sel = reg.queue_sel.load(Ordering::Relaxed);510
if let Some(q) = self.queues.get(q_sel as usize) {511
set_atomic_high32(&q.device, val as u32)512
}513
}514
VirtioCommonCfg::LAYOUT_QUEUE_RESET => {515
todo!()516
}517
(offset, _)518
if offset >= VirtioPciRegister::OFFSET_QUEUE_NOTIFY519
&& offset520
< VirtioPciRegister::OFFSET_QUEUE_NOTIFY521
+ size_of::<u32>() * self.queues.len() =>522
{523
let q_index = (offset - VirtioPciRegister::OFFSET_QUEUE_NOTIFY) as u16 / 4;524
if self.ioeventfds.is_some() {525
log::warn!("{}: notifying queue-{q_index} by vm exit!", self.name);526
}527
let event = WakeEvent::Notify { q_index };528
self.wake_up_dev(event)529
}530
_ => {531
log::error!(532
"{}: write 0x{val:0width$x} to invalid register offset = {offset:#x}",533
self.name,534
width = 2 * size as usize535
);536
}537
}538
Ok(Action::None)539
}540
}541
542
/// Registers and deregisters queue-notify ioeventfds as BAR 0 is
/// mapped into / unmapped from guest address space.
#[derive(Debug)]
struct IoeventFdCallback<R>
where
    R: IoeventFdRegistry,
{
    registry: R,
    // One ioeventfd per queue, indexed by queue number.
    ioeventfds: Arc<[R::IoeventFd]>,
}
551
impl<R> MemRegionCallback for IoeventFdCallback<R>552
where553
R: IoeventFdRegistry,554
{555
fn mapped(&self, addr: u64) -> mem::Result<()> {556
for (q_index, fd) in self.ioeventfds.iter().enumerate() {557
let base_addr = addr + (12 << 10) + VirtioPciRegister::OFFSET_QUEUE_NOTIFY as u64;558
let notify_addr = base_addr + (q_index * size_of::<u32>()) as u64;559
self.registry.register(fd, notify_addr, 0, None)?;560
log::info!("q-{q_index} ioeventfd registered at {notify_addr:x}",)561
}562
Ok(())563
}564
565
fn unmapped(&self) -> mem::Result<()> {566
for fd in self.ioeventfds.iter() {567
self.registry.deregister(fd)?;568
log::info!("ioeventfd {fd:?} de-registered")569
}570
Ok(())571
}572
}573
574
const VIRTIO_VENDOR_ID: u16 = 0x1af4;575
const VIRTIO_DEVICE_ID_BASE: u16 = 0x1040;576
577
fn get_class(id: DeviceId) -> (u8, u8) {578
match id {579
DeviceId::NET => (0x02, 0x00),580
DeviceId::FILE_SYSTEM => (0x01, 0x80),581
DeviceId::BLOCK => (0x01, 0x00),582
DeviceId::SOCKET => (0x02, 0x80),583
_ => (0xff, 0x00),584
}585
}586
587
// Capability type codes for virtio-pci vendor capabilities (the
// cfg_type field of VirtioPciCap below).
consts! {
    #[derive(Default, FromZeros, Immutable, IntoBytes)]
    pub struct VirtioPciCfg(u8) {
        COMMON = 1;
        NOTIFY = 2;
        ISR = 3;
        DEVICE = 4;
        PCI = 5;
        SHARED_MEMORY = 8;
        VENDOR = 9;
    }
}
600
/// Virtio-pci vendor capability: points at a (bar, offset, length)
/// window in a BAR holding one of the virtio register groups.
#[repr(C, align(4))]
#[derive(Debug, Default, FromZeros, Immutable, IntoBytes)]
pub struct VirtioPciCap {
    header: PciCapHdr,
    // Total capability length in bytes, including this header.
    cap_len: u8,
    // Which register group this capability describes (VirtioPciCfg::*).
    cfg_type: VirtioPciCfg,
    // BAR index the window lives in.
    bar: u8,
    // Distinguishes multiple capabilities of the same cfg_type
    // (e.g. shared-memory region index).
    id: u8,
    padding: [u8; 2],
    offset: u32,
    length: u32,
}
impl_mmio_for_zerocopy!(VirtioPciCap);
614
impl PciConfigArea for VirtioPciCap {
    // The capability content is fixed at construction; nothing to reset.
    fn reset(&self) -> pci::Result<()> {
        Ok(())
    }
}
620
impl PciCap for VirtioPciCap {
    // Links this capability into the config-space capability chain.
    fn set_next(&mut self, val: u8) {
        self.header.next = val
    }
}
626
/// 64-bit variant of [`VirtioPciCap`] carrying the high halves of the
/// offset and length; used for shared-memory windows that can exceed
/// the 32-bit range.
#[repr(C, align(4))]
#[derive(Debug, Default, FromZeros, Immutable, IntoBytes)]
pub struct VirtioPciCap64 {
    cap: VirtioPciCap,
    offset_hi: u32,
    length_hi: u32,
}
impl_mmio_for_zerocopy!(VirtioPciCap64);
635
impl PciConfigArea for VirtioPciCap64 {
    // The capability content is fixed at construction; nothing to reset.
    fn reset(&self) -> pci::Result<()> {
        Ok(())
    }
}
641
impl PciCap for VirtioPciCap64 {642
fn set_next(&mut self, val: u8) {643
PciCap::set_next(&mut self.cap, val)644
}645
}646
647
/// Notify capability: a [`VirtioPciCap`] plus the multiplier applied to
/// a queue's notify offset to locate its notify register.
#[repr(C, align(4))]
#[derive(Debug, Default, FromZeros, Immutable, IntoBytes)]
pub struct VirtioPciNotifyCap {
    cap: VirtioPciCap,
    multiplier: u32,
}
impl_mmio_for_zerocopy!(VirtioPciNotifyCap);
655
impl PciConfigArea for VirtioPciNotifyCap {
    // The capability content is fixed at construction; nothing to reset.
    fn reset(&self) -> pci::Result<()> {
        Ok(())
    }
}
661
impl PciCap for VirtioPciNotifyCap {
    // Links this capability into the config-space capability chain via
    // the embedded cap's header.
    fn set_next(&mut self, val: u8) {
        self.cap.header.next = val;
    }
}
667
/// A virtio device exposed to the guest over the PCI transport.
#[derive(Debug)]
pub struct VirtioPciDevice<M, E>
where
    M: MsiSender,
    E: IoeventFd,
{
    // The underlying virtio device and its worker-side state.
    pub dev: VirtioDevice<PciIrqSender<M>, E>,
    // Emulated PCI configuration space (header, BARs, capabilities).
    pub config: EmulatedConfig,
    // MMIO handler for the virtio register region of BAR 0.
    pub registers: Arc<VirtioPciRegisterMmio<M, E>>,
}
678
impl<M, E> VirtioPciDevice<M, E>
where
    M: MsiSender,
    E: IoeventFd,
{
    /// Assembles the PCI transport for virtio device `dev`: config-space
    /// header, capability list, MSI-X table, BARs, and MMIO registers.
    ///
    /// BAR 0 (16 KiB, emulated) is laid out as:
    /// - 0x0000: MSI-X table (one entry per queue plus one for config);
    /// - 0x2000: MSI-X PBA (per pba_offset below);
    /// - 0x3000: virtio registers (common cfg, ISR, per-queue notify),
    ///   followed by the device-specific configuration, if any.
    ///
    /// BAR 2, present when the device exposes shared memory regions,
    /// maps those regions back to back.
    pub fn new<R>(
        dev: VirtioDevice<PciIrqSender<M>, E>,
        msi_sender: M,
        ioeventfd_reg: R,
    ) -> Result<Self>
    where
        R: IoeventFdRegistry<IoeventFd = E>,
    {
        let (class, subclass) = get_class(dev.id);
        let mut header = DeviceHeader {
            common: CommonHeader {
                vendor: VIRTIO_VENDOR_ID,
                // Modern virtio device IDs start at 0x1040 + device type.
                device: VIRTIO_DEVICE_ID_BASE + dev.id.raw(),
                revision: 0x1,
                header_type: HeaderType::DEVICE,
                class,
                subclass,
                ..Default::default()
            },
            subsystem: VIRTIO_DEVICE_ID_BASE + dev.id.raw(),
            ..Default::default()
        };
        let device_config = dev.device_config.clone();
        let num_queues = dev.queue_regs.len();
        // One MSI-X vector per queue plus one for config-change events.
        let table_entries = num_queues + 1;

        let msix_table_offset = 0;
        let msix_table_size = size_of::<MsixTableEntry>() * table_entries;

        let msix_pba_offset = 8 << 10;

        let virtio_register_offset = 12 << 10;
        // Device-specific config sits right after the notify registers.
        let device_config_offset =
            virtio_register_offset + size_of::<VirtioPciRegister>() + size_of::<u32>() * num_queues;

        let msix_msg_ctrl = MsixMsgCtrl::new(table_entries as u16);

        let cap_msix = MsixCap {
            header: PciCapHdr {
                id: PciCapId::MSIX,
                ..Default::default()
            },
            control: msix_msg_ctrl,
            table_offset: MsixCapOffset::new(msix_table_offset as u32, 0),
            pba_offset: MsixCapOffset::new(msix_pba_offset as u32, 0),
        };
        let cap_common = VirtioPciCap {
            header: PciCapHdr {
                id: PciCapId::VENDOR,
                ..Default::default()
            },
            cap_len: size_of::<VirtioPciCap>() as u8,
            cfg_type: VirtioPciCfg::COMMON,
            bar: 0,
            id: 0,
            offset: (virtio_register_offset + VirtioPciRegister::OFFSET_COMMON) as u32,
            length: size_of::<VirtioCommonCfg>() as u32,
            ..Default::default()
        };
        let cap_isr = VirtioPciCap {
            header: PciCapHdr {
                id: PciCapId::VENDOR,
                ..Default::default()
            },
            cap_len: size_of::<VirtioPciCap>() as u8,
            cfg_type: VirtioPciCfg::ISR,
            bar: 0,
            id: 0,
            offset: (virtio_register_offset + VirtioPciRegister::OFFSET_ISR_STATUS) as u32,
            length: size_of::<u32>() as u32,
            ..Default::default()
        };
        let cap_notify = VirtioPciNotifyCap {
            cap: VirtioPciCap {
                header: PciCapHdr {
                    id: PciCapId::VENDOR,
                    ..Default::default()
                },
                cap_len: size_of::<VirtioPciNotifyCap>() as u8,
                cfg_type: VirtioPciCfg::NOTIFY,
                bar: 0,
                id: 0,
                offset: (virtio_register_offset + VirtioPciRegister::OFFSET_QUEUE_NOTIFY) as u32,
                length: (size_of::<u32>() * num_queues) as u32,
                ..Default::default()
            },
            // Each queue gets its own u32 notify register.
            multiplier: size_of::<u32>() as u32,
        };
        let cap_device_config = VirtioPciCap {
            header: PciCapHdr {
                id: PciCapId::VENDOR,
                ..Default::default()
            },
            cap_len: size_of::<VirtioPciCap>() as u8,
            cfg_type: VirtioPciCfg::DEVICE,
            bar: 0,
            id: 0,
            offset: device_config_offset as u32,
            length: device_config.size() as u32,
            ..Default::default()
        };
        // MSI-X table slots start as plain entries; they are upgraded to
        // irqfds lazily by PciIrqSender::get_irqfd.
        let entries = RwLock::new(
            (0..table_entries)
                .map(|_| MsixTableMmioEntry::Entry(MsixTableEntry::default()))
                .collect(),
        );
        let msix_table = Arc::new(MsixTableMmio { entries });
        let bar0_size = 16 << 10;
        let mut bar0 = MemRegion {
            ranges: vec![],
            entries: vec![MemRegionEntry {
                size: bar0_size,
                type_: mem::MemRegionType::Hidden,
            }],
            callbacks: Mutex::new(vec![]),
        };

        let mut caps: Vec<Box<dyn PciCap>> = vec![
            Box::new(MsixCapMmio::new(cap_msix)),
            Box::new(cap_common),
            Box::new(cap_isr),
            Box::new(cap_notify),
        ];
        // Only advertise a device-config window if the device has one.
        if device_config.size() > 0 {
            caps.push(Box::new(cap_device_config));
        }
        if let Some(region) = &dev.shared_mem_regions {
            // One 64-bit shared-memory capability per region entry,
            // packed back to back inside BAR 2.
            let mut offset = 0;
            for (index, entry) in region.entries.iter().enumerate() {
                let share_mem_cap = VirtioPciCap64 {
                    cap: VirtioPciCap {
                        header: PciCapHdr {
                            id: PciCapId::VENDOR,
                            ..Default::default()
                        },
                        cap_len: size_of::<VirtioPciCap64>() as u8,
                        cfg_type: VirtioPciCfg::SHARED_MEMORY,
                        bar: 2,
                        id: index as u8,
                        offset: offset as u32,
                        length: entry.size as u32,
                        ..Default::default()
                    },
                    length_hi: (entry.size >> 32) as u32,
                    offset_hi: (offset >> 32) as u32,
                };
                caps.push(Box::new(share_mem_cap));
                offset += entry.size;
            }
        }

        let cap_list = PciCapList::try_from(caps)?;

        // All vectors start unassigned until the guest programs them.
        let msix_vector = VirtioPciMsixVector {
            config: AtomicU16::new(VIRTIO_MSI_NO_VECTOR),
            queues: (0..num_queues)
                .map(|_| AtomicU16::new(VIRTIO_MSI_NO_VECTOR))
                .collect(),
        };

        let maybe_ioeventfds = (0..num_queues)
            .map(|_| ioeventfd_reg.create())
            .collect::<Result<Arc<_>, _>>();
        let ioeventfds = match maybe_ioeventfds {
            Ok(fds) => Some(fds),
            // Hypervisor without ioeventfd support: fall back to vm-exit
            // based queue notification.
            Err(hv::Error::IoeventFd { error, .. }) if error.kind() == ErrorKind::Unsupported => {
                None
            }
            Err(e) => {
                log::warn!("{}: failed to create ioeventfds: {e:?}", dev.name);
                None
            }
        };

        // Split the 128-bit device feature bitmap into four u32 words
        // for the feature-select register interface.
        let mut device_feature = [0u32; 4];
        for (i, v) in device_feature.iter_mut().enumerate() {
            *v = (dev.device_feature >> (i << 5)) as u32;
        }
        let registers = Arc::new(VirtioPciRegisterMmio {
            name: dev.name.clone(),
            reg: Register {
                device_feature,
                ..Default::default()
            },
            event_tx: dev.event_tx.clone(),
            notifier: dev.notifier.clone(),
            queues: dev.queue_regs.clone(),
            irq_sender: Arc::new(PciIrqSender {
                msix_vector,
                msix_table: msix_table.clone(),
                msi_sender,
            }),
            ioeventfds: ioeventfds.clone(),
        });
        bar0.ranges.push(MemRange::Emulated(msix_table));
        // Pad from the end of the MSI-X table up to the virtio registers
        // at 12 KiB.
        // NOTE(review): the PBA area at 8 KiB falls inside this unbacked
        // span — confirm PBA reads are handled elsewhere.
        bar0.ranges
            .push(MemRange::Span((12 << 10) - msix_table_size as u64));
        bar0.ranges.push(MemRange::Emulated(registers.clone()));
        if let Some(ioeventfds) = ioeventfds {
            bar0.callbacks.lock().push(Box::new(IoeventFdCallback {
                registry: ioeventfd_reg,
                ioeventfds,
            }));
        }
        if device_config.size() > 0 {
            bar0.ranges.push(MemRange::Emulated(device_config))
        }
        let mut bars = [const { PciBar::Empty }; 6];

        bars[0] = PciBar::Mem(Arc::new(bar0));
        header.bars[0] = BAR_MEM32;

        if let Some(region) = &dev.shared_mem_regions {
            let region_size = region.size();

            // Only mark the shared-memory BAR 64-bit prefetchable when
            // none of its ranges needs MMIO emulation.
            let mut not_emulated = |r| !matches!(r, &MemRange::Emulated(_));
            let prefetchable = region.ranges.iter().all(&mut not_emulated);
            if prefetchable {
                bars[2] = PciBar::Mem(region.clone());
                header.bars[2] = BAR_MEM64 | BAR_PREFETCHABLE;
            } else {
                // A 32-bit BAR cannot address more than 4 GiB.
                assert!(region_size <= u32::MAX as u64);
                bars[2] = PciBar::Mem(region.clone());
                header.bars[2] = BAR_MEM32;
            }
        }

        let config = EmulatedConfig::new_device(header, bars, cap_list);

        Ok(VirtioPciDevice {
            dev,
            config,
            registers,
        })
    }
}
920
// No transport-specific pause logic; the Pause trait's default method
// implementations are used as-is.
impl<M, E> Pause for VirtioPciDevice<M, E>
where
    M: MsiSender,
    E: IoeventFd,
{
}
927
impl<M, E> Pci for VirtioPciDevice<M, E>928
where929
M: MsiSender,930
E: IoeventFd,931
{932
fn name(&self) -> &str {933
&self.dev.name934
}935
936
fn config(&self) -> &dyn PciConfig {937
&self.config938
}939
940
fn reset(&self) -> pci::Result<()> {941
self.registers.wake_up_dev(WakeEvent::Reset);942
self.registers.reset();943
self.registers.reg.status.store(0, Ordering::Release);944
Ok(())945
}946
}947