packed.rs90.76%
1
// Copyright 2025 Google LLC2
//3
// Licensed under the Apache License, Version 2.0 (the "License");4
// you may not use this file except in compliance with the License.5
// You may obtain a copy of the License at6
//7
// https://www.apache.org/licenses/LICENSE-2.08
//9
// Unless required by applicable law or agreed to in writing, software10
// distributed under the License is distributed on an "AS IS" BASIS,11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.12
// See the License for the specific language governing permissions and13
// limitations under the License.14
15
use std::marker::PhantomData;16
use std::sync::atomic::Ordering;17
18
use bitfield::bitfield;19
use zerocopy::{FromBytes, Immutable, IntoBytes};20
21
use crate::consts;22
use crate::mem::mapped::Ram;23
use crate::virtio::Result;24
use crate::virtio::queue::{DescChain, DescFlag, QueueReg, VirtQueue};25
26
/// One element of the packed virtqueue descriptor ring.
///
/// The layout (16 bytes, 16-byte aligned: `addr`, `len`, `id`, `flags`)
/// matches the packed-virtqueue descriptor of the VIRTIO spec; it is read
/// and written in place through raw pointers into guest memory, so the
/// `repr` and field order must not change.
#[repr(C, align(16))]
#[derive(Debug, Clone, Default, FromBytes, Immutable, IntoBytes)]
struct Desc {
    // Guest-physical address of the buffer (or of an indirect table).
    pub addr: u64,
    // Buffer length in bytes (for INDIRECT: table size in bytes).
    pub len: u32,
    // Buffer id, echoed back by the device when the descriptor is used.
    pub id: u16,
    // DescFlag bits (AVAIL/USED/WRITE/NEXT/INDIRECT).
    pub flag: u16,
}
35
// A 16-bit packed-ring index: bits 14:0 hold the ring offset and bit 15
// holds the wrap counter, the format used by the packed virtqueue's
// event-suppression index.
bitfield! {
    #[derive(Copy, Clone, Default, PartialEq, Eq, Hash)]
    pub struct WrappedIndex(u16);
    impl Debug;
    // Offset into the descriptor ring (bits 14:0).
    pub u16, offset, set_offset : 14, 0;
    // Wrap counter (bit 15), flipped every time the offset wraps.
    // NOTE(review): "warp" in the setter name is a typo for "wrap"; it is
    // `pub` and has call sites, so renaming is deferred to a coordinated
    // change.
    pub wrap_counter, set_warp_counter: 15;
}
43
impl WrappedIndex {44
const INIT: WrappedIndex = WrappedIndex(1 << 15);45
46
fn wrapping_add(&self, delta: u16, size: u16) -> WrappedIndex {39x47
let mut offset = self.offset() + delta;39x48
let mut wrap_counter = self.wrap_counter();39x49
if offset >= size {39x50
offset -= size;6x51
wrap_counter = !wrap_counter;6x52
}33x53
let mut r = WrappedIndex(offset);39x54
r.set_warp_counter(wrap_counter);39x55
r39x56
}39x57
58
fn wrapping_sub(&self, delta: u16, size: u16) -> WrappedIndex {36x59
let mut offset = self.offset();36x60
let mut wrap_counter = self.wrap_counter();36x61
if offset >= delta {36x62
offset -= delta;18x63
} else {18x64
offset += size - delta;18x65
wrap_counter = !wrap_counter;18x66
}18x67
let mut r = WrappedIndex(offset);36x68
r.set_warp_counter(wrap_counter);36x69
r36x70
}36x71
}72
73
// Event-suppression flag values stored in `DescEvent::flag`, mirroring
// the packed virtqueue's driver/device event suppression flags.
consts! {
    struct EventFlag(u16) {
        // Notifications/interrupts are always wanted.
        ENABLE = 0;
        // Notifications/interrupts are suppressed.
        DISABLE = 1;
        // Notify only for the descriptor index in `DescEvent::index`
        // (used when event-index is negotiated; see `enable_event_idx`).
        DESC = 2;
    }
}
81
struct DescEvent {82
index: WrappedIndex,83
flag: EventFlag,84
}85
86
/// A device-side view of a VIRTIO packed virtqueue living in guest RAM.
///
/// All pointers are translations of guest-physical addresses obtained
/// from [`Ram::get_ptr`]; the `'m` lifetime ties them to the mapping.
#[derive(Debug)]
pub struct PackedQueue<'m> {
    // Number of entries in the descriptor ring.
    size: u16,
    // Base of the descriptor ring (guest-shared, accessed via unsafe derefs).
    desc: *mut Desc,
    // Whether VIRTIO event-index (EventFlag::DESC mode) was negotiated.
    enable_event_idx: bool,
    // Device event suppression area: the driver writes here to control
    // device notifications (written by `enable_notification`).
    notification: *mut DescEvent,
    // Driver event suppression area: read by `interrupt_enabled` to
    // decide whether to interrupt the guest.
    interrupt: *mut DescEvent,
    _phantom: PhantomData<&'m ()>,
}
96
impl<'m> PackedQueue<'m> {97
pub fn new(reg: &QueueReg, ram: &'m Ram, event_idx: bool) -> Result<Option<PackedQueue<'m>>> {51x98
if !reg.enabled.load(Ordering::Acquire) {51x99
return Ok(None);3x100
}48x101
let size = reg.size.load(Ordering::Acquire);48x102
let desc = reg.desc.load(Ordering::Acquire);48x103
let notification: *mut DescEvent = ram.get_ptr(reg.device.load(Ordering::Acquire))?;48x104
Ok(Some(PackedQueue {105
size,48x106
desc: ram.get_ptr(desc)?,48x107
enable_event_idx: event_idx,48x108
notification,48x109
interrupt: ram.get_ptr(reg.driver.load(Ordering::Acquire))?,48x110
_phantom: PhantomData,48x111
}))112
}51x113
114
fn flag_is_avail(&self, flag: DescFlag, wrap_counter: bool) -> bool {12x115
flag.contains(DescFlag::AVAIL) == wrap_counter12x116
&& flag.contains(DescFlag::USED) != wrap_counter9x117
}12x118
119
fn set_flag_used(&self, flag: &mut DescFlag, wrap_counter: bool) {6x120
if wrap_counter {6x121
flag.insert(DescFlag::USED | DescFlag::AVAIL);6x122
} else {6x123
flag.remove(DescFlag::USED | DescFlag::AVAIL);124
}125
}6x126
}127
128
impl<'m> VirtQueue<'m> for PackedQueue<'m> {
    type Index = WrappedIndex;

    const INIT_INDEX: WrappedIndex = WrappedIndex::INIT;

    /// Checks whether the descriptor at `index` has been made available
    /// by the driver, by comparing its AVAIL/USED bits to the wrap counter.
    fn desc_avail(&self, index: WrappedIndex) -> bool {
        self.flag_is_avail(
            DescFlag::from_bits_retain(unsafe { &*self.desc.offset(index.offset() as isize) }.flag),
            index.wrap_counter(),
        )
    }

    /// Collects the descriptor chain starting at `index` into readable and
    /// writable guest-address ranges, translated to host iovecs.
    ///
    /// Returns `Ok(None)` when no descriptor is available at `index`.
    /// `delta` in the returned chain counts ring slots consumed (an
    /// indirect descriptor counts as one slot regardless of table size).
    fn get_avail(&self, index: Self::Index, ram: &'m Ram) -> Result<Option<DescChain<'m>>> {
        if !self.desc_avail(index) {
            return Ok(None);
        }
        let mut readable = Vec::new();
        let mut writeable = Vec::new();
        let mut delta = 0;
        let mut offset = index.offset();
        let id = loop {
            let desc = unsafe { &*self.desc.offset(offset as isize) };
            let flag = DescFlag::from_bits_retain(desc.flag);
            if flag.contains(DescFlag::INDIRECT) {
                // desc.addr points at a table of `Desc` entries; desc.len is
                // the table size in bytes. Each entry is copied out of guest
                // memory (read_t) rather than dereferenced in place.
                for i in 0..(desc.len as usize / size_of::<Desc>()) {
                    let addr = desc.addr + (i * size_of::<Desc>()) as u64;
                    let desc: Desc = ram.read_t(addr)?;
                    let flag = DescFlag::from_bits_retain(desc.flag);
                    if flag.contains(DescFlag::WRITE) {
                        writeable.push((desc.addr, desc.len as u64));
                    } else {
                        readable.push((desc.addr, desc.len as u64));
                    }
                }
            } else if flag.contains(DescFlag::WRITE) {
                writeable.push((desc.addr, desc.len as u64));
            } else {
                readable.push((desc.addr, desc.len as u64));
            }
            delta += 1;
            // The chain ends at the first descriptor without NEXT; its id
            // identifies the whole chain when reporting it used.
            if !flag.contains(DescFlag::NEXT) {
                break desc.id;
            }
            offset = (offset + 1) % self.size;
        };
        Ok(Some(DescChain {
            id,
            delta,
            readable: ram.translate_iov(&readable)?,
            writable: ram.translate_iov_mut(&writeable)?,
        }))
    }

    /// Reports a chain as used by rewriting the descriptor at `index`
    /// with the chain id and written length, then updating its flags.
    ///
    /// NOTE(review): id/len are written before the flag update, which is
    /// the order the driver must observe; there is no explicit release
    /// fence here, so ordering presumably relies on the caller or on the
    /// platform's memory model — confirm against the notification path.
    fn set_used(&self, index: Self::Index, id: u16, len: u32) {
        let first = unsafe { &mut *self.desc.offset(index.offset() as isize) };
        first.id = id;
        first.len = len;
        let mut flag = DescFlag::from_bits_retain(first.flag);
        self.set_flag_used(&mut flag, index.wrap_counter());
        first.flag = flag.bits();
    }

    /// Tells the driver whether the device wants to be notified, by
    /// writing ENABLE/DISABLE into the device event suppression area.
    fn enable_notification(&self, enabled: bool) {
        unsafe {
            (&mut *self.notification).flag = if enabled {
                EventFlag::ENABLE
            } else {
                EventFlag::DISABLE
            };
        }
    }

    /// Decides whether an interrupt should be sent after advancing the
    /// used index to `index` by `delta` entries.
    ///
    /// In event-index mode (DESC flag), fires only if the driver's watched
    /// index falls within the half-open window of entries just used
    /// [index - delta, index); a wrap-counter mismatch means the watched
    /// offset is one lap ahead, so it is shifted by `size` before the
    /// range check. Otherwise the plain ENABLE/DISABLE flag decides.
    fn interrupt_enabled(&self, index: Self::Index, delta: u16) -> bool {
        let interrupt = unsafe { &*self.interrupt };
        if self.enable_event_idx && interrupt.flag == EventFlag::DESC {
            let prev_used_index = index.wrapping_sub(delta, self.size);
            let base = prev_used_index.offset();
            let end = base + delta;
            let mut offset = interrupt.index.offset();
            if interrupt.index.wrap_counter() != prev_used_index.wrap_counter() {
                offset += self.size;
            }
            base <= offset && offset < end
        } else {
            interrupt.flag == EventFlag::ENABLE
        }
    }

    /// Advances `index` by `delta` ring slots, wrapping modulo the queue
    /// size and flipping the wrap counter as needed.
    fn index_add(&self, index: Self::Index, delta: u16) -> Self::Index {
        index.wrapping_add(delta, self.size)
    }
}
221
// Unit tests live in a sibling file rather than an inline module.
#[cfg(test)]
#[path = "packed_test.rs"]
mod tests;