Alioth Code Coverage

mapped.rs — 67.21% line coverage

1// Copyright 2024 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::cell::UnsafeCell;
16#[cfg(target_os = "linux")]
17use std::ffi::CStr;
18use std::fmt::Debug;
19use std::fs::File;
20use std::io::{IoSlice, IoSliceMut, Read, Write};
21use std::mem::{align_of, size_of};
22#[cfg(target_os = "linux")]
23use std::os::fd::FromRawFd;
24use std::os::fd::{AsFd, AsRawFd, BorrowedFd};
25use std::ptr::{NonNull, null_mut};
26use std::sync::Arc;
27
28#[cfg(target_os = "linux")]
29use libc::{MADV_HUGEPAGE, MFD_CLOEXEC};
30use libc::{
31 MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, MAP_SHARED, MS_ASYNC, PROT_READ, PROT_WRITE, c_void,
32 madvise, mmap, msync, munmap,
33};
34use parking_lot::{RwLock, RwLockReadGuard};
35use snafu::ResultExt;
36use zerocopy::{FromBytes, Immutable, IntoBytes};
37
38use crate::ffi;
39use crate::mem::addressable::{Addressable, SlotBackend};
40use crate::mem::{Error, Result, error};
41
/// An mmap-ed region of host memory.
///
/// `addr` and `len` describe the mapping, which is released with
/// `munmap` on drop. `fd` keeps the backing file alive (if any),
/// together with the offset into that file at which the mapping starts;
/// it is `None` for anonymous memory.
#[derive(Debug)]
struct MemPages {
    addr: NonNull<c_void>, // start of the mapping, never null (checked at creation)
    len: usize,            // length of the mapping in bytes
    fd: Option<(File, u64)>, // backing file and starting offset, if file-backed
}

// SAFETY: the struct exclusively owns the mapping; `NonNull<c_void>` is
// only !Send/!Sync because raw pointers are, and the pointed-to pages are
// plain memory, so moving or sharing the owner across threads is sound.
// NOTE(review): concurrent access to the pages themselves is the callers'
// responsibility — confirm against users of ArcMemPages.
unsafe impl Send for MemPages {}
unsafe impl Sync for MemPages {}
51
52impl Drop for MemPages {
53 fn drop(&mut self) {78x
54 let ret = unsafe { munmap(self.addr.as_ptr(), self.len) };78x
55 if ret != 0 {78x
56 log::error!("munmap({:p}, {:x}) = {:x}", self.addr, self.len, ret);
57 } else {
58 log::info!("munmap({:p}, {:x}) = {:x}, done", self.addr, self.len, ret);78x
59 }
60 }78x
61}
/// A cloneable, reference-counted handle to an mmap-ed region.
///
/// ArcMemPages uses Arc to manage the underlying memory and caches
/// the address and size on the stack. Compared with using Arc<MemPages>,
/// it avoids a memory load when a caller tries to read/write the pages.
/// TODO: is it really necessary?
#[derive(Debug, Clone)]
pub struct ArcMemPages {
    addr: usize, // cached copy of `_inner.addr`, as an integer
    size: usize, // cached copy of `_inner.len`
    _inner: Arc<MemPages>, // keeps the mapping alive; munmap runs when the last clone drops
}
72
impl SlotBackend for ArcMemPages {
    /// Size of the mapped region in bytes, as required by the
    /// slot-based address-space bookkeeping in `Addressable`.
    fn size(&self) -> u64 {
        self.size as u64
    }
}
78
impl ArcMemPages {
    /// Host virtual address of the start of the mapping.
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Size of the mapping in bytes.
    pub fn size(&self) -> u64 {
        self.size as u64
    }

    /// Backing file and the offset into it at which the mapping starts,
    /// or `None` for anonymous memory.
    pub fn fd(&self) -> Option<(BorrowedFd<'_>, u64)> {
        self._inner
            .fd
            .as_ref()
            .map(|(f, offset)| (f.as_fd(), *offset))
    }

    /// Schedules a write-back of the whole mapping (`msync` with
    /// `MS_ASYNC`); returns without waiting for the flush to finish.
    pub fn sync(&self) -> Result<()> {
        // SAFETY: addr/size describe the live mapping owned by _inner.
        ffi!(unsafe { msync(self.addr as *mut _, self.size, MS_ASYNC) })?;
        Ok(())
    }

    /// Marks the mapping as a candidate for transparent huge pages.
    #[cfg(target_os = "linux")]
    pub fn madvise_hugepage(&self) -> Result<()> {
        // SAFETY: addr/size describe the live mapping owned by _inner.
        ffi!(unsafe { madvise(self.addr as *mut _, self.size, MADV_HUGEPAGE) })?;
        Ok(())
    }

    /// Wraps a successful `mmap()` result. Panics if `addr` is null,
    /// which a successful mmap should never return.
    fn from_raw(addr: *mut c_void, len: usize, fd: Option<(File, u64)>) -> Self {
        let addr = NonNull::new(addr).expect("address from mmap() should not be null");
        ArcMemPages {
            addr: addr.as_ptr() as usize,
            size: len,
            _inner: Arc::new(MemPages { addr, len, fd }),
        }
    }

    /// Maps `len` bytes of `file` starting at `offset` with MAP_SHARED.
    ///
    /// NOTE(review): a negative `offset` would wrap when cached as u64
    /// below; callers appear to pass non-negative offsets — confirm.
    pub fn from_file(file: File, offset: i64, len: usize, prot: i32) -> Result<Self> {
        let addr = ffi!(
            // SAFETY: requesting a fresh shared mapping of an fd we own.
            unsafe { mmap(null_mut(), len, prot, MAP_SHARED, file.as_raw_fd(), offset) },
            MAP_FAILED
        )?;
        Ok(Self::from_raw(addr, len, Some((file, offset as u64))))
    }

    /// Creates a memfd named `name`, sizes it to `size`, and maps it
    /// shared. `prot` defaults to `PROT_READ | PROT_WRITE`.
    #[cfg(target_os = "linux")]
    pub fn from_memfd(name: &CStr, size: usize, prot: Option<i32>) -> Result<Self> {
        // SAFETY: `name` is a valid NUL-terminated string.
        let fd = ffi!(unsafe { libc::memfd_create(name.as_ptr(), MFD_CLOEXEC) })?;
        let prot = prot.unwrap_or(PROT_WRITE | PROT_READ);
        let addr = ffi!(
            // SAFETY: mapping the freshly created memfd.
            unsafe { mmap(null_mut(), size, prot, MAP_SHARED, fd, 0) },
            MAP_FAILED
        )?;
        // SAFETY: memfd_create returned an owned fd not wrapped elsewhere.
        let file = unsafe { File::from_raw_fd(fd) };
        file.set_len(size as _)?;
        Ok(Self::from_raw(addr, size, Some((file, 0))))
    }

    /// Maps `size` bytes of anonymous memory. `prot` defaults to
    /// `PROT_READ | PROT_WRITE`, `flags` to `MAP_PRIVATE`;
    /// `MAP_ANONYMOUS` is always added.
    pub fn from_anonymous(size: usize, prot: Option<i32>, flags: Option<i32>) -> Result<Self> {
        let prot = prot.unwrap_or(PROT_WRITE | PROT_READ);
        let flags = flags.unwrap_or(MAP_PRIVATE) | MAP_ANONYMOUS;
        let addr = ffi!(
            // SAFETY: anonymous mapping; no fd is involved (hence -1).
            unsafe { mmap(null_mut(), size, prot, flags, -1, 0) },
            MAP_FAILED
        )?;
        Ok(Self::from_raw(addr, size, None))
    }

    /// Given offset and len, return the host virtual address and len;
    /// len might be truncated.
    ///
    /// The wrapping `end` arithmetic makes ranges whose end overflows
    /// (and a zero `len` at a nonzero offset) compare as `end < offset`
    /// and be rejected as ExceedsLimit, along with offsets past the end.
    fn get_valid_range(&self, offset: usize, len: usize) -> Result<(usize, usize)> {
        let end = offset.wrapping_add(len).wrapping_sub(1);
        if offset >= self.size || end < offset {
            return error::ExceedsLimit {
                addr: offset as u64,
                size: len as u64,
            }
            .fail();
        }
        // Truncate to what remains of the region past `offset`.
        let valid_len = std::cmp::min(self.size - offset, len);
        Ok((self.addr + offset, valid_len))
    }

    /// The whole mapping as a mutable byte slice.
    ///
    /// NOTE(review): clones of ArcMemPages alias the same pages, so
    /// `&mut self` does not make this slice exclusive — confirm callers
    /// serialize access.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        // SAFETY: addr/size describe a live mapping kept alive by _inner.
        unsafe { std::slice::from_raw_parts_mut(self.addr as *mut u8, self.size) }
    }

    /// The whole mapping as a shared byte slice.
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: addr/size describe a live mapping kept alive by _inner.
        unsafe { std::slice::from_raw_parts(self.addr as *const u8, self.size) }
    }

    /// Given offset and len, return a slice, len might be truncated.
    fn get_partial_slice(&self, offset: usize, len: usize) -> Result<&[u8], Error> {
        let (addr, len) = self.get_valid_range(offset, len)?;
        // SAFETY: get_valid_range guarantees [addr, addr + len) lies
        // inside the live mapping.
        Ok(unsafe { std::slice::from_raw_parts(addr as *const u8, len) })
    }

    /// Given offset and len, return a mutable slice, len might be truncated.
    #[allow(clippy::mut_from_ref)]
    fn get_partial_slice_mut(&self, offset: usize, len: usize) -> Result<&mut [u8], Error> {
        let (addr, len) = self.get_valid_range(offset, len)?;
        // SAFETY: get_valid_range guarantees [addr, addr + len) lies
        // inside the live mapping; exclusivity is the caller's concern
        // (see the clippy::mut_from_ref allowance above).
        Ok(unsafe { std::slice::from_raw_parts_mut(addr as *mut u8, len) })
    }
}
182
/// Guest RAM layout: a set of [`ArcMemPages`] regions indexed by guest
/// physical address.
#[derive(Debug)]
pub struct Ram {
    inner: Addressable<ArcMemPages>,
}
187
/// Thread-safe front end for [`Ram`]: reads and writes take the lock in
/// shared mode, while layout changes (add/remove) take it exclusively.
#[derive(Debug)]
pub struct RamBus {
    ram: RwLock<Ram>,
}
192
/// Iterator over the host slices backing a guest physical range,
/// yielding one shared slice per contiguous backing region.
struct Iter<'m> {
    ram: &'m Ram, // layout to resolve addresses against
    gpa: u64,     // next guest physical address to resolve
    remain: u64,  // bytes still to be yielded
}
198
199impl<'m> Iterator for Iter<'m> {
200 type Item = Result<&'m [u8]>;
201
202 fn next(&mut self) -> Option<Self::Item> {1224x
203 if self.remain == 0 {1224x
204 return None;384x
205 }840x
206 let r = self.ram.get_partial_slice(self.gpa, self.remain);840x
207 if let Ok(s) = r {840x
208 self.gpa += s.len() as u64;747x
209 self.remain -= s.len() as u64;747x
210 }747x
211 Some(r)840x
212 }1224x
213}
214
/// Mutable counterpart of `Iter`: iterates over the host slices backing
/// a guest physical range, yielding one mutable slice per region.
struct IterMut<'m> {
    ram: &'m Ram, // layout to resolve addresses against
    gpa: u64,     // next guest physical address to resolve
    remain: u64,  // bytes still to be yielded
}
220
221impl<'m> Iterator for IterMut<'m> {
222 type Item = Result<&'m mut [u8]>;
223
224 fn next(&mut self) -> Option<Self::Item> {1158x
225 if self.remain == 0 {1158x
226 return None;345x
227 }813x
228 let r = self.ram.get_partial_slice_mut(self.gpa, self.remain);813x
229 if let Ok(ref s) = r {813x
230 self.gpa += s.len() as u64;720x
231 self.remain -= s.len() as u64;720x
232 }720x
233 Some(r)813x
234 }1158x
235}
236
impl Ram {
    /// Iterates over the host slices backing guest range
    /// `[gpa, gpa + len)`, one slice per contiguous backing region.
    fn slice_iter(&self, gpa: u64, len: u64) -> Iter<'_> {
        Iter {
            ram: self,
            gpa,
            remain: len,
        }
    }

    /// Mutable counterpart of [`Ram::slice_iter`].
    fn slice_iter_mut(&self, gpa: u64, len: u64) -> IterMut<'_> {
        IterMut {
            ram: self,
            gpa,
            remain: len,
        }
    }

    /// Resolves `gpa` to a host slice of at most `len` bytes; the result
    /// is truncated at the end of the backing region containing `gpa`.
    fn get_partial_slice(&self, gpa: u64, len: u64) -> Result<&[u8]> {
        let Some((start, user_mem)) = self.inner.search(gpa) else {
            return error::NotMapped { addr: gpa }.fail();
        };
        user_mem.get_partial_slice((gpa - start) as usize, len as usize)
    }

    /// Mutable counterpart of [`Ram::get_partial_slice`].
    fn get_partial_slice_mut(&self, gpa: u64, len: u64) -> Result<&mut [u8]> {
        let Some((start, user_mem)) = self.inner.search(gpa) else {
            return error::NotMapped { addr: gpa }.fail();
        };
        user_mem.get_partial_slice_mut((gpa - start) as usize, len as usize)
    }

    /// Reinterprets guest memory at `gpa` as `len` elements of `T`,
    /// wrapped in `UnsafeCell` because the guest may change the bytes
    /// at any time.
    ///
    /// Fails with `NotContinuous` if the range spans multiple backing
    /// regions, or `NotAligned` if the host pointer is misaligned for `T`.
    /// NOTE(review): `len * size_of::<T>()` can overflow for huge `len`
    /// — confirm callers bound it.
    pub fn get_slice<T>(&self, gpa: u64, len: u64) -> Result<&[UnsafeCell<T>], Error> {
        let total_len = len * size_of::<T>() as u64;
        let host_ref = self.get_partial_slice(gpa, total_len)?;
        let ptr = host_ref.as_ptr() as *const UnsafeCell<T>;
        if host_ref.len() as u64 != total_len {
            // A shorter slice means the range crossed a region boundary.
            error::NotContinuous {
                addr: gpa,
                size: total_len,
            }
            .fail()
        } else if !ptr.is_aligned() {
            error::NotAligned {
                addr: ptr as u64,
                align: align_of::<T>(),
            }
            .fail()
        } else {
            // SAFETY: the checks above guarantee total_len bytes of
            // mapped, aligned guest memory; UnsafeCell makes concurrent
            // guest mutation the caller's concern.
            Ok(unsafe { &*core::ptr::slice_from_raw_parts(ptr, len as usize) })
        }
    }

    /// Returns a raw host pointer to a `T` located at `gpa`; the whole
    /// `T` must lie inside a single backing region and be aligned.
    pub fn get_ptr<T>(&self, gpa: u64) -> Result<*mut T, Error> {
        let host_ref = self.get_partial_slice_mut(gpa, size_of::<T>() as u64)?;
        let ptr = host_ref.as_mut_ptr();
        if host_ref.len() != size_of::<T>() {
            // A shorter slice means the T would cross a region boundary.
            error::NotContinuous {
                addr: gpa,
                size: size_of::<T>() as u64,
            }
            .fail()
        } else if !ptr.is_aligned() {
            error::NotAligned {
                addr: ptr as u64,
                align: align_of::<T>(),
            }
            .fail()
        } else {
            Ok(ptr as *mut T)
        }
    }

    /// Copies guest memory at `gpa` into `buf`, stitching together
    /// multiple backing regions if the range is not host-contiguous.
    pub fn read(&self, gpa: u64, buf: &mut [u8]) -> Result<()> {
        let host_ref = self.get_partial_slice(gpa, buf.len() as u64)?;
        if host_ref.len() == buf.len() {
            // Fast path: the whole range is contiguous on the host.
            buf.copy_from_slice(host_ref);
        } else {
            let mut cur = 0;
            for r in self.slice_iter(gpa, buf.len() as u64) {
                let s = r?;
                let s_len = s.len();
                buf[cur..(cur + s_len)].copy_from_slice(s);
                cur += s_len;
            }
        }
        Ok(())
    }

    /// Reads a plain-old-data `T` from guest memory at `gpa`.
    pub fn read_t<T>(&self, gpa: u64) -> Result<T>
    where
        T: FromBytes + IntoBytes,
    {
        let mut v = T::new_zeroed();
        self.read(gpa, v.as_mut_bytes())?;
        Ok(v)
    }

    /// Copies `buf` into guest memory at `gpa`, stitching together
    /// multiple backing regions if the range is not host-contiguous.
    pub fn write(&self, gpa: u64, buf: &[u8]) -> Result<()> {
        let len = buf.len() as u64;
        let host_ref = self.get_partial_slice_mut(gpa, len)?;
        if host_ref.len() == buf.len() {
            // Fast path: the whole range is contiguous on the host.
            host_ref.copy_from_slice(buf);
            Ok(())
        } else {
            let mut cur = 0;
            for r in self.slice_iter_mut(gpa, len) {
                let s = r?;
                let s_len = s.len();
                s.copy_from_slice(&buf[cur..(cur + s_len)]);
                cur += s_len;
            }
            Ok(())
        }
    }

    /// Writes a plain-old-data `T` to guest memory at `gpa`.
    pub fn write_t<T>(&self, gpa: u64, val: &T) -> Result<(), Error>
    where
        T: IntoBytes + Immutable,
    {
        self.write(gpa, val.as_bytes())
    }

    /// Translates `gpa` to a host pointer (valid for at least one byte).
    pub fn translate(&self, gpa: u64) -> Result<*const u8> {
        let s = self.get_partial_slice(gpa, 1)?;
        Ok(s.as_ptr())
    }

    /// Translates a guest `(gpa, len)` I/O vector into host `IoSlice`s,
    /// splitting entries that cross backing-region boundaries.
    pub fn translate_iov<'a>(&'a self, iov: &[(u64, u64)]) -> Result<Vec<IoSlice<'a>>> {
        let mut slices = vec![];
        for (gpa, len) in iov {
            for r in self.slice_iter(*gpa, *len) {
                slices.push(IoSlice::new(r?));
            }
        }
        Ok(slices)
    }

    /// Mutable counterpart of [`Ram::translate_iov`].
    pub fn translate_iov_mut<'a>(&'a self, iov: &[(u64, u64)]) -> Result<Vec<IoSliceMut<'a>>> {
        let mut slices = vec![];
        for (gpa, len) in iov {
            for r in self.slice_iter_mut(*gpa, *len) {
                slices.push(IoSliceMut::new(r?));
            }
        }
        Ok(slices)
    }

    /// Iterates over all mapped regions as (guest address, pages) pairs.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = (u64, &ArcMemPages)> {
        self.inner.iter()
    }

    /// Applies `madvise(advice)` to every host range backing
    /// `[gpa, gpa + size)`.
    pub fn madvise(&self, gpa: u64, size: u64, advice: i32) -> Result<()> {
        for r in self.slice_iter_mut(gpa, size) {
            let s = r?;
            // SAFETY: `s` is a live slice of mapped guest memory.
            ffi!(unsafe { madvise(s.as_mut_ptr() as _, s.len(), advice) })?;
        }
        Ok(())
    }
}
396
397impl Default for RamBus {
398 fn default() -> Self {
399 Self::new()
400 }
401}
402
403impl RamBus {
404 pub fn lock_layout(&self) -> RwLockReadGuard<'_, Ram> {81x
405 self.ram.read()81x
406 }81x
407
408 pub fn new() -> Self {81x
409 Self {81x
410 ram: RwLock::new(Ram {81x
411 inner: Addressable::default(),81x
412 }),81x
413 }81x
414 }81x
415
416 pub(crate) fn add(&self, gpa: u64, user_mem: ArcMemPages) -> Result<(), Error> {78x
417 let mut ram = self.ram.write();78x
418 ram.inner.add(gpa, user_mem)?;78x
419 Ok(())78x
420 }78x
421
422 pub(crate) fn remove(&self, gpa: u64) -> Result<ArcMemPages, Error> {3x
423 let mut ram = self.ram.write();3x
424 ram.inner.remove(gpa)3x
425 }3x
426
427 pub fn read(&self, gpa: u64, buf: &mut [u8]) -> Result<()> {
428 let ram = self.ram.read();
429 ram.read(gpa, buf)
430 }
431
432 pub fn write(&self, gpa: u64, buf: &[u8]) -> Result<()> {
433 let ram = self.ram.read();
434 ram.write(gpa, buf)
435 }
436
437 pub fn read_t<T>(&self, gpa: u64) -> Result<T, Error>225x
438 where225x
439 T: FromBytes + IntoBytes,225x
440 {
441 let ram = self.ram.read();225x
442 ram.read_t(gpa)225x
443 }225x
444
445 pub fn write_t<T>(&self, gpa: u64, val: &T) -> Result<(), Error>225x
446 where225x
447 T: IntoBytes + Immutable,225x
448 {
449 let ram = self.ram.read();225x
450 ram.write_t(gpa, val)225x
451 }225x
452
453 pub fn read_range(&self, gpa: u64, len: u64, dst: &mut impl Write) -> Result<()> {195x
454 let ram = self.ram.read();195x
455 for r in ram.slice_iter(gpa, len) {384x
456 dst.write_all(r?).context(error::Write)?;384x
457 }
458 Ok(())195x
459 }195x
460
461 pub fn write_range(&self, gpa: u64, len: u64, mut src: impl Read) -> Result<()> {195x
462 let ram = self.ram.read();195x
463 for r in ram.slice_iter_mut(gpa, len) {384x
464 src.read_exact(r?).context(error::Read)?;384x
465 }
466 Ok(())195x
467 }195x
468
469 pub fn read_vectored<T, F>(&self, bufs: &[(u64, u64)], callback: F) -> Result<T, Error>3x
470 where3x
471 F: FnOnce(&[IoSlice<'_>]) -> T,3x
472 {
473 let ram = self.ram.read();3x
474 let mut iov = vec![];3x
475 for (gpa, len) in bufs {9x
476 for r in ram.slice_iter(*gpa, *len) {12x
477 iov.push(IoSlice::new(r?));12x
478 }
479 }
480 Ok(callback(&iov))3x
481 }3x
482
483 pub fn write_vectored<T, F>(&self, bufs: &[(u64, u64)], callback: F) -> Result<T, Error>3x
484 where3x
485 F: FnOnce(&mut [IoSliceMut<'_>]) -> T,3x
486 {
487 let ram = self.ram.read();3x
488 let mut iov = vec![];3x
489 for (gpa, len) in bufs {9x
490 for r in ram.slice_iter_mut(*gpa, *len) {12x
491 iov.push(IoSliceMut::new(r?));12x
492 }
493 }
494 Ok(callback(&mut iov))3x
495 }3x
496}
497
498#[cfg(test)]
499#[path = "mapped_test.rs"]
500mod tests;
501