Alioth Code Coverage

mem.rs: 45.97%

// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod addressable;
pub mod emulated;
pub mod mapped;

use std::any::{Any, type_name};
use std::fmt::Debug;
use std::sync::Arc;

use parking_lot::{Mutex, RwLock};
use serde::Deserialize;
use serde_aco::Help;
use snafu::Snafu;

#[cfg(target_arch = "aarch64")]
use crate::arch::layout::IO_START;
use crate::errors::{DebugTrace, trace_error};
use crate::hv::{MemMapOption, VmEntry, VmMemory};

use self::addressable::{Addressable, SlotBackend};
use self::emulated::{Action, Mmio, MmioBus};
use self::mapped::{ArcMemPages, Ram, RamBus};

#[trace_error]
#[derive(Snafu, DebugTrace)]
#[snafu(module, visibility(pub), context(suffix(false)))]
pub enum Error {
    #[snafu(display("Hypervisor internal error"), context(false))]
    HvError { source: Box<crate::hv::Error> },
    #[snafu(display("Cannot add a zero-sized slot"))]
    ZeroSizedSlot,
    #[snafu(display("(addr={addr:#x}, size={size:#x}) exceeds the address limit"))]
    ExceedsLimit { addr: u64, size: u64 },
    #[snafu(display("{new_item:#x?} overlaps with {exist_item:#x?}"))]
    Overlap {
        new_item: [u64; 2],
        exist_item: [u64; 2],
    },
    #[snafu(display("{addr:#x} is not mapped"))]
    NotMapped { addr: u64 },
    #[snafu(display(
        "Sum of backend range sizes {sum:#x} exceeds the region total size {size:#x}"
    ))]
    BackendTooBig { sum: u64, size: u64 },
    #[snafu(display("Address {addr:#x} is not {align}-byte aligned"))]
    NotAligned { addr: u64, align: usize },
    #[snafu(display(
        "Guest address {addr:#x} (size = {size:#x}) is not backed by contiguous host memory"
    ))]
    NotContinuous { addr: u64, size: u64 },
    #[snafu(display("Error from OS"), context(false))]
    System { error: std::io::Error },
    #[snafu(display("Failed to write data to destination"))]
    Write { error: std::io::Error },
    #[snafu(display("Failed to read data from source"))]
    Read { error: std::io::Error },
    #[snafu(display("Failed to do MMIO"))]
    Mmio {
        source: Box<dyn DebugTrace + Send + Sync + 'static>,
    },
    #[snafu(display("Failed to change memory layout"))]
    ChangeLayout {
        source: Box<dyn DebugTrace + Send + Sync + 'static>,
    },
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

fn default_memory_size() -> u64 {
    1 << 30
}

#[derive(Debug, Default, PartialEq, Eq, Deserialize, Help)]
pub struct MemConfig {
    /// Total guest memory size in bytes. [default: 1G]
    #[serde(default = "default_memory_size")]
    pub size: u64,
    /// Host backend [default: anon]
    #[serde(default)]
    pub backend: MemBackend,
    /// mmap() guest memory with MAP_SHARED or MAP_PRIVATE.
    /// [default: false]
    #[serde(default)]
    pub shared: bool,
    /// Enable transparent hugepage. [default: false]
    #[cfg(target_os = "linux")]
    #[serde(default, alias = "thp")]
    pub transparent_hugepage: bool,
}

#[derive(Debug, Default, PartialEq, Eq, Deserialize, Help)]
pub enum MemBackend {
    /// Anonymous memory by MAP_ANONYMOUS.
    #[default]
    #[serde(alias = "anon")]
    Anonymous,
    /// Anonymous file by memfd_create(). Always uses MAP_SHARED.
    #[cfg(target_os = "linux")]
    #[serde(alias = "memfd")]
    Memfd,
}

impl MemConfig {
    pub fn has_shared_fd(&self) -> bool {
        match &self.backend {
            #[cfg(target_os = "linux")]
            MemBackend::Memfd => true,
            MemBackend::Anonymous => false,
        }
    }
}
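
// Construction sketch for the two types above (hypothetical values; in
// practice the fields arrive via serde_aco deserialization of a CLI
// option string, e.g. something like "size=2G,backend=memfd"):
//
//     let config = MemConfig {
//         size: 2 << 30,                // 2 GiB
//         backend: MemBackend::Memfd,   // Linux-only
//         shared: false,
//         transparent_hugepage: false,  // Linux-only field
//     };
//     assert!(config.has_shared_fd()); // memfd always maps MAP_SHARED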

#[derive(Debug)]
pub enum MemRange {
    Ram(ArcMemPages),
    DevMem(ArcMemPages),
    Emulated(Arc<dyn Mmio>),
    Span(u64),
}

impl MemRange {
    pub fn size(&self) -> u64 {
        match self {
            MemRange::Ram(pages) | MemRange::DevMem(pages) => pages.size(),
            MemRange::Emulated(range) => Mmio::size(range),
            MemRange::Span(size) => *size,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemRegionType {
    Hidden,
    Ram,
    Reserved,
    Acpi,
    Pmem,
}

#[derive(Debug, Clone, Copy)]
pub struct MemRegionEntry {
    pub size: u64,
    pub type_: MemRegionType,
}

pub trait MemRegionCallback: Debug + Send + Sync + Any + 'static {
    fn mapped(&self, addr: u64) -> Result<()>;
    fn unmapped(&self) -> Result<()> {
        log::debug!("{} unmapped", type_name::<Self>());
        Ok(())
    }
}
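
// Sketch of a MemRegionCallback implementor (hypothetical type, for
// illustration only); unmapped() falls back to the logging default above:
//
//     #[derive(Debug)]
//     struct LogMapped;
//
//     impl MemRegionCallback for LogMapped {
//         fn mapped(&self, addr: u64) -> Result<()> {
//             log::info!("region mapped at {addr:#x}");
//             Ok(())
//         }
//     }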

#[derive(Debug)]
pub struct MemRegion {
    pub ranges: Vec<MemRange>,
    pub entries: Vec<MemRegionEntry>,
    pub callbacks: Mutex<Vec<Box<dyn MemRegionCallback>>>,
}

impl MemRegion {
    pub fn size(&self) -> u64 {
        self.entries.iter().fold(0, |accu, e| accu + e.size)
    }

    pub fn with_ram(pages: ArcMemPages, type_: MemRegionType) -> MemRegion {
        let size = pages.size();
        MemRegion {
            ranges: vec![MemRange::Ram(pages)],
            entries: vec![MemRegionEntry { type_, size }],
            callbacks: Mutex::new(vec![]),
        }
    }

    pub fn with_dev_mem(pages: ArcMemPages, type_: MemRegionType) -> MemRegion {
        let size = pages.size();
        MemRegion {
            ranges: vec![MemRange::DevMem(pages)],
            entries: vec![MemRegionEntry { type_, size }],
            callbacks: Mutex::new(vec![]),
        }
    }

    pub fn with_emulated(range: Arc<dyn Mmio>, type_: MemRegionType) -> MemRegion {
        let size = Mmio::size(&range);
        MemRegion {
            ranges: vec![MemRange::Emulated(range)],
            entries: vec![MemRegionEntry { type_, size }],
            callbacks: Mutex::new(vec![]),
        }
    }

    pub fn validate(&self) -> Result<()> {
        let entries_size = self.size();
        let ranges_size = self.ranges.iter().fold(0, |accu, r| accu + r.size());
        if ranges_size > entries_size {
            return error::BackendTooBig {
                sum: ranges_size,
                size: entries_size,
            }
            .fail();
        }
        Ok(())
    }
}
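
// Sketch: validate() accepts a region whose backing ranges cover no more
// than its entries describe (hypothetical sizes):
//
//     let region = MemRegion {
//         ranges: vec![MemRange::Span(0x1000)],
//         entries: vec![MemRegionEntry {
//             size: 0x2000,
//             type_: MemRegionType::Reserved,
//         }],
//         callbacks: Mutex::new(vec![]),
//     };
//     assert!(region.validate().is_ok()); // 0x1000 <= 0x2000
//     assert_eq!(region.size(), 0x2000);  // size() sums the entries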

impl SlotBackend for Arc<MemRegion> {
    fn size(&self) -> u64 {
        MemRegion::size(self.as_ref())
    }
}

#[derive(Debug)]
pub struct IoRegion {
    pub range: Arc<dyn Mmio>,
    pub callbacks: Mutex<Vec<Box<dyn MemRegionCallback>>>,
}

impl IoRegion {
    pub fn new(range: Arc<dyn Mmio>) -> IoRegion {
        IoRegion {
            range,
            callbacks: Mutex::new(vec![]),
        }
    }
}

impl SlotBackend for Arc<IoRegion> {
    fn size(&self) -> u64 {
        Mmio::size(self.range.as_ref())
    }
}

pub trait LayoutChanged: Debug + Send + Sync + 'static {
    fn ram_added(&self, gpa: u64, pages: &ArcMemPages) -> Result<()>;
    fn ram_removed(&self, gpa: u64, pages: &ArcMemPages) -> Result<()>;
}
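
// `MarkPrivateMemory` at the end of this file implements `LayoutChanged`,
// forwarding newly added RAM ranges to VmMemory::mark_private_memory().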

pub trait LayoutUpdated: Debug + Send + Sync + 'static {
    fn ram_updated(&self, ram: &Ram) -> Result<()>;
}

#[derive(Debug, Default)]
struct LayoutCallbacks {
    changed: Vec<Box<dyn LayoutChanged>>,
    updated: Vec<Box<dyn LayoutUpdated>>,
}

// lock order: region -> callbacks -> bus
#[derive(Debug)]
pub struct Memory {
    regions: Mutex<Addressable<Arc<MemRegion>>>,
    callbacks: Mutex<LayoutCallbacks>,
    ram_bus: Arc<RamBus>,
    mmio_bus: RwLock<MmioBus>,
    vm_memory: Arc<dyn VmMemory>,

    #[cfg(target_arch = "x86_64")]
    io_bus: RwLock<MmioBus>,
    io_regions: Mutex<Addressable<Arc<IoRegion>>>,
}

impl Memory {
    pub fn new(vm_memory: Arc<dyn VmMemory>) -> Self {
        Memory {
            regions: Mutex::new(Addressable::new()),
            callbacks: Mutex::new(LayoutCallbacks::default()),
            ram_bus: Arc::new(RamBus::new()),
            mmio_bus: RwLock::new(MmioBus::new()),
            vm_memory,
            #[cfg(target_arch = "x86_64")]
            io_bus: RwLock::new(MmioBus::new()),
            io_regions: Mutex::new(Addressable::new()),
        }
    }

    pub fn register_change_callback(&self, callback: Box<dyn LayoutChanged>) -> Result<()> {
        let regions = self.regions.lock();
        for (addr, region) in regions.iter() {
            let mut offset = 0;
            for range in &region.ranges {
                let gpa = addr + offset;
                match range {
                    MemRange::Ram(r) => callback.ram_added(gpa, r)?,
                    MemRange::Span(_) | MemRange::Emulated(_) | MemRange::DevMem(_) => {}
                }
                offset += range.size();
            }
        }
        let mut callbacks = self.callbacks.lock();
        callbacks.changed.push(callback);
        Ok(())
    }

    pub fn register_update_callback(&self, callback: Box<dyn LayoutUpdated>) -> Result<()> {
        let _regions = self.regions.lock();
        let mut callbacks = self.callbacks.lock();
        let ram = self.ram_bus.lock_layout();
        callback.ram_updated(&ram)?;
        callbacks.updated.push(callback);
        Ok(())
    }

    pub fn reset(&self) -> Result<()> {
        self.clear()?;
        self.vm_memory.reset()?;
        Ok(())
    }

    pub fn ram_bus(&self) -> Arc<RamBus> {
        self.ram_bus.clone()
    }

    fn map_to_vm(&self, gpa: u64, user_mem: &ArcMemPages) -> Result<(), Error> {
        let mem_options = MemMapOption {
            read: true,
            write: true,
            exec: true,
            log_dirty: false,
        };
        self.vm_memory
            .mem_map(gpa, user_mem.size(), user_mem.addr(), mem_options)?;
        Ok(())
    }

    fn unmap_from_vm(&self, gpa: u64, user_mem: &ArcMemPages) -> Result<(), Error> {
        self.vm_memory.unmap(gpa, user_mem.size())?;
        Ok(())
    }

    pub fn add_mmio_dev(&self, addr: u64, dev: Arc<dyn Mmio>) -> Result<()> {
        let region = MemRegion::with_emulated(dev, MemRegionType::Hidden);
        self.add_region(addr, Arc::new(region))
    }
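
    // Usage sketch (hypothetical: `dev` is any Arc<dyn Mmio>, e.g. an
    // emulated device's register block):
    //
    //     memory.add_mmio_dev(0xfe00_0000, dev.clone())?;
    //     // The device now serves handle_mmio() at that GPA until removed:
    //     memory.remove_region(0xfe00_0000)?;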

    pub fn add_region(&self, addr: u64, region: Arc<MemRegion>) -> Result<()> {
        region.validate()?;
        let mut regions = self.regions.lock();
        regions.add(addr, region.clone())?;
        let mut offset = 0;
        let callbacks = self.callbacks.lock();
        let mut ram_updated = false;
        for range in &region.ranges {
            let gpa = addr + offset;
            match range {
                MemRange::Emulated(r) => {
                    let mut mmio_bus = self.mmio_bus.write();
                    mmio_bus.add(gpa, r.clone())?
                }
                MemRange::Ram(r) => {
                    self.map_to_vm(gpa, r)?;
                    for callback in &callbacks.changed {
                        callback.ram_added(gpa, r)?;
                    }
                    self.ram_bus.add(gpa, r.clone())?;
                    ram_updated = true;
                }
                MemRange::DevMem(r) => self.map_to_vm(gpa, r)?,
                MemRange::Span(_) => {}
            }
            offset += range.size();
        }
        if ram_updated {
            let ram = self.ram_bus.lock_layout();
            for update_callback in &callbacks.updated {
                update_callback.ram_updated(&ram)?;
            }
        }
        let region_callbacks = region.callbacks.lock();
        for callback in region_callbacks.iter() {
            callback.mapped(addr)?;
        }
        Ok(())
    }

    fn unmap_region(&self, addr: u64, region: &MemRegion) -> Result<()> {
        let mut offset = 0;
        let callbacks = self.callbacks.lock();
        let mut ram_updated = false;
        for range in &region.ranges {
            let gpa = addr + offset;
            match range {
                MemRange::Emulated(_) => {
                    let mut mmio_bus = self.mmio_bus.write();
                    mmio_bus.remove(gpa)?;
                }
                MemRange::Ram(r) => {
                    self.ram_bus.remove(gpa)?;
                    for callback in callbacks.changed.iter().rev() {
                        callback.ram_removed(gpa, r)?;
                    }
                    self.unmap_from_vm(gpa, r)?;
                    ram_updated = true;
                }
                MemRange::DevMem(r) => self.unmap_from_vm(gpa, r)?,
                MemRange::Span(_) => {}
            };
            offset += range.size();
        }
        if ram_updated {
            let ram = self.ram_bus.lock_layout();
            for callback in &callbacks.updated {
                callback.ram_updated(&ram)?;
            }
        }
        let region_callbacks = region.callbacks.lock();
        for callback in region_callbacks.iter() {
            callback.unmapped()?;
        }
        Ok(())
    }

    pub fn remove_region(&self, addr: u64) -> Result<Arc<MemRegion>> {
        let mut regions = self.regions.lock();
        let region = regions.remove(addr)?;
        self.unmap_region(addr, &region)?;
        Ok(region)
    }

    // TODO can be optimized
    fn clear(&self) -> Result<()> {
        let mut regions = self.regions.lock();
        let regions = regions.drain(..);
        for (addr, region) in regions {
            self.unmap_region(addr, &region)?;
        }
        let mut io_regions = self.io_regions.lock();
        let io_regions = io_regions.drain(..);
        for (port, io_region) in io_regions {
            self.unmap_io_region(port as u16, &io_region)?;
        }
        Ok(())
    }

    pub fn mem_region_entries(&self) -> Vec<(u64, MemRegionEntry)> {
        let mut entries = vec![];
        let regions = self.regions.lock();
        for (start, region) in regions.iter() {
            let mut offset = 0;
            for entry in region.entries.iter() {
                entries.push((start + offset, *entry));
                offset += entry.size;
            }
        }
        entries
    }

    pub fn io_region_entries(&self) -> Vec<(u64, u64)> {
        let mut entries = vec![];
        let regions = self.io_regions.lock();
        for (start, region) in regions.iter() {
            entries.push((start, region.size()));
        }
        entries
    }

    pub fn add_io_dev(&self, port: u16, dev: Arc<dyn Mmio>) -> Result<()> {
        self.add_io_region(port, Arc::new(IoRegion::new(dev)))
    }

    pub fn add_io_region(&self, port: u16, region: Arc<IoRegion>) -> Result<()> {
        let mut regions = self.io_regions.lock();
        regions.add(port as u64, region.clone())?;
        #[cfg(target_arch = "x86_64")]
        {
            let mut io_bus = self.io_bus.write();
            io_bus.add(port as u64, region.range.clone())?;
        }
        #[cfg(target_arch = "aarch64")]
        {
            let mut mmio_bus = self.mmio_bus.write();
            mmio_bus.add(IO_START + port as u64, region.range.clone())?;
        }
        let callbacks = region.callbacks.lock();
        for callback in callbacks.iter() {
            callback.mapped(port as u64)?;
        }
        Ok(())
    }

    fn unmap_io_region(&self, port: u16, region: &IoRegion) -> Result<()> {
        #[cfg(target_arch = "x86_64")]
        {
            let mut io_bus = self.io_bus.write();
            io_bus.remove(port as u64)?;
        }
        #[cfg(target_arch = "aarch64")]
        {
            let mut mmio_bus = self.mmio_bus.write();
            mmio_bus.remove(IO_START + port as u64)?;
        }
        let callbacks = region.callbacks.lock();
        for callback in callbacks.iter() {
            callback.unmapped()?;
        }
        Ok(())
    }

    pub fn remove_io_region(&self, port: u16) -> Result<Arc<IoRegion>> {
        let mut io_regions = self.io_regions.lock();
        let io_region = io_regions.remove(port as u64)?;
        self.unmap_io_region(port, &io_region)?;
        Ok(io_region)
    }

    pub fn register_encrypted_pages(&self, pages: &ArcMemPages) -> Result<()> {
        self.vm_memory.register_encrypted_range(pages.as_slice())?;
        Ok(())
    }

    pub fn deregister_encrypted_pages(&self, pages: &ArcMemPages) -> Result<()> {
        self.vm_memory
            .deregister_encrypted_range(pages.as_slice())?;
        Ok(())
    }

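    /// Marks the guest range `[gpa, gpa + size)` as private (or shared,
    /// when `private` is false). Only the parts of the window backed by
    /// host pages (`MemRange::Ram` or `MemRange::DevMem`) are forwarded
    /// to the hypervisor; emulated and spanned ranges are skipped.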
    pub fn mark_private_memory(&self, gpa: u64, size: u64, private: bool) -> Result<()> {
        let vm_memory = &self.vm_memory;
        let regions = self.regions.lock();
        let end = gpa + size;
        let mut start = gpa;
        'out: while let Some((mut addr, region)) = regions.search_next(start) {
            let next_start = addr + region.size();
            for range in &region.ranges {
                // Only host-backed ranges (RAM or device memory) can have
                // their privacy changed; skip emulated and spanned ranges.
                let (MemRange::DevMem(r) | MemRange::Ram(r)) = range else {
                    addr += range.size();
                    continue;
                };
                let range_end = addr + r.size();
                if range_end <= start {
                    addr = range_end;
                    continue;
                }
                // Clamp the backed range to the requested window.
                let gpa_start = std::cmp::max(addr, start);
                let gpa_end = std::cmp::min(end, range_end);
                if gpa_start >= gpa_end {
                    break 'out;
                }
                vm_memory.mark_private_memory(gpa_start, gpa_end - gpa_start, private)?;
                start = gpa_end;
                // Advance past this range so any following range in the
                // same region is positioned correctly.
                addr = range_end;
            }
            if next_start >= end {
                break;
            }
            start = next_start;
        }
        Ok(())
    }
}

impl Drop for Memory {
    fn drop(&mut self) {
        if let Err(e) = self.clear() {
            log::info!("dropping memory: {e}")
        }
    }
}

impl Memory {
    fn handle_action(&self, action: Action, none: VmEntry) -> Result<VmEntry> {
        match action {
            Action::None => Ok(none),
            Action::Shutdown => Ok(VmEntry::Shutdown),
            Action::Reset => Ok(VmEntry::Reboot),
            Action::ChangeLayout { callback } => {
                callback.change(self)?;
                Ok(none)
            }
        }
    }

    pub fn handle_mmio(&self, gpa: u64, write: Option<u64>, size: u8) -> Result<VmEntry> {
        let mmio_bus = self.mmio_bus.read();
        if let Some(val) = write {
            let action = mmio_bus.write(gpa, size, val)?;
            drop(mmio_bus);
            self.handle_action(action, VmEntry::None)
        } else {
            let data = mmio_bus.read(gpa, size)?;
            Ok(VmEntry::Mmio { data })
        }
    }
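
    // Wiring sketch for a vCPU run loop (hypothetical exit shape shown
    // for illustration; the real exit type comes from crate::hv):
    //
    //     let entry = match exit {
    //         // A write exit carries Some(value); a read exit carries None,
    //         // and handle_mmio returns the read data via VmEntry::Mmio.
    //         Exit::Mmio { addr, write, size } => memory.handle_mmio(addr, write, size)?,
    //         // ...
    //     };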

    #[cfg(target_arch = "x86_64")]
    pub fn handle_io(&self, port: u16, write: Option<u32>, size: u8) -> Result<VmEntry> {
        let io_bus = self.io_bus.read();
        if let Some(val) = write {
            let action = io_bus.write(port as u64, size, val as u64)?;
            drop(io_bus);
            self.handle_action(action, VmEntry::Io { data: None })
        } else {
            let data = io_bus.read(port as u64, size)? as u32;
            Ok(VmEntry::Io { data: Some(data) })
        }
    }
}

#[derive(Debug)]
pub struct MarkPrivateMemory {
    pub memory: Arc<dyn VmMemory>,
}

impl LayoutChanged for MarkPrivateMemory {
    fn ram_added(&self, gpa: u64, pages: &ArcMemPages) -> Result<()> {
        self.memory.mark_private_memory(gpa, pages.size(), true)?;
        Ok(())
    }

    fn ram_removed(&self, _: u64, _: &ArcMemPages) -> Result<()> {
        Ok(())
    }
}