// NOTE: this file was extracted from an Alioth code-coverage report
// (vcpu.rs, 0.00% covered); the original source follows.

1// Copyright 2024 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15#[cfg(target_arch = "aarch64")]
16#[path = "vcpu_aarch64.rs"]
17mod aarch64;
18#[cfg(target_arch = "x86_64")]
19#[path = "vcpu_x86_64/vcpu_x86_64.rs"]
20mod x86_64;
21
22mod vmentry;
23mod vmexit;
24
25#[cfg(target_arch = "x86_64")]
26use std::arch::x86_64::CpuidResult;
27#[cfg(target_arch = "x86_64")]
28use std::collections::HashMap;
29use std::ops::{Deref, DerefMut};
30use std::os::fd::{AsRawFd, OwnedFd};
31use std::ptr::null_mut;
32use std::sync::Arc;
33
34use libc::{MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE, mmap, munmap};
35use snafu::ResultExt;
36
37#[cfg(target_arch = "x86_64")]
38use crate::arch::cpuid::CpuidIn;
39#[cfg(target_arch = "x86_64")]
40use crate::arch::reg::{DtReg, DtRegVal, SegReg, SegRegVal};
41use crate::arch::reg::{Reg, SReg};
42use crate::ffi;
43use crate::hv::kvm::vm::{KvmVm, VmInner};
44use crate::hv::kvm::{KvmError, kvm_error};
45use crate::hv::{Error, Result, Vcpu, VmEntry, VmExit, error};
46use crate::sys::kvm::{KvmExit, KvmRun, kvm_run};
47
48#[cfg(target_arch = "aarch64")]
49use self::aarch64::VcpuArch;
50#[cfg(target_arch = "x86_64")]
51use self::x86_64::VcpuArch;
52
/// Memory mapping of the shared `kvm_run` communication block of a vCPU fd.
///
/// The region is unmapped when this struct is dropped.
struct KvmRunBlock {
    /// Start address of the mapping (the `mmap` return pointer as `usize`).
    addr: usize,
    /// Size of the mapping in bytes, as passed to `mmap`.
    size: usize,
}
57
impl KvmRunBlock {
    /// Maps `mmap_size` bytes of `fd` read-write and shared, wrapping the
    /// result in a [`KvmRunBlock`].
    ///
    /// # Safety
    ///
    /// `fd` must be a KVM vCPU fd whose mmap region is laid out as a
    /// `KvmRun` structure, and `mmap_size` must be the size KVM reports
    /// for that region (see `vcpu_mmap_size`).
    unsafe fn new(fd: &OwnedFd, mmap_size: usize) -> Result<KvmRunBlock, KvmError> {
        let prot = PROT_READ | PROT_WRITE;
        let addr = ffi!(
            unsafe { mmap(null_mut(), mmap_size, prot, MAP_SHARED, fd.as_raw_fd(), 0,) },
            MAP_FAILED
        )
        .context(kvm_error::MmapVcpuFd)?;
        Ok(KvmRunBlock {
            addr: addr as usize,
            size: mmap_size,
        })
    }

    /// Returns `count` values of `T` starting at byte `offset` into the
    /// mapping.
    ///
    /// # Safety
    ///
    /// `offset..offset + count * size_of::<T>()` must lie entirely within
    /// the mapping and contain valid values of `T`.
    #[cfg(target_arch = "x86_64")]
    unsafe fn data_slice<T>(&self, offset: usize, count: usize) -> &[T] {
        unsafe { std::slice::from_raw_parts((self.addr + offset) as *const T, count) }
    }

    /// Mutable variant of [`Self::data_slice`].
    ///
    /// # Safety
    ///
    /// Same requirements as [`Self::data_slice`]; the caller must also
    /// ensure no other reference aliases the returned range.
    #[cfg(target_arch = "x86_64")]
    unsafe fn data_slice_mut<T>(&mut self, offset: usize, count: usize) -> &mut [T] {
        unsafe { std::slice::from_raw_parts_mut((self.addr + offset) as *mut T, count) }
    }
}
82
impl Deref for KvmRunBlock {
    type Target = KvmRun;

    fn deref(&self) -> &Self::Target {
        // SAFETY: per the contract of `KvmRunBlock::new`, `addr` is the
        // start of a live mapping laid out as a `KvmRun` structure, valid
        // for the lifetime of `self`.
        unsafe { &*(self.addr as *const Self::Target) }
    }
}
90
impl DerefMut for KvmRunBlock {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same invariant as `Deref`; `&mut self` guarantees
        // exclusive access to the mapping through this block.
        unsafe { &mut *(self.addr as *mut Self::Target) }
    }
}
96
impl Drop for KvmRunBlock {
    fn drop(&mut self) {
        // Unmap the kvm_run block. A failure is only logged because
        // `drop` cannot propagate errors.
        if let Err(e) = ffi!(unsafe { munmap(self.addr as _, self.size) }) {
            log::error!("unmap kvm_run: {e}")
        }
    }
}
104
/// A KVM-backed virtual CPU.
pub struct KvmVcpu {
    /// Mapped `kvm_run` communication block for this vCPU. Declared before
    /// `fd`, so it is unmapped before the fd is closed (fields drop in
    /// declaration order).
    kvm_run: KvmRunBlock,
    /// The vCPU file descriptor (created via `Self::create_vcpu`).
    fd: OwnedFd,
    /// Architecture-specific vCPU state.
    arch: VcpuArch,
    /// Held only to keep the owning VM alive for the vCPU's lifetime;
    /// never read, hence the dead_code allowance.
    #[allow(dead_code)]
    vm: Arc<VmInner>,
}
112
113impl KvmVcpu {
114 pub fn new(vm: &KvmVm, index: u16, identity: u64) -> Result<Self> {
115 let vcpu_fd = Self::create_vcpu(vm, index, identity)?;
116 let kvm_run = unsafe { KvmRunBlock::new(&vcpu_fd, vm.vm.vcpu_mmap_size) }?;
117 let arch = VcpuArch::new(identity);
118 Ok(KvmVcpu {
119 fd: vcpu_fd,
120 kvm_run,
121 vm: vm.vm.clone(),
122 arch,
123 })
124 }
125}
126
127impl Vcpu for KvmVcpu {
128 #[cfg(target_arch = "aarch64")]
129 fn reset(&mut self, is_bsp: bool) -> Result<(), Error> {
130 self.kvm_vcpu_init(is_bsp)
131 }
132
133 fn get_reg(&self, reg: Reg) -> Result<u64, Error> {
134 self.kvm_get_reg(reg)
135 }
136
137 #[cfg(target_arch = "x86_64")]
138 fn get_dt_reg(&self, reg: DtReg) -> Result<DtRegVal, Error> {
139 self.kvm_get_dt_reg(reg)
140 }
141
142 #[cfg(target_arch = "x86_64")]
143 fn get_seg_reg(&self, reg: SegReg) -> Result<SegRegVal, Error> {
144 self.kvm_get_seg_reg(reg)
145 }
146
147 fn get_sreg(&self, reg: SReg) -> Result<u64, Error> {
148 self.kvm_get_sreg(reg)
149 }
150
151 fn set_regs(&mut self, vals: &[(Reg, u64)]) -> Result<(), Error> {
152 self.kvm_set_regs(vals)
153 }
154
155 #[cfg(target_arch = "x86_64")]
156 fn set_sregs(
157 &mut self,
158 sregs: &[(SReg, u64)],
159 seg_regs: &[(SegReg, SegRegVal)],
160 dt_regs: &[(DtReg, DtRegVal)],
161 ) -> Result<(), Error> {
162 self.kvm_set_sregs(sregs, seg_regs, dt_regs)
163 }
164
165 #[cfg(target_arch = "aarch64")]
166 fn set_sregs(&mut self, sregs: &[(SReg, u64)]) -> Result<(), Error> {
167 self.kvm_set_sregs(sregs)
168 }
169
170 fn run(&mut self, entry: VmEntry) -> Result<VmExit, Error> {
171 match entry {
172 VmEntry::None => {}
173 #[cfg(target_arch = "x86_64")]
174 VmEntry::Io { data } => {
175 let r = self.entry_io(data);
176 if let Some(exit) = r {
177 return Ok(exit);
178 }
179 }
180 VmEntry::Mmio { data } => self.entry_mmio(data),
181 VmEntry::Shutdown | VmEntry::Reboot | VmEntry::Pause => self.set_immediate_exit(true),
182 };
183 let ret = unsafe { kvm_run(&self.fd) };
184 match ret {
185 Err(e) => match (e.raw_os_error(), entry) {
186 (Some(libc::EAGAIN), _) => Ok(VmExit::Interrupted),
187 (Some(libc::EINTR), VmEntry::Shutdown) => {
188 self.set_immediate_exit(false);
189 Ok(VmExit::Shutdown)
190 }
191 (Some(libc::EINTR), VmEntry::Reboot) => {
192 self.set_immediate_exit(false);
193 Ok(VmExit::Reboot)
194 }
195 (Some(libc::EINTR), VmEntry::Pause) => {
196 #[cfg(target_arch = "x86_64")]
197 if let Err(e) = self.kvmclock_ctrl() {
198 log::error!("Failed to control kvmclock: {e:?}");
199 }
200 self.set_immediate_exit(false);
201 Ok(VmExit::Paused)
202 }
203 (Some(libc::EINTR), _) => Ok(VmExit::Interrupted),
204 (Some(libc::EFAULT), _) if self.kvm_run.exit_reason == KvmExit::MEMORY_FAULT => {
205 Ok(self.handle_memory_fault())
206 }
207 _ => Err(e).context(error::RunVcpu),
208 },
209 Ok(_) => match self.kvm_run.exit_reason {
210 #[cfg(target_arch = "x86_64")]
211 KvmExit::IO => Ok(self.handle_io()),
212 KvmExit::HYPERCALL => self.handle_hypercall(),
213 KvmExit::MMIO => self.handle_mmio(),
214 KvmExit::SHUTDOWN => Ok(VmExit::Shutdown),
215 KvmExit::SYSTEM_EVENT => self.handle_system_event(),
216 reason => error::VmExit {
217 msg: format!("unkown kvm exit: {reason:#x?}"),
218 }
219 .fail(),
220 },
221 }
222 }
223
224 #[cfg(target_arch = "x86_64")]
225 fn set_cpuids(&mut self, cpuids: HashMap<CpuidIn, CpuidResult>) -> Result<(), Error> {
226 self.kvm_set_cpuids(&cpuids)
227 }
228
229 #[cfg(target_arch = "x86_64")]
230 fn set_msrs(&mut self, msrs: &[(u32, u64)]) -> Result<()> {
231 self.kvm_set_msrs(msrs)
232 }
233
234 fn dump(&self) -> Result<(), Error> {
235 Ok(())
236 }
237
238 #[cfg(target_arch = "x86_64")]
239 fn tdx_init_vcpu(&self, hob: u64) -> Result<()> {
240 KvmVcpu::tdx_init_vcpu(self, hob)
241 }
242
243 #[cfg(target_arch = "x86_64")]
244 fn tdx_init_mem_region(&self, data: &[u8], gpa: u64, measure: bool) -> Result<()> {
245 KvmVcpu::tdx_init_mem_region(self, data, gpa, measure)
246 }
247}
248