Alioth Code Coverage

vcpu.rs — 0.00% line coverage

1// Copyright 2024 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15mod vmentry;
16mod vmexit;
17
18use std::collections::HashMap;
19use std::ptr::null_mut;
20use std::sync::atomic::{AtomicBool, Ordering};
21use std::sync::mpsc::{Receiver, Sender};
22use std::sync::{Arc, mpsc};
23
24use parking_lot::Mutex;
25use snafu::ResultExt;
26
27use crate::arch::reg::{MpidrEl1, Pstate, Reg, SReg};
28use crate::hv::hvf::check_ret;
29use crate::hv::hvf::vm::{HvfVm, VcpuEvent};
30use crate::hv::{Result, Vcpu, VmEntry, VmExit, error};
31use crate::sys::hvf::{
32 HvExitReason, HvReg, HvVcpuExit, hv_vcpu_create, hv_vcpu_destroy, hv_vcpu_get_reg,
33 hv_vcpu_get_sys_reg, hv_vcpu_run, hv_vcpu_set_reg, hv_vcpu_set_sys_reg,
34};
35
/// Handle to a vcpu, registered in the VM-wide vcpu map ([`HvfVm::vcpus`])
/// so that other vcpus can signal this one (e.g. to deliver a power-on
/// request — presumably for PSCI CPU_ON; confirm in the vmexit handlers).
#[derive(Debug)]
pub struct VcpuHandle {
    /// HVF-assigned vcpu id, produced by `hv_vcpu_create`.
    pub vcpu_id: u64,
    /// Whether this vcpu has been powered on; shared with the vcpu's
    /// own `power_on` flag so senders can observe/affect its state.
    pub power_on: Arc<AtomicBool>,
    /// Channel for delivering [`VcpuEvent`]s to the vcpu thread while it
    /// is blocked waiting for power-on.
    pub sender: Sender<VcpuEvent>,
}
42
/// A vcpu backed by macOS Hypervisor.framework.
#[derive(Debug)]
pub struct HvfVcpu {
    /// Exit-information buffer; the pointer is filled in by
    /// `hv_vcpu_create` and the buffer is read after each `hv_vcpu_run`.
    exit: *mut HvVcpuExit,
    /// HVF-assigned vcpu id used for all `hv_vcpu_*` calls.
    vcpu_id: u64,
    /// Pending [`VmExit`] recorded while handling an exception exit
    /// (set in the `vmexit` module — see `handle_exception`), consumed
    /// by [`Vcpu::run`].
    vmexit: Option<VmExit>,
    /// Register to write back on re-entry — presumably the destination
    /// register of an in-flight MMIO read; confirm in vmentry.rs.
    exit_reg: Option<HvReg>,
    /// Shared map of all vcpus by MPIDR_EL1, cloned from [`HvfVm::vcpus`].
    vcpus: Arc<Mutex<HashMap<MpidrEl1, Arc<VcpuHandle>>>>,
    /// Receives [`VcpuEvent`]s (interrupt / power-on) from other threads.
    receiver: Receiver<VcpuEvent>,
    /// Whether this vcpu is powered on; shared with its [`VcpuHandle`].
    power_on: Arc<AtomicBool>,
}
53
54impl HvfVcpu {
55 fn power_on(&mut self, pc: u64, context: u64) -> Result<()> {
56 let pstate = (Pstate::EL_H | Pstate::EL_BIT2).bits() as u64;
57 self.set_regs(&[(Reg::Pc, pc), (Reg::X0, context), (Reg::Pstate, pstate)])?;
58 self.set_sregs(&[(SReg::SCTLR_EL1, 0x0)])?;
59 self.power_on.store(true, Ordering::Relaxed);
60 Ok(())
61 }
62
63 pub fn new(vm: &HvfVm, _: u16, identity: u64) -> Result<Self> {
64 let mut exit = null_mut();
65 let mut vcpu_id = 0;
66 let ret = unsafe { hv_vcpu_create(&mut vcpu_id, &mut exit, null_mut()) };
67 check_ret(ret).context(error::CreateVcpu)?;
68
69 let mpidr = MpidrEl1(identity);
70 let ret = unsafe { hv_vcpu_set_sys_reg(vcpu_id, SReg::MPIDR_EL1, mpidr.0) };
71 check_ret(ret).context(error::VcpuReg)?;
72
73 let (sender, receiver) = mpsc::channel();
74
75 let power_on = Arc::new(AtomicBool::new(false));
76 let handle = Arc::new(VcpuHandle {
77 vcpu_id,
78 sender,
79 power_on: power_on.clone(),
80 });
81
82 vm.vcpus.lock().insert(mpidr, handle);
83
84 Ok(HvfVcpu {
85 exit,
86 vcpu_id,
87 vmexit: None,
88 exit_reg: None,
89 vcpus: vm.vcpus.clone(),
90 receiver,
91 power_on,
92 })
93 }
94}
95
96impl Drop for HvfVcpu {
97 fn drop(&mut self) {
98 let ret = unsafe { hv_vcpu_destroy(self.vcpu_id) };
99 if let Err(e) = check_ret(ret) {
100 log::error!("hv_vcpu_destroy: {e:?}");
101 }
102 }
103}
104
/// The HVF-level identity of a register: either a core register (`HvReg`,
/// accessed via `hv_vcpu_get_reg`/`hv_vcpu_set_reg`) or a system register
/// (`SReg`, accessed via the `_sys_reg` variants).
enum HvfReg {
    Reg(HvReg),
    SReg(SReg),
}
109
impl Reg {
    /// Maps an architecture-neutral [`Reg`] to the HVF register used to
    /// access it, selecting between the core-register and system-register
    /// accessor families.
    fn to_hvf_reg(self) -> HvfReg {
        match self {
            Reg::X0 => HvfReg::Reg(HvReg::X0),
            Reg::X1 => HvfReg::Reg(HvReg::X1),
            Reg::X2 => HvfReg::Reg(HvReg::X2),
            Reg::X3 => HvfReg::Reg(HvReg::X3),
            Reg::X4 => HvfReg::Reg(HvReg::X4),
            Reg::X5 => HvfReg::Reg(HvReg::X5),
            Reg::X6 => HvfReg::Reg(HvReg::X6),
            Reg::X7 => HvfReg::Reg(HvReg::X7),
            Reg::X8 => HvfReg::Reg(HvReg::X8),
            Reg::X9 => HvfReg::Reg(HvReg::X9),
            Reg::X10 => HvfReg::Reg(HvReg::X10),
            Reg::X11 => HvfReg::Reg(HvReg::X11),
            Reg::X12 => HvfReg::Reg(HvReg::X12),
            Reg::X13 => HvfReg::Reg(HvReg::X13),
            Reg::X14 => HvfReg::Reg(HvReg::X14),
            Reg::X15 => HvfReg::Reg(HvReg::X15),
            Reg::X16 => HvfReg::Reg(HvReg::X16),
            Reg::X17 => HvfReg::Reg(HvReg::X17),
            Reg::X18 => HvfReg::Reg(HvReg::X18),
            Reg::X19 => HvfReg::Reg(HvReg::X19),
            Reg::X20 => HvfReg::Reg(HvReg::X20),
            Reg::X21 => HvfReg::Reg(HvReg::X21),
            Reg::X22 => HvfReg::Reg(HvReg::X22),
            Reg::X23 => HvfReg::Reg(HvReg::X23),
            Reg::X24 => HvfReg::Reg(HvReg::X24),
            Reg::X25 => HvfReg::Reg(HvReg::X25),
            Reg::X26 => HvfReg::Reg(HvReg::X26),
            Reg::X27 => HvfReg::Reg(HvReg::X27),
            Reg::X28 => HvfReg::Reg(HvReg::X28),
            Reg::X29 => HvfReg::Reg(HvReg::X29),
            Reg::X30 => HvfReg::Reg(HvReg::X30),
            // The stack pointer is only reachable as the SP_EL0 system
            // register here, not as a core register.
            Reg::Sp => HvfReg::SReg(SReg::SP_EL0),
            Reg::Pc => HvfReg::Reg(HvReg::PC),
            // PSTATE is accessed through HVF's CPSR core register.
            Reg::Pstate => HvfReg::Reg(HvReg::CPSR),
        }
    }
}
150
impl Vcpu for HvfVcpu {
    /// Resets the vcpu to its boot state. Only the bootstrap cpu
    /// (`is_bsp`) starts powered on; secondaries block in [`Self::run`]
    /// until they receive a power-on event. SCTLR_EL1 is cleared in both
    /// cases.
    fn reset(&mut self, is_bsp: bool) -> Result<()> {
        self.power_on.store(is_bsp, Ordering::Relaxed);
        self.set_sregs(&[(SReg::SCTLR_EL1, 0x0)])
    }

    /// Register-state dump is not implemented for the HVF backend.
    fn dump(&self) -> Result<()> {
        unimplemented!()
    }

    /// Runs the vcpu until it produces a [`VmExit`].
    ///
    /// `entry` carries the result of handling the previous exit (e.g. the
    /// data completing an MMIO read); `Shutdown`/`Reboot`/`Pause` entries
    /// return immediately without entering the guest. A powered-off vcpu
    /// first blocks on the event channel until it is powered on or
    /// interrupted.
    fn run(&mut self, entry: VmEntry) -> Result<VmExit> {
        match entry {
            VmEntry::None => {}
            VmEntry::Mmio { data } => self.entry_mmio(data)?,
            VmEntry::Shutdown => return Ok(VmExit::Shutdown),
            VmEntry::Reboot => return Ok(VmExit::Reboot),
            VmEntry::Pause => return Ok(VmExit::Paused),
        }

        // Not powered on yet: wait for another vcpu (via our VcpuHandle
        // sender) to power us on, or bail out on an interrupt event.
        if !self.power_on.load(Ordering::Relaxed) {
            match self.receiver.recv()? {
                VcpuEvent::Interrupt => return Ok(VmExit::Interrupted),
                VcpuEvent::PowerOn { pc, context } => {
                    self.power_on(pc, context)?;
                }
            }
        }
        loop {
            let ret = unsafe { hv_vcpu_run(self.vcpu_id) };
            check_ret(ret).context(error::RunVcpu)?;

            // `self.exit` was produced by hv_vcpu_create and holds the
            // details of the exit that just ended hv_vcpu_run.
            let exit = unsafe { &*self.exit };
            match exit.reason {
                HvExitReason::EXCEPTION => {
                    // May record a pending exit in `self.vmexit`
                    // (checked below) or fully handle the exception and
                    // let the loop re-enter the guest.
                    self.handle_exception(&exit.exception)?;
                }
                // Run cancelled from another thread — report as an
                // interrupt. (Presumably triggered via hv_vcpus_exit;
                // confirm in vm.rs.)
                HvExitReason::CANCEL => break Ok(VmExit::Interrupted),
                _ => {
                    // Any other exit reason is unexpected on this backend.
                    break error::VmExit {
                        msg: format!("{exit:x?}"),
                    }
                    .fail();
                }
            }
            if let Some(exit) = self.vmexit.take() {
                break Ok(exit);
            }
        }
    }

    /// Reads a register, dispatching to the core- or system-register
    /// accessor depending on how HVF exposes it.
    fn get_reg(&self, reg: Reg) -> Result<u64> {
        let hvf_reg = reg.to_hvf_reg();
        let mut val = 0;
        let ret = match hvf_reg {
            HvfReg::Reg(r) => unsafe { hv_vcpu_get_reg(self.vcpu_id, r, &mut val) },
            HvfReg::SReg(r) => unsafe { hv_vcpu_get_sys_reg(self.vcpu_id, r, &mut val) },
        };
        check_ret(ret).context(error::VcpuReg)?;
        Ok(val)
    }

    /// Writes a batch of registers, stopping at the first failure.
    fn set_regs(&mut self, vals: &[(Reg, u64)]) -> Result<()> {
        for (reg, val) in vals {
            let hvf_reg = reg.to_hvf_reg();
            let ret = match hvf_reg {
                HvfReg::Reg(r) => unsafe { hv_vcpu_set_reg(self.vcpu_id, r, *val) },
                HvfReg::SReg(r) => unsafe { hv_vcpu_set_sys_reg(self.vcpu_id, r, *val) },
            };
            check_ret(ret).context(error::VcpuReg)?;
        }
        Ok(())
    }

    /// Reads a single system register.
    fn get_sreg(&self, reg: SReg) -> Result<u64> {
        let mut val = 0;
        let ret = unsafe { hv_vcpu_get_sys_reg(self.vcpu_id, reg, &mut val) };
        check_ret(ret).context(error::VcpuReg)?;
        Ok(val)
    }

    /// Writes a batch of system registers, stopping at the first failure.
    fn set_sregs(&mut self, sregs: &[(SReg, u64)]) -> Result<()> {
        for (reg, val) in sregs {
            let ret = unsafe { hv_vcpu_set_sys_reg(self.vcpu_id, *reg, *val) };
            check_ret(ret).context(error::VcpuReg)?;
        }
        Ok(())
    }
}
239
240#[cfg(test)]
241#[path = "vcpu_test.rs"]
242mod tests;
243