Alioth Code Coverage

sev.rs — 0.00% line coverage

1// Copyright 2026 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::arch::x86_64::{__cpuid, CpuidResult};
16use std::collections::HashMap;
17use std::iter::zip;
18use std::sync::Arc;
19use std::sync::atomic::Ordering;
20
21use zerocopy::FromZeros;
22
23use crate::arch::cpuid::{
24 Cpuid1Ecx, Cpuid7Index0Ebx, Cpuid7Index0Edx, CpuidExt1fEAx, CpuidExt1fEbx, CpuidExt8Ebx,
25 CpuidExt21EAx, CpuidIn,
26};
27use crate::arch::layout::MEM_64_START;
28use crate::arch::reg::{Reg, SegAccess, SegReg, SegRegVal};
29use crate::arch::sev::{SevPolicy, SnpPageType, SnpPolicy};
30use crate::board::{Board, Result, error};
31use crate::firmware::ovmf::sev::{
32 SevDescType, SevMetadataDesc, SnpCpuidFunc, SnpCpuidInfo, parse_desc, parse_sev_ap_eip,
33};
34use crate::hv::{Coco, Vcpu, Vm, VmMemory};
35use crate::mem::mapped::ArcMemPages;
36use crate::mem::{self, LayoutChanged, MarkPrivateMemory};
37
38pub fn adjust_cpuid(coco: &Coco, cpuids: &mut HashMap<CpuidIn, CpuidResult>) -> Result<()> {
39 // AMD Volume 3, section E.4.17.
40 let in_ = CpuidIn {
41 func: 0x8000_001f,
42 index: None,
43 };
44 let Some(out) = cpuids.get_mut(&in_) else {
45 return error::MissingCpuid { leaf: in_ }.fail();
46 };
47 let host_ebx = CpuidExt1fEbx(__cpuid(in_.func).ebx);
48 out.ebx = CpuidExt1fEbx::new(host_ebx.cbit_pos(), 1, 0).0;
49 out.ecx = 0;
50 out.edx = 0;
51 if let Coco::AmdSev { policy } = coco {
52 out.eax = if policy.es() {
53 (CpuidExt1fEAx::SEV | CpuidExt1fEAx::SEV_ES).bits()
54 } else {
55 CpuidExt1fEAx::SEV.bits()
56 };
57 } else if let Coco::AmdSnp { .. } = coco {
58 out.eax = (CpuidExt1fEAx::SEV | CpuidExt1fEAx::SEV_ES | CpuidExt1fEAx::SEV_SNP).bits()
59 }
60
61 if let Coco::AmdSnp { .. } = coco {
62 snp_adjust_cpuids(cpuids);
63 }
64
65 Ok(())
66}
67
68fn snp_adjust_cpuids(cpuids: &mut HashMap<CpuidIn, CpuidResult>) {
69 let in_ = CpuidIn {
70 func: 0x1,
71 index: None,
72 };
73 if let Some(out) = cpuids.get_mut(&in_) {
74 out.ecx &= !Cpuid1Ecx::TSC_DEADLINE.bits()
75 };
76
77 let in_ = CpuidIn {
78 func: 0x7,
79 index: Some(0),
80 };
81 if let Some(out) = cpuids.get_mut(&in_) {
82 out.ebx &= !Cpuid7Index0Ebx::TSC_ADJUST.bits();
83 out.edx &= !(Cpuid7Index0Edx::IBRS_IBPB
84 | Cpuid7Index0Edx::SPEC_CTRL_ST_PREDICTORS
85 | Cpuid7Index0Edx::L1D_FLUSH_INTERFACE
86 | Cpuid7Index0Edx::ARCH_CAPABILITIES
87 | Cpuid7Index0Edx::CORE_CAPABILITIES
88 | Cpuid7Index0Edx::SPEC_CTRL_SSBD)
89 .bits()
90 }
91
92 let in_ = CpuidIn {
93 func: 0x8000_0008,
94 index: None,
95 };
96 if let Some(out) = cpuids.get_mut(&in_) {
97 out.ebx &= !CpuidExt8Ebx::SSBD_VIRT_SPEC_CTRL.bits();
98 }
99
100 let in_ = CpuidIn {
101 func: 0x8000_0021,
102 index: None,
103 };
104 if let Some(out) = cpuids.get_mut(&in_) {
105 out.eax &= !CpuidExt21EAx::NO_SMM_CTL_MSR.bits();
106 }
107
108 for index in 0..=4 {
109 cpuids.remove(&CpuidIn {
110 func: 0x8000_0026,
111 index: Some(index),
112 });
113 }
114}
115
impl<V> Board<V>
where
    V: Vm,
{
    /// Copies the board's CPUID table into `entries`, the function array of
    /// the guest's SNP CPUID page. Pairs are copied until either the table
    /// or `entries` runs out (`zip` stops at the shorter side).
    fn fill_snp_cpuid(&self, entries: &mut [SnpCpuidFunc]) {
        for ((in_, out), dst) in zip(self.arch.cpuids.iter(), entries.iter_mut()) {
            dst.eax_in = in_.func;
            // Leaves without a sub-leaf are recorded with ecx_in = 0.
            dst.ecx_in = in_.index.unwrap_or(0);
            dst.eax = out.eax;
            dst.ebx = out.ebx;
            dst.ecx = out.ecx;
            dst.edx = out.edx;
            // XSAVE leaf 0xd, sub-leaves 0 and 1: pin ebx to 0x240 and the
            // XCR0 input to 1 (x87 only), XSS input to 0.
            // NOTE(review): presumably these are the values the SNP firmware
            // validates the CPUID page against — confirm against the
            // firmware's expectations.
            if dst.eax_in == 0xd && (dst.ecx_in == 0x0 || dst.ecx_in == 0x1) {
                dst.ebx = 0x240;
                dst.xcr0_in = 1;
                dst.xss_in = 0;
            }
        }
    }

    /// Extracts the AP reset EIP from the firmware image and caches it in
    /// `arch.sev_ap_eip` with `Release` ordering; `sev_init_ap` reads it
    /// back with matching `Acquire` ordering.
    fn parse_sev_api_eip(&self, data: &[u8]) -> Result<()> {
        let ap_eip = parse_sev_ap_eip(data)?;
        self.arch.sev_ap_eip.store(ap_eip, Ordering::Release);
        Ok(())
    }

    /// Processes one SEV metadata descriptor from the firmware: picks the
    /// SNP page type from the descriptor type (writing the CPUID table into
    /// guest RAM for CPUID descriptors), marks the range private, and
    /// submits it via SNP_LAUNCH_UPDATE.
    fn update_snp_desc(&self, desc: &SevMetadataDesc) -> Result<()> {
        let mut cpuid_table = SnpCpuidInfo::new_zeroed();
        let ram_bus = self.memory.ram_bus();
        // Hold the layout lock so the GPA range stays mapped while we write
        // to it and hand it to the hypervisor below.
        let ram = ram_bus.lock_layout();
        let page_type = match desc.type_ {
            SevDescType::SNP_DESC_MEM => SnpPageType::UNMEASURED,
            SevDescType::SNP_SECRETS => SnpPageType::SECRETS,
            SevDescType::CPUID => {
                // The descriptor must be able to hold the whole table, and
                // the fixed-size table must fit all configured leaves.
                assert!(desc.len as usize >= size_of::<SnpCpuidInfo>());
                assert!(cpuid_table.entries.len() >= self.arch.cpuids.len());
                cpuid_table.count = self.arch.cpuids.len() as u32;
                self.fill_snp_cpuid(&mut cpuid_table.entries);
                // Write the table into guest RAM so it becomes the content
                // of the measured CPUID page.
                ram.write_t(desc.base as _, &cpuid_table)?;
                SnpPageType::CPUID
            }
            _ => SnpPageType::ZERO,
        };
        let range_ref = ram.get_slice::<u8>(desc.base as u64, desc.len as u64)?;
        // NOTE(review): this creates a mutable alias of guest RAM from a
        // shared slice; assumed sound because the layout lock is held and
        // nothing else is expected to touch this range during launch —
        // confirm.
        let bytes =
            unsafe { std::slice::from_raw_parts_mut(range_ref.as_ptr() as _, range_ref.len()) };
        // The range must be private (encrypted) before launch-update.
        self.memory
            .mark_private_memory(desc.base as _, desc.len as _, true)?;
        let ret = self.vm.snp_launch_update(bytes, desc.base as _, page_type);
        // If a CPUID page is rejected, the firmware writes back the table it
        // expects; read it and log every entry that differs from what we set
        // before propagating the error.
        if ret.is_err() && desc.type_ == SevDescType::CPUID {
            let updated_cpuid: SnpCpuidInfo = ram.read_t(desc.base as _)?;
            for (set, got) in zip(&cpuid_table.entries, &updated_cpuid.entries) {
                if set != got {
                    log::error!("set {set:#x?}, but firmware expects {got:#x?}");
                }
            }
        }
        ret?;
        Ok(())
    }

    /// Sets up a (non-SNP) SEV guest from the firmware image: registers the
    /// pages as encrypted, records the AP reset EIP when SEV-ES is enabled,
    /// and measures the firmware with LAUNCH_UPDATE_DATA.
    pub(crate) fn setup_sev(&self, fw: &mut ArcMemPages, policy: SevPolicy) -> Result<()> {
        self.memory.register_encrypted_pages(fw)?;

        let data = fw.as_slice_mut();
        if policy.es() {
            self.parse_sev_api_eip(data)?;
        }
        self.vm.sev_launch_update_data(data)?;
        Ok(())
    }

    /// Sets up an SNP guest from the firmware image: registers the pages as
    /// encrypted, processes every SEV metadata descriptor, then marks the
    /// firmware range private and measures it as NORMAL pages.
    pub(crate) fn setup_snp(&self, fw: &mut ArcMemPages) -> Result<()> {
        self.memory.register_encrypted_pages(fw)?;

        let data = fw.as_slice_mut();
        self.parse_sev_api_eip(data)?;
        for desc in parse_desc(data)? {
            self.update_snp_desc(desc)?;
        }
        // The firmware is placed so that it ends right below MEM_64_START.
        let fw_gpa = MEM_64_START - data.len() as u64;
        self.memory
            .mark_private_memory(fw_gpa, data.len() as _, true)?;
        self.vm
            .snp_launch_update(data, fw_gpa, SnpPageType::NORMAL)?;
        Ok(())
    }

    /// Finishes SEV launch: for SEV-ES first encrypts the vCPU VMSAs, then
    /// takes the launch measurement and finalizes.
    pub(crate) fn sev_finalize(&self, policy: SevPolicy) -> Result<()> {
        if policy.es() {
            self.vm.sev_launch_update_vmsa()?;
        }
        self.vm.sev_launch_measure()?;
        self.vm.sev_launch_finish()?;
        Ok(())
    }

    /// Finishes SNP launch.
    pub(crate) fn snp_finalize(&self) -> Result<()> {
        self.vm.snp_launch_finish()?;
        Ok(())
    }

    /// Points an AP at the reset EIP cached by `parse_sev_api_eip`: the low
    /// 16 bits go into RIP and the upper bits become the CS base, with
    /// selector 0xf000, limit 0xffff, and access byte 0x9b (present, code,
    /// execute/read, accessed).
    pub(crate) fn sev_init_ap(&self, vcpu: &mut V::Vcpu) -> Result<()> {
        let eip = self.arch.sev_ap_eip.load(Ordering::Acquire);
        vcpu.set_regs(&[(Reg::Rip, eip as u64 & 0xffff)])?;
        vcpu.set_sregs(
            &[],
            &[(
                SegReg::Cs,
                SegRegVal {
                    selector: 0xf000,
                    base: eip as u64 & 0xffff_0000,
                    limit: 0xffff,
                    access: SegAccess(0x9b),
                },
            )],
            &[],
        )?;
        Ok(())
    }

    /// Starts SEV launch and registers a callback so that RAM added later is
    /// also registered as encrypted.
    pub(crate) fn sev_init(&self, policy: SevPolicy, memory: Arc<V::Memory>) -> Result<()> {
        self.vm.sev_launch_start(policy)?;
        let encrypt_pages = Box::new(EncryptPages { memory });
        self.memory.register_change_callback(encrypt_pages)?;
        Ok(())
    }

    /// Starts SNP launch and registers two callbacks: RAM added later is
    /// registered as encrypted and also marked private.
    pub(crate) fn snp_init(&self, policy: SnpPolicy, memory: Arc<V::Memory>) -> Result<()> {
        self.vm.snp_launch_start(policy)?;
        let encrypt_pages = Box::new(EncryptPages {
            memory: memory.clone(),
        });
        self.memory.register_change_callback(encrypt_pages)?;
        let mark_private_memory = Box::new(MarkPrivateMemory { memory });
        self.memory.register_change_callback(mark_private_memory)?;
        Ok(())
    }
}
255
/// Memory-layout callback that registers newly added guest RAM with the
/// hypervisor as encrypted memory.
#[derive(Debug)]
pub struct EncryptPages {
    // Hypervisor memory handle used to register encrypted ranges.
    memory: Arc<dyn VmMemory>,
}
260
261impl LayoutChanged for EncryptPages {
262 fn ram_added(&self, _: u64, pages: &ArcMemPages) -> mem::Result<()> {
263 self.memory.register_encrypted_range(pages.as_slice())?;
264 Ok(())
265 }
266
267 fn ram_removed(&self, _: u64, _: &ArcMemPages) -> mem::Result<()> {
268 Ok(())
269 }
270}
271