Alioth Code Coverage

sev.rs — 0.00% line coverage

// Copyright 2026 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
14
15use std::iter::zip;
16use std::sync::Arc;
17use std::sync::atomic::Ordering;
18
19use zerocopy::FromZeros;
20
21use crate::arch::layout::MEM_64_START;
22use crate::arch::reg::{Reg, SegAccess, SegReg, SegRegVal};
23use crate::arch::sev::{SevPolicy, SnpPageType, SnpPolicy};
24use crate::board::{Board, Result};
25use crate::firmware::ovmf::sev::{
26 SevDescType, SevMetadataDesc, SnpCpuidFunc, SnpCpuidInfo, parse_desc, parse_sev_ap_eip,
27};
28use crate::hv::{Vcpu, Vm, VmMemory};
29use crate::mem::mapped::ArcMemPages;
30use crate::mem::{self, LayoutChanged, MarkPrivateMemory};
31
32impl<V> Board<V>
33where
34 V: Vm,
35{
36 fn fill_snp_cpuid(&self, entries: &mut [SnpCpuidFunc]) {
37 for ((in_, out), dst) in zip(self.arch.cpuids.iter(), entries.iter_mut()) {
38 dst.eax_in = in_.func;
39 dst.ecx_in = in_.index.unwrap_or(0);
40 dst.eax = out.eax;
41 dst.ebx = out.ebx;
42 dst.ecx = out.ecx;
43 dst.edx = out.edx;
44 if dst.eax_in == 0xd && (dst.ecx_in == 0x0 || dst.ecx_in == 0x1) {
45 dst.ebx = 0x240;
46 dst.xcr0_in = 1;
47 dst.xss_in = 0;
48 }
49 }
50 }
51
52 fn parse_sev_api_eip(&self, data: &[u8]) -> Result<()> {
53 let ap_eip = parse_sev_ap_eip(data)?;
54 self.arch.sev_ap_eip.store(ap_eip, Ordering::Release);
55 Ok(())
56 }
57
58 fn update_snp_desc(&self, desc: &SevMetadataDesc) -> Result<()> {
59 let mut cpuid_table = SnpCpuidInfo::new_zeroed();
60 let ram_bus = self.memory.ram_bus();
61 let ram = ram_bus.lock_layout();
62 let snp_page_type = match desc.type_ {
63 SevDescType::SNP_DESC_MEM => SnpPageType::UNMEASURED,
64 SevDescType::SNP_SECRETS => SnpPageType::SECRETS,
65 SevDescType::CPUID => {
66 assert!(desc.len as usize >= size_of::<SnpCpuidInfo>());
67 assert!(cpuid_table.entries.len() >= self.arch.cpuids.len());
68 cpuid_table.count = self.arch.cpuids.len() as u32;
69 self.fill_snp_cpuid(&mut cpuid_table.entries);
70 ram.write_t(desc.base as _, &cpuid_table)?;
71 SnpPageType::CPUID
72 }
73 _ => unimplemented!(),
74 };
75 let range_ref = ram.get_slice::<u8>(desc.base as u64, desc.len as u64)?;
76 let range_bytes =
77 unsafe { std::slice::from_raw_parts_mut(range_ref.as_ptr() as _, range_ref.len()) };
78 self.memory
79 .mark_private_memory(desc.base as _, desc.len as _, true)?;
80 let mut ret = self
81 .vm
82 .snp_launch_update(range_bytes, desc.base as _, snp_page_type);
83 if ret.is_err() && desc.type_ == SevDescType::CPUID {
84 let updated_cpuid: SnpCpuidInfo = ram.read_t(desc.base as _)?;
85 for (set, got) in zip(cpuid_table.entries.iter(), updated_cpuid.entries.iter()) {
86 if set != got {
87 log::error!("set {set:#x?}, but firmware expects {got:#x?}");
88 }
89 }
90 ram.write_t(desc.base as _, &updated_cpuid)?;
91 ret = self
92 .vm
93 .snp_launch_update(range_bytes, desc.base as _, snp_page_type);
94 }
95 ret?;
96 Ok(())
97 }
98
99 pub(crate) fn setup_sev(&self, fw: &mut ArcMemPages, policy: SevPolicy) -> Result<()> {
100 self.memory.register_encrypted_pages(fw)?;
101
102 let data = fw.as_slice_mut();
103 if policy.es() {
104 self.parse_sev_api_eip(data)?;
105 }
106 self.vm.sev_launch_update_data(data)?;
107 Ok(())
108 }
109
110 pub(crate) fn setup_snp(&self, fw: &mut ArcMemPages) -> Result<()> {
111 self.memory.register_encrypted_pages(fw)?;
112
113 let data = fw.as_slice_mut();
114 self.parse_sev_api_eip(data)?;
115 for desc in parse_desc(data)? {
116 self.update_snp_desc(desc)?;
117 }
118 let fw_gpa = MEM_64_START - data.len() as u64;
119 self.memory
120 .mark_private_memory(fw_gpa, data.len() as _, true)?;
121 self.vm
122 .snp_launch_update(data, fw_gpa, SnpPageType::NORMAL)?;
123 Ok(())
124 }
125
126 pub(crate) fn sev_finalize(&self, policy: SevPolicy) -> Result<()> {
127 if policy.es() {
128 self.vm.sev_launch_update_vmsa()?;
129 }
130 self.vm.sev_launch_measure()?;
131 self.vm.sev_launch_finish()?;
132 Ok(())
133 }
134
135 pub(crate) fn snp_finalize(&self) -> Result<()> {
136 self.vm.snp_launch_finish()?;
137 Ok(())
138 }
139
140 pub(crate) fn sev_init_ap(&self, vcpu: &mut V::Vcpu) -> Result<()> {
141 let eip = self.arch.sev_ap_eip.load(Ordering::Acquire);
142 vcpu.set_regs(&[(Reg::Rip, eip as u64 & 0xffff)])?;
143 vcpu.set_sregs(
144 &[],
145 &[(
146 SegReg::Cs,
147 SegRegVal {
148 selector: 0xf000,
149 base: eip as u64 & 0xffff_0000,
150 limit: 0xffff,
151 access: SegAccess(0x9b),
152 },
153 )],
154 &[],
155 )?;
156 Ok(())
157 }
158
159 pub(crate) fn sev_init(&self, policy: SevPolicy, memory: Arc<V::Memory>) -> Result<()> {
160 self.vm.sev_launch_start(policy)?;
161 let encrypt_pages = Box::new(EncryptPages { memory });
162 self.memory.register_change_callback(encrypt_pages)?;
163 Ok(())
164 }
165
166 pub(crate) fn snp_init(&self, policy: SnpPolicy, memory: Arc<V::Memory>) -> Result<()> {
167 self.vm.snp_launch_start(policy)?;
168 let encrypt_pages = Box::new(EncryptPages {
169 memory: memory.clone(),
170 });
171 self.memory.register_change_callback(encrypt_pages)?;
172 let mark_private_memory = Box::new(MarkPrivateMemory { memory });
173 self.memory.register_change_callback(mark_private_memory)?;
174 Ok(())
175 }
176}
177
/// Memory-layout change handler that registers newly added guest RAM with
/// the hypervisor as encrypted (used by SEV and SEV-SNP guests).
#[derive(Debug)]
pub struct EncryptPages {
    /// VM memory handle through which encrypted ranges are registered.
    memory: Arc<dyn VmMemory>,
}
182
183impl LayoutChanged for EncryptPages {
184 fn ram_added(&self, _: u64, pages: &ArcMemPages) -> mem::Result<()> {
185 self.memory.register_encrypted_range(pages.as_slice())?;
186 Ok(())
187 }
188
189 fn ram_removed(&self, _: u64, _: &ArcMemPages) -> mem::Result<()> {
190 Ok(())
191 }
192}
193