diff --git a/.gitignore b/.gitignore
index 928e2b29..65d6b331 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,8 +4,8 @@
 Cargo.lock
 target/
 *.bin
+*.obj
 # afl fuzz
-out
 *.profraw
 # Intellj working directory
 .idea
diff --git a/td-shim/.gitignore b/td-shim/.gitignore
deleted file mode 100644
index 46966d59..00000000
--- a/td-shim/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-*~
-
-# Cargo Junk
-Cargo.lock
-target/
-*.bin
-*.obj
diff --git a/td-shim/Cargo.toml b/td-shim/Cargo.toml
index f4201952..a2aa68ba 100644
--- a/td-shim/Cargo.toml
+++ b/td-shim/Cargo.toml
@@ -6,6 +6,10 @@ edition = "2018"
 # add build process
 build = "build.rs"
 
+[[bin]]
+name = "td-shim"
+required-features = ["main"]
+
 [build-dependencies]
 anyhow = "1.0.55"
 cc = { git = "https://github.com/jyao1/cc-rs.git", branch = "uefi_support" }
diff --git a/td-shim/src/bin/td-shim/acpi.rs b/td-shim/src/bin/td-shim/acpi.rs
new file mode 100644
index 00000000..e61d8b10
--- /dev/null
+++ b/td-shim/src/bin/td-shim/acpi.rs
@@ -0,0 +1,177 @@
+// Copyright (c) 2021 Intel Corporation
+// Copyright (c) 2022 Alibaba Cloud
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+extern crate alloc;
+
+use alloc::vec::Vec;
+use td_shim::acpi::{calculate_checksum, Rsdp, Xsdt};
+
+use super::*;
+
+#[derive(Default)]
+pub struct AcpiTables<'a> {
+    acpi_memory: &'a mut [u8],
+    pa: u64,
+    size: usize,
+    fadt: Option<(usize, usize)>, // FADT offset in acpi memory
+    dsdt: Option<usize>,          // DSDT offset in acpi memory
+    table_offset: Vec<usize>,
+}
+
+impl<'a> AcpiTables<'a> {
+    pub fn new(td_acpi_mem: &'a mut [u8], pa: u64) -> Self {
+        AcpiTables {
+            acpi_memory: td_acpi_mem,
+            pa,
+            ..Default::default()
+        }
+    }
+
+    pub fn finish(&mut self) -> u64 {
+        let mut xsdt = Xsdt::new();
+
+        // The Fixed ACPI Description Table (FADT) should always be the first table in XSDT.
+        if let Some((fadt_off, fadt_len)) = self.fadt {
+            // Safe because DSDT is loaded in acpi_memory which is below 4G
+            let dsdt = self
+                .dsdt
+                .as_ref()
+                .map(|v| self.offset_to_address(*v))
+                .unwrap_or_default() as u32;
+            let fadt = &mut self.acpi_memory[fadt_off..fadt_off + fadt_len];
+            // The Differentiated System Description Table (DSDT) is referenced by the FADT.
+            if dsdt != 0 {
+                // The DSDT field of FADT [40..44]
+                dsdt.write_to(&mut fadt[40..44]);
+            }
+
+            // Update FADT checksum
+            fadt[9] = 0;
+            fadt[9] = calculate_checksum(fadt);
+            xsdt.add_table(self.offset_to_address(fadt_off));
+        }
+
+        for offset in &self.table_offset {
+            xsdt.add_table(self.offset_to_address(*offset));
+        }
+
+        let xsdt_addr = self.offset_to_address(self.size);
+        xsdt.checksum();
+        xsdt.write_to(&mut self.acpi_memory[self.size..self.size + size_of::<Xsdt>()]);
+        self.size += size_of::<Xsdt>();
+
+        let rsdp_addr = self.offset_to_address(self.size);
+        let rsdp = Rsdp::new(xsdt_addr);
+        rsdp.write_to(&mut self.acpi_memory[self.size..self.size + size_of::<Rsdp>()]);
+        self.size += size_of::<Rsdp>();
+
+        rsdp_addr as u64
+    }
+
+    pub fn install(&mut self, table: &[u8]) {
+        // Also reserve space for Xsdt and Rsdp
+        let total_size = self.size + table.len() + size_of::<Xsdt>() + size_of::<Rsdp>();
+        if self.acpi_memory.len() < total_size {
+            log::error!(
+                "ACPI content size exceeds limit 0x{:X}",
+                self.acpi_memory.len(),
+            );
+            return;
+        } else if table.len() < size_of::<GenericSdtHeader>() {
+            log::error!("ACPI table with length 0x{:X} is invalid", table.len());
+            return;
+        }
+
+        // Safe because we have checked buffer size.
+        let header = GenericSdtHeader::read_from(&table[..size_of::<GenericSdtHeader>()]).unwrap();
+        if header.length as usize > table.len() {
+            log::error!(
+                "invalid ACPI table, header length {} is bigger than data length {}",
+                header.length as usize,
+                table.len()
+            );
+            return;
+        }
+
+        if &header.signature == b"FACP" {
+            // We will write to the `dsdt` fields at [40-44)
+            if header.length < 44 {
+                log::error!("invalid ACPI FADT table");
+                return;
+            }
+            self.fadt = Some((self.size, header.length as usize));
+        } else if &header.signature == b"DSDT" {
+            self.dsdt = Some(self.size);
+        } else {
+            for offset in &self.table_offset {
+                // Safe because it's reading data from our own buffer.
+                let table_header = GenericSdtHeader::read_from(
+                    &self.acpi_memory[*offset..*offset + size_of::<GenericSdtHeader>()],
+                )
+                .unwrap();
+                if table_header.signature == header.signature {
+                    log::info!(
+                        "ACPI: {} has already been installed, keeping the first instance\n",
+                        core::str::from_utf8(&header.signature).unwrap_or_default()
+                    );
+                    return;
+                }
+            }
+            self.table_offset.push(self.size);
+        }
+
+        self.acpi_memory[self.size..self.size + table.len()].copy_from_slice(table);
+        self.size += table.len();
+    }
+
+    fn offset_to_address(&self, offset: usize) -> u64 {
+        self.pa + offset as u64
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_acpi_tables() {
+        let mut buff = [0u8; 500];
+        let mut tables = AcpiTables::new(&mut buff, 0x100000);
+
+        assert_eq!(tables.offset_to_address(0x1000), 0x101000);
+        assert_eq!(tables.size, 0);
+
+        tables.install(&[]);
+        assert_eq!(tables.size, 0);
+        tables.install(&[0u8]);
+        assert_eq!(tables.size, 0);
+        tables.install(&[0u8; 269]);
+        assert_eq!(tables.size, 0);
+
+        let hdr = GenericSdtHeader::new(b"FACP", 44, 2);
+        let mut buf = [0u8; 44];
+        buf[0..size_of::<GenericSdtHeader>()].copy_from_slice(hdr.as_bytes());
+        tables.install(&buf);
+        assert_eq!(tables.fadt, Some((0, 44)));
+        assert_eq!(tables.size, 44);
+
+        let hdr = GenericSdtHeader::new(b"DSDT", size_of::<GenericSdtHeader>() as u32, 2);
+        tables.install(hdr.as_bytes());
+        assert_eq!(tables.size, 44 + size_of::<GenericSdtHeader>());
+
+        let hdr = GenericSdtHeader::new(b"TEST", size_of::<GenericSdtHeader>() as u32, 2);
+        tables.install(hdr.as_bytes());
+        assert_eq!(tables.size, 44 + 2 * size_of::<GenericSdtHeader>());
+
+        let hdr = GenericSdtHeader::new(b"TEST", size_of::<GenericSdtHeader>() as u32, 2);
+        tables.install(hdr.as_bytes());
+        assert_eq!(tables.size, 44 + 2 * size_of::<GenericSdtHeader>());
+
+        let addr = tables.finish();
+        assert_eq!(
+            addr,
+            0x100000 + 240 + 2 * size_of::<GenericSdtHeader>() as u64
+        );
+    }
+}
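// Aside: a minimal sketch (not part of this patch) of the ACPI checksum invariant
// that `calculate_checksum` and the `fadt[9] = 0; fadt[9] = ...` sequence above
// maintain: with the checksum byte included, a table's bytes must sum to 0 mod 256.
fn acpi_checksum(table_with_checksum_zeroed: &[u8]) -> u8 {
    0u8.wrapping_sub(
        table_with_checksum_zeroed
            .iter()
            .fold(0u8, |sum, b| sum.wrapping_add(*b)),
    )
}

fn acpi_checksum_ok(table: &[u8]) -> bool {
    table.iter().fold(0u8, |sum, b| sum.wrapping_add(*b)) == 0
}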
diff --git a/td-shim/src/bin/td-shim/asm/ap_loop.asm b/td-shim/src/bin/td-shim/asm/ap_loop.asm
new file mode 100644
index 00000000..4588cc61
--- /dev/null
+++ b/td-shim/src/bin/td-shim/asm/ap_loop.asm
@@ -0,0 +1,90 @@
+# Copyright (c) 2022 Intel Corporation
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+
+.set TDVMCALL_EXPOSE_REGS_MASK, 0xffec
+.set TDVMCALL, 0x0
+.set INSTRUCTION_CPUID, 0xa
+
+.set CommandOffset, 0
+.set ApicIdOffset, 0x4
+.set WakeupVectorOffset, 0x8
+
+.set MpProtectedModeWakeupCommandNoop, 0
+.set MpProtectedModeWakeupCommandWakeup, 1
+.set MpProtectedModeWakeupCommandSleep, 2
+
+.set MailboxApicIdInvalid, 0xffffffff
+.set MailboxApicIdBroadcast, 0xfffffffe
+
+.section .text
+#---------------------------------------------------------------------
+# ap_relocated_func_size (
+#     size: *mut u64, // rcx
+# );
+#---------------------------------------------------------------------
+.global ap_relocated_func_size
+ap_relocated_func_size:
+    push rax
+    push rbx
+    lea rax, .ap_relocated_func_end
+    lea rbx, ap_relocated_func
+    sub rax, rbx
+    mov qword ptr[rcx], rax
+    pop rbx
+    pop rax
+    ret
+
+#--------------------------------------------------------------------
+# ap_relocated_func
+#
+# rbx:  Relocated mailbox address
+# rbp:  vCpuId
+#--------------------------------------------------------------------
+.global ap_relocated_func
+ap_relocated_func:
+    #
+    # Get the APIC ID via TDVMCALL
+    mov rax, TDVMCALL
+    mov rcx, TDVMCALL_EXPOSE_REGS_MASK
+    mov r10, 0
+    mov r11, INSTRUCTION_CPUID
+    mov r12, 0xb
+    mov r13, 0
+    # TDVMCALL
+    .byte 0x66, 0x0f, 0x01, 0xcc
+    test rax, rax
+    jnz .panic
+    #
+    # r8 will hold the APIC ID of current AP
+    mov r8, r15
+
+.check_apicid:
+    #
+    # Determine if this is a broadcast or directly for my apic-id, if not, ignore
+    cmp dword ptr[rbx + ApicIdOffset], MailboxApicIdBroadcast
+    je .check_command
+    cmp dword ptr[rbx + ApicIdOffset], r8d
+    jne .check_apicid
+
+.check_command:
+    mov eax, dword ptr[rbx + CommandOffset]
+    cmp eax, MpProtectedModeWakeupCommandNoop
+    je .check_apicid
+
+    cmp eax, MpProtectedModeWakeupCommandWakeup
+    je .wakeup
+
+    jmp .check_apicid
+
+.wakeup:
+    #
+    # BSP sets these variables before unblocking APs
+    mov rax, 0
+    mov eax, dword ptr[rbx + WakeupVectorOffset]
+    nop
+    jmp rax
+
+.panic:
+    ud2
+
+.ap_relocated_func_end:
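// Aside: a minimal Rust sketch (not part of this patch) of the mailbox layout the
// loop above polls. Offsets mirror CommandOffset/ApicIdOffset/WakeupVectorOffset;
// the 64-bit width of the wakeup vector field is an assumption (the code above
// only reads its low 32 bits).
#[repr(C)]
struct MpWakeupMailbox {
    command: u32,       // 0x0: Noop == 0, Wakeup == 1, Sleep == 2
    apic_id: u32,       // 0x4: target APIC ID, 0xfffffffe means broadcast
    wakeup_vector: u64, // 0x8: entry point an AP jumps to on Wakeup
}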
diff --git a/td-shim/src/bin/td-shim/asm/mod.rs b/td-shim/src/bin/td-shim/asm/mod.rs
new file mode 100644
index 00000000..91f98806
--- /dev/null
+++ b/td-shim/src/bin/td-shim/asm/mod.rs
@@ -0,0 +1,11 @@
+// Copyright (c) 2020-2022 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+global_asm!(include_str!("switch_stack.asm"));
+global_asm!(include_str!("msr64.asm"));
+global_asm!(include_str!("ap_loop.asm"));
+
+extern "win64" {
+    pub fn ap_relocated_func_size(size: *mut u64);
+    pub fn ap_relocated_func();
+}
diff --git a/td-shim/src/bin/td-shim/asm/msr64.asm b/td-shim/src/bin/td-shim/asm/msr64.asm
new file mode 100644
index 00000000..d59c9398
--- /dev/null
+++ b/td-shim/src/bin/td-shim/asm/msr64.asm
@@ -0,0 +1,26 @@
+# Copyright (c) 2021 Intel Corporation
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+
+.section .text
+# asm_read_msr64(
+#     index: u32, // rcx
+# );
+.global asm_read_msr64
+asm_read_msr64:
+
+    rdmsr
+    shl rdx, 0x20
+    or rax, rdx
+    ret
+
+# asm_write_msr64(
+#     index: u32, // rcx
+#     value: u64, // rdx
+# );
+.global asm_write_msr64
+asm_write_msr64:
+
+    mov rax, rdx
+    shr rdx, 0x20
+    wrmsr
+    ret
diff --git a/td-shim/src/bin/td-shim/asm/switch_stack.asm b/td-shim/src/bin/td-shim/asm/switch_stack.asm
new file mode 100644
index 00000000..26451532
--- /dev/null
+++ b/td-shim/src/bin/td-shim/asm/switch_stack.asm
@@ -0,0 +1,22 @@
+# Copyright (c) 2020 Intel Corporation
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+
+.section .text
+
+# switch_stack_call(
+#     entry_point: usize, // rcx
+#     stack_top: usize,   // rdx
+#     P1: usize,          // r8
+#     P2: usize           // r9
+# );
+.global switch_stack_call
+switch_stack_call:
+    sub rdx,0x20
+    mov rsp,rdx
+    mov rax,rcx
+    mov rcx,r8
+    mov rdx,r9
+    call rax
+    int3
+    jmp switch_stack_call
+    ret
diff --git a/td-shim/src/bin/td-shim/cet_ss.rs b/td-shim/src/bin/td-shim/cet_ss.rs
new file mode 100644
index 00000000..690e6af7
--- /dev/null
+++ b/td-shim/src/bin/td-shim/cet_ss.rs
@@ -0,0 +1,99 @@
+// Copyright (c) 2021 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use lazy_static::lazy_static;
+use spin::Mutex;
+use x86_64::registers::control::Cr4;
+
+const CPUID_EXTEND_FEATURES: u32 = 0x7;
+const CPUID_CET_SS_BIT: u32 = 1 << 7;
+const CPUID_CET_IBT: u32 = 1 << 20;
+const CPUID_CET_XSS_U: u32 = 1 << 11;
+const CPUID_CET_XSS_S: u32 = 1 << 12;
+
+const MSR_IA32_S_CET: u32 = 0x6A2;
+const MSR_IA32_PL0_SSP: u32 = 0x6A4;
+const MSR_IA32_INTERRUPT_SSP_TABLE_ADDR: u32 = 0x6A8;
+const MSR_IA32_XSS: u32 = 0xDA0;
+
+const CR4_CET_ENABLE_BIT: u64 = 1 << 23;
+
+const EXCEPTION_PAGE_SIZE: u64 = crate::stack_guard::STACK_EXCEPTION_PAGE_SIZE as u64;
+const GUARD_PAGE_SIZE: u64 = crate::stack_guard::STACK_GUARD_PAGE_SIZE as u64;
+
+#[derive(Default)]
+struct Isst {
+    entries: [u64; 8],
+}
+
+lazy_static! {
+    static ref INTERRUPT_SSP_TABLE: Mutex<Isst> = Mutex::new(Isst::default());
+}
+
+extern "win64" {
+    fn asm_read_msr64(index: u32) -> u64;
+    fn asm_write_msr64(index: u32, value: u64) -> u64;
+}
+
+fn is_cet_available() -> (bool, bool) {
+    let mut cet_supported: bool = false;
+    let mut cet_xss_supported: bool = false;
+
+    // EAX = 7, ECX = 0: extended features.
+    let cpuid = unsafe { core::arch::x86_64::__cpuid_count(7, 0) };
+    if cpuid.ecx & CPUID_CET_SS_BIT != 0 {
+        cet_supported = true;
+
+        let cpuid = unsafe { core::arch::x86_64::__cpuid_count(0x0D, 1) };
+        if cpuid.ecx & CPUID_CET_XSS_S != 0 {
+            cet_xss_supported = true;
+        }
+    }
+
+    (cet_supported, cet_xss_supported)
+}
+
+fn enable_cet() {
+    unsafe { Cr4::write_raw(Cr4::read_raw() | CR4_CET_ENABLE_BIT) };
+}
+
+pub fn enable_cet_ss(shadow_stack_addr: u64, shadow_stack_size: u64) {
+    let (cet_supported, cet_xss_supported) = is_cet_available();
+    log::info!("CET support: {}\n", cet_supported);
+    if !cet_supported {
+        return;
+    }
+
+    unsafe { asm_write_msr64(MSR_IA32_S_CET, 1) };
+
+    //
+    // +-------------------------------------------------------------+
+    // | Exception Page -- Token | Guard Page | Shadow Stack -- Token |
+    // +-------------------------------------------------------------+
+    //
+
+    // Init SS Token
+    let pl0_ssp: u64 = shadow_stack_addr + shadow_stack_size - 8;
+    unsafe { *(pl0_ssp as *mut u64) = pl0_ssp };
+
+    enable_cet();
+
+    // Init Exception Page token and interrupt ssp table
+    let ist = &mut INTERRUPT_SSP_TABLE.lock();
+    let token = shadow_stack_addr + EXCEPTION_PAGE_SIZE - 8;
+    unsafe { *(token as *mut u64) = token };
+    ist.entries[1] = token;
+
+    if cet_xss_supported {
+        unsafe { asm_write_msr64(MSR_IA32_XSS, asm_read_msr64(MSR_IA32_XSS) | 1 << 12) };
+    }
+
+    unsafe {
+        asm_write_msr64(MSR_IA32_PL0_SSP, pl0_ssp);
+        asm_write_msr64(
+            MSR_IA32_INTERRUPT_SSP_TABLE_ADDR,
+            ist.entries.as_ptr() as u64,
+        );
+    }
+}
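// Aside: a sketch (not part of this patch) of the shadow-stack token math in
// enable_cet_ss() above: each token occupies the top 8 bytes of its region and
// holds its own linear address (a supervisor shadow-stack token, busy bit clear).
fn token_slot(region_base: u64, region_size: u64) -> u64 {
    region_base + region_size - 8
}
// pl0_ssp       == token_slot(shadow_stack_addr, shadow_stack_size)
// exception_ssp == token_slot(shadow_stack_addr, EXCEPTION_PAGE_SIZE)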
diff --git a/td-shim/src/bin/td-shim/e820.rs b/td-shim/src/bin/td-shim/e820.rs
new file mode 100644
index 00000000..e8cf5200
--- /dev/null
+++ b/td-shim/src/bin/td-shim/e820.rs
@@ -0,0 +1,136 @@
+// Copyright (c) 2021 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use td_layout::{build_time, runtime, RuntimeMemoryLayout};
+
+// Linux BootParams supports at most 128 e820 entries, so cap the table there.
+const MAX_E820_ENTRY: usize = 128;
+
+#[derive(Clone, Copy)]
+pub enum E820Type {
+    Memory = 1,
+    Reserved = 2,
+    Acpi = 3,
+    Nvs = 4,
+    Unusable = 5,
+    Disabled = 6,
+    Pmem = 7,
+}
+
+#[derive(Clone, Copy, Debug, Default)]
+#[repr(C, packed)]
+pub struct E820Entry {
+    pub addr: u64,
+    pub size: u64,
+    pub r#type: u32,
+}
+
+impl E820Entry {
+    pub fn new(addr: u64, size: u64, r#type: E820Type) -> Self {
+        E820Entry {
+            addr,
+            size,
+            r#type: r#type as u32,
+        }
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+#[repr(C, packed)]
+pub struct E820Table {
+    entries: [E820Entry; MAX_E820_ENTRY],
+    size: usize,
+}
+
+impl Default for E820Table {
+    fn default() -> Self {
+        Self {
+            entries: [E820Entry::default(); MAX_E820_ENTRY],
+            size: 0,
+        }
+    }
+}
+
+impl E820Table {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn add_range(&mut self, r#type: E820Type, start: u64, length: u64) {
+        if self.size == MAX_E820_ENTRY {
+            return;
+        }
+        if self.size > 0 {
+            let end_entry = &mut self.entries[self.size - 1];
+            let exist_end = end_entry.addr + end_entry.size;
+            if start == exist_end && r#type as u32 == end_entry.r#type {
+                end_entry.size += length;
+                return;
+            }
+        }
+        self.entries[self.size] = E820Entry::new(start, length, r#type);
+        self.size += 1;
+    }
+
+    pub fn as_slice(&self) -> &[E820Entry] {
+        &self.entries[..self.size]
+    }
+}
+
+pub fn create_e820_entries(runtime_memory: &RuntimeMemoryLayout) -> E820Table {
+    let mut table = E820Table::new();
+
+    table.add_range(E820Type::Memory, 0, runtime_memory.runtime_acpi_base as u64);
+    table.add_range(
+        E820Type::Acpi,
+        runtime_memory.runtime_acpi_base,
+        runtime::TD_PAYLOAD_ACPI_SIZE as u64,
+    );
+    table.add_range(
+        E820Type::Nvs,
+        runtime_memory.runtime_event_log_base,
+        runtime::TD_PAYLOAD_EVENT_LOG_SIZE as u64,
+    );
+    table.add_range(
+        E820Type::Nvs,
+        runtime_memory.runtime_mailbox_base,
+        runtime::TD_PAYLOAD_MAILBOX_SIZE as u64,
+    );
+    // TODO: what about memory above 4G? Should it be reported as `Memory`?
+
+    table
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use core::mem::size_of;
+
+    #[test]
+    fn test_e820_entry_size() {
+        assert_eq!(size_of::<E820Entry>(), 20);
+        assert_eq!(
+            size_of::<[E820Entry; MAX_E820_ENTRY]>(),
+            20 * MAX_E820_ENTRY
+        );
+    }
+
+    #[test]
+    fn test_e820_table() {
+        let mut table = E820Table::new();
+        assert_eq!(table.size as usize, 0);
+        table.add_range(E820Type::Memory, 0x0, 0x1000);
+        assert_eq!(table.size as usize, 1);
+        table.add_range(E820Type::Memory, 0x1000, 0x1000);
+        assert_eq!(table.size as usize, 1);
+        assert_eq!(table.entries[0].size as u64, 0x2000);
+        table.add_range(E820Type::Acpi, 0x2000, 0x1000);
+        assert_eq!(table.size as usize, 2);
+
+        for idx in 0..MAX_E820_ENTRY {
+            table.add_range(E820Type::Memory, idx as u64 * 0x2000, 0x1000);
+        }
+        assert_eq!(table.size as usize, MAX_E820_ENTRY);
+    }
+}
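// Aside: a minimal sketch (not part of this patch) of how the consumer in
// linux/boot.rs hands this table to the kernel zero page: copy the valid
// entries (same 20-byte packed layout) and report the count.
fn fill_zero_page(table: &E820Table, out: &mut [E820Entry; 128], count: &mut u8) {
    let valid = table.as_slice();
    out[..valid.len()].copy_from_slice(valid);
    *count = valid.len() as u8;
}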
diff --git a/td-shim/src/bin/td-shim/heap.rs b/td-shim/src/bin/td-shim/heap.rs
new file mode 100644
index 00000000..a1ff16c8
--- /dev/null
+++ b/td-shim/src/bin/td-shim/heap.rs
@@ -0,0 +1,25 @@
+// Copyright (c) 2021 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use linked_list_allocator::LockedHeap;
+use td_layout::build_time::{TD_SHIM_TEMP_HEAP_BASE, TD_SHIM_TEMP_HEAP_SIZE};
+
+#[cfg(not(test))]
+#[global_allocator]
+static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
+
+/// Initialize the heap allocator.
+pub(super) fn init() {
+    let heap_start = TD_SHIM_TEMP_HEAP_BASE as usize;
+    let heap_size = TD_SHIM_TEMP_HEAP_SIZE as usize;
+
+    unsafe {
+        #[cfg(not(test))]
+        HEAP_ALLOCATOR.lock().init(heap_start, heap_size);
+    }
+    log::info!(
+        "Heap allocator init done: {:#x?}\n",
+        heap_start..heap_start + heap_size
+    );
+}
diff --git a/td-shim/src/bin/td-shim/ipl.rs b/td-shim/src/bin/td-shim/ipl.rs
new file mode 100644
index 00000000..164792fa
--- /dev/null
+++ b/td-shim/src/bin/td-shim/ipl.rs
@@ -0,0 +1,116 @@
+// Copyright (c) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use td_layout::memslice;
+use td_layout::runtime::{TD_PAYLOAD_BASE, TD_PAYLOAD_SIZE};
+use td_loader::elf;
+use td_loader::elf64::ProgramHeader;
+use td_loader::pe::{self, Section};
+
+use crate::memory::Memory;
+
+const SIZE_4KB: u64 = 0x00001000u64;
+
+pub fn efi_size_to_page(size: u64) -> u64 {
+    // Saturating add so a huge `size` cannot overflow the round-up.
+    size.saturating_add(SIZE_4KB - 1) / SIZE_4KB
+}
+
+pub fn efi_page_to_size(page: u64) -> u64 {
+    // Saturating multiply; the mask keeps a saturated result page-aligned.
+    page.saturating_mul(SIZE_4KB) & !(SIZE_4KB - 1)
+}
+
+pub fn find_and_report_entry_point(
+    mem: &mut Memory,
+    image_buffer: &[u8],
+) -> Option<(u64, u64, u64)> {
+    // Safe because we are the only user in single-thread context.
+    let loaded_buffer = unsafe { memslice::get_mem_slice_mut(memslice::SliceType::Payload) };
+    let loaded_buffer_slice = loaded_buffer.as_ptr() as u64;
+
+    let res = if elf::is_elf(image_buffer) {
+        elf::relocate_elf_with_per_program_header(image_buffer, loaded_buffer, |ph| {
+            if !ph.is_executable() {
+                mem.set_nx_bit(ph.p_vaddr + loaded_buffer_slice, ph.p_filesz);
+            }
+            if !ph.is_write() {
+                log::info!("WP in elf: {:x}\n", ph.p_vaddr + loaded_buffer_slice);
+                mem.set_write_protect(ph.p_vaddr + loaded_buffer_slice, ph.p_filesz);
+            }
+        })?
+    } else if pe::is_x86_64_pe(image_buffer) {
+        pe::relocate_pe_mem_with_per_sections(image_buffer, loaded_buffer, |sc| {
+            if !sc.is_executable() {
+                mem.set_nx_bit(
+                    sc.section_virtual_address() as u64 + loaded_buffer_slice,
+                    sc.section_size() as u64,
+                );
+            }
+            if !sc.is_write() {
+                mem.set_write_protect(
+                    sc.section_virtual_address() as u64 + loaded_buffer_slice,
+                    sc.section_size() as u64,
+                );
+            }
+        })?
+    } else {
+        return None;
+    };
+
+    let entry = res.0;
+    let base = res.1;
+    let size = res.2;
+    if base < TD_PAYLOAD_BASE as u64
+        || base >= TD_PAYLOAD_BASE + TD_PAYLOAD_SIZE as u64
+        || size > TD_PAYLOAD_SIZE as u64 - (base - TD_PAYLOAD_BASE)
+        || entry < base
+        || entry > base + size
+    {
+        log::error!("invalid payload binary");
+        None
+    } else {
+        log::info!(
+            "image_entry: {:x}, image_base: {:x}, image_size: {:x}\n",
+            entry,
+            base,
+            size
+        );
+        Some(res)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_size_to_page() {
+        assert_eq!(efi_size_to_page(0), 0);
+        assert_eq!(efi_size_to_page(1), 1);
+        assert_eq!(efi_size_to_page(SIZE_4KB), 1);
+        assert_eq!(efi_size_to_page(SIZE_4KB + 1), 2);
+        assert_eq!(efi_size_to_page(u64::MAX), u64::MAX / SIZE_4KB);
+        assert_eq!(efi_page_to_size(1), SIZE_4KB);
+        assert_eq!(efi_page_to_size(u64::MAX), u64::MAX & !(SIZE_4KB - 1));
+    }
+
+    #[test]
+    fn test_parse_elf() {
+        let elf = include_bytes!("../../../../data/blobs/td-payload.elf");
+        let mut loaded_buffer = vec![0u8; elf.len()];
+
+        assert!(elf::is_elf(elf));
+        elf::relocate_elf_with_per_program_header(elf, &mut loaded_buffer, |_ph| {}).unwrap();
+    }
+
+    #[test]
+    fn test_parse_pe() {
+        let efi = include_bytes!("../../../../data/blobs/td-payload.efi");
+        let mut loaded_buffer = vec![0u8; efi.len() * 2];
+
+        assert!(pe::is_x86_64_pe(efi));
+        pe::relocate_pe_mem_with_per_sections(efi, &mut loaded_buffer, |_ph| {}).unwrap();
+    }
+}
diff --git a/td-shim/src/bin/td-shim/linux/boot.rs b/td-shim/src/bin/td-shim/linux/boot.rs
new file mode 100644
index 00000000..3b175be1
--- /dev/null
+++ b/td-shim/src/bin/td-shim/linux/boot.rs
@@ -0,0 +1,107 @@
+// Copyright (c) 2021 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use core::mem::size_of;
+use scroll::{Pread, Pwrite};
+use td_layout as layout;
+use td_layout::runtime::TD_PAYLOAD_PARAM_BASE;
+use td_shim::{PayloadInfo, TdKernelInfoHobType};
+use x86_64::{
+    instructions::{segmentation::Segment, tables::lgdt},
+    registers::segmentation as seg,
+    structures::{gdt, DescriptorTablePointer},
+    PrivilegeLevel as RPL, VirtAddr,
+};
+
+use crate::e820::E820Entry;
+use crate::linux::kernel_param::{BootParams, SetupHeader};
+
+const KERNEL_64BIT_ENTRY_OFFSET: u64 = 0x200;
+const GDT: [u64; 4] = [
+    0,
+    0,
+    gdt::DescriptorFlags::KERNEL_CODE64.bits(),
+    gdt::DescriptorFlags::KERNEL_DATA.bits(),
+];
+
+pub enum Error {
+    InvalidBzImage,
+    UnknownImageType,
+}
+
+pub fn setup_header(kernel_image: &[u8]) -> Result<SetupHeader, Error> {
+    let mut setup_header = SetupHeader::from_file(kernel_image);
+
+    if setup_header.header != 0x5372_6448 {
+        return Err(Error::InvalidBzImage);
+    }
+
+    if (setup_header.version < 0x0200) || ((setup_header.loadflags & 0x1) == 0x0) {
+        return Err(Error::InvalidBzImage);
+    }
+
+    let setup_sects = match setup_header.setup_sects {
+        0 => 4,
+        n => n as u32,
+    };
+
+    let setup_bytes = (setup_sects + 1) * 512;
+
+    setup_header.type_of_loader = 0xff;
+    setup_header.code32_start = kernel_image.as_ptr() as u32 + setup_bytes;
+    setup_header.cmd_line_ptr = TD_PAYLOAD_PARAM_BASE as u32;
+
+    Ok(setup_header)
+}
+
+pub fn boot_kernel(
+    kernel: &[u8],
+    rsdp_addr: u64,
+    e820: &[E820Entry],
+    info: &PayloadInfo,
+) -> Result<(), Error> {
+    let mut params: BootParams = BootParams::default();
+    params.acpi_rsdp_addr = rsdp_addr;
+    params.e820_entries = e820.len() as u8;
+    params.e820_table[..e820.len()].copy_from_slice(e820);
+
+    let image_type = TdKernelInfoHobType::from(info.image_type);
+    match image_type {
+        TdKernelInfoHobType::BzImage => params.hdr = setup_header(kernel)?,
+        TdKernelInfoHobType::RawVmLinux => {
+            params.hdr.type_of_loader = 0xff;
+            params.hdr.boot_flag = 0xaa55;
+            params.hdr.header = 0x5372_6448;
+            params.hdr.kernel_alignment = 0x0100_0000;
+            //params.hdr.cmd_line_ptr = xxx as u32;
+            //params.hdr.cmdline_size = xxx as u32;
+        }
+        _ => return Err(Error::UnknownImageType),
+    }
+
+    // Set the GDT, CS/DS/ES/SS, and disable interrupts
+    let gdtr = DescriptorTablePointer {
+        limit: (size_of::<u64>() * 4) as u16,
+        base: VirtAddr::new(GDT.as_ptr() as u64),
+    };
+
+    unsafe {
+        lgdt(&gdtr);
+        seg::CS::set_reg(seg::SegmentSelector::new(2, RPL::Ring0));
+        seg::DS::set_reg(seg::SegmentSelector::new(3, RPL::Ring0));
+        seg::ES::set_reg(seg::SegmentSelector::new(3, RPL::Ring0));
+        seg::SS::set_reg(seg::SegmentSelector::new(3, RPL::Ring0));
+        x86_64::instructions::interrupts::disable();
+    }
+
+    // Jump to the kernel 64-bit entry point
+    log::info!("Jump to kernel...\n");
+    let entry64 = params.hdr.code32_start as u64 + KERNEL_64BIT_ENTRY_OFFSET;
+
+    // The kernel's 64-bit entry follows the sysv64 calling convention
+    let entry64: extern "sysv64" fn(usize, usize) = unsafe { core::mem::transmute(entry64) };
+    entry64(0, &params as *const _ as usize);
+
+    Ok(())
+}
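// Aside: a worked sketch (not part of this patch) of the bzImage layout math in
// setup_header()/boot_kernel() above: the real-mode setup area spans
// (setup_sects + 1) 512-byte sectors, the protected-mode kernel follows it, and
// the 64-bit entry point lives 0x200 bytes into the protected-mode image.
fn entry64_address(image_base: u64, setup_sects: u8) -> u64 {
    let setup_sects = if setup_sects == 0 { 4 } else { setup_sects as u64 };
    let setup_bytes = (setup_sects + 1) * 512; // e.g. 4 sectors -> 2560 bytes
    image_base + setup_bytes + 0x200 // code32_start + KERNEL_64BIT_ENTRY_OFFSET
}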
diff --git a/td-shim/src/bin/td-shim/linux/kernel_param.rs b/td-shim/src/bin/td-shim/linux/kernel_param.rs
new file mode 100644
index 00000000..e822f3fc
--- /dev/null
+++ b/td-shim/src/bin/td-shim/linux/kernel_param.rs
@@ -0,0 +1,135 @@
+// Copyright (c) 2021 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use core::mem::size_of;
+use zerocopy::{AsBytes, FromBytes};
+
+use crate::e820::E820Entry;
+
+#[derive(Clone, Copy, Default, Debug, AsBytes, FromBytes)]
+#[repr(C, packed)]
+pub struct SetupHeader {
+    pub setup_sects: u8,
+    pub root_flags: u16,
+    pub syssize: u32,
+    pub ram_size: u16,
+    pub vid_mode: u16,
+    pub root_dev: u16,
+    pub boot_flag: u16,
+    pub jump: u16,
+    pub header: u32,
+    pub version: u16,
+    pub realmode_swtch: u32,
+    pub start_sys_seg: u16,
+    pub kernel_version: u16,
+    pub type_of_loader: u8,
+    pub loadflags: u8,
+    pub setup_move_size: u16,
+    pub code32_start: u32,
+    pub ramdisk_image: u32,
+    pub ramdisk_size: u32,
+    pub bootsect_kludge: u32,
+    pub heap_end_ptr: u16,
+    pub ext_loader_ver: u8,
+    pub ext_loader_type: u8,
+    pub cmd_line_ptr: u32,
+    pub initrd_addr_max: u32,
+    pub kernel_alignment: u32,
+    pub relocatable_kernel: u8,
+    pub min_alignment: u8,
+    pub xloadflags: u16,
+    pub cmdline_size: u32,
+    pub hardware_subarch: u32,
+    pub hardware_subarch_data: u64,
+    pub payload_offset: u32,
+    pub payload_length: u32,
+    pub setup_data: u64,
+    pub pref_address: u64,
+    pub init_size: u32,
+    pub handover_offset: u32,
+}
+
+impl SetupHeader {
+    // Read a kernel header from the first two sectors of a file
+    pub fn from_file(kernel_file: &[u8]) -> Self {
+        SetupHeader::read_from(&kernel_file[0x1f1..0x1f1 + size_of::<SetupHeader>()]).unwrap()
+    }
+}
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+pub struct BootParams {
+    screen_info: ScreenInfo,        // 0x000/0x040
+    apm_bios_info: ApmBiosInfo,     // 0x040/0x014
+    _pad2: [u8; 4],                 // 0x054/0x004
+    tboot_addr: u64,                // 0x058/0x008
+    ist_info: IstInfo,              // 0x060/0x010
+    pub acpi_rsdp_addr: u64,        // 0x070/0x008
+    _pad3: [u8; 8],                 // 0x078/0x008
+    hd0_info: [u8; 16],             // 0x080/0x010 - obsolete
+    hd1_info: [u8; 16],             // 0x090/0x010 - obsolete
+    sys_desc_table: SysDescTable,   // 0x0a0/0x010 - obsolete
+    olpc_ofw_header: OlpcOfwHeader, // 0x0b0/0x010
+    ext_ramdisk_image: u32,         // 0x0c0/0x004
+    ext_ramdisk_size: u32,          // 0x0c4/0x004
+    ext_cmd_line_ptr: u32,          // 0x0c8/0x004
+    _pad4: [u8; 116],               // 0x0cc/0x074
+    edid_info: EdidInfo,            // 0x140/0x080
+    efi_info: EfiInfo,              // 0x1c0/0x020
+    alt_mem_k: u32,                 // 0x1e0/0x004
+    scratch: u32,                   // 0x1e4/0x004
+    pub e820_entries: u8,           // 0x1e8/0x001
+    eddbuf_entries: u8,             // 0x1e9/0x001
+    edd_mbr_sig_buf_entries: u8,    // 0x1ea/0x001
+    kbd_status: u8,                 // 0x1eb/0x001
+    secure_boot: u8,                // 0x1ec/0x001
+    _pad5: [u8; 2],                 // 0x1ed/0x002
+    sentinel: u8,                   // 0x1ef/0x001
+    _pad6: [u8; 1],                 // 0x1f0/0x001
+    pub hdr: SetupHeader,           // 0x1f1
+    _pad7: [u8; 0x290 - 0x1f1 - size_of::<SetupHeader>()],
+    edd_mbr_sig_buffer: [u32; 16],    // 0x290
+    pub e820_table: [E820Entry; 128], // 0x2d0
+    _pad8: [u8; 48],                  // 0xcd0
+    eddbuf: [EddInfo; 6],             // 0xd00
+    _pad9: [u8; 276],                 // 0xeec
+}
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct ScreenInfo([u8; 0x40]);
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct ApmBiosInfo([u8; 0x14]);
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct IstInfo([u8; 0x10]);
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct SysDescTable([u8; 0x10]);
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct OlpcOfwHeader([u8; 0x10]);
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct EdidInfo([u8; 0x80]);
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct EfiInfo([u8; 0x20]);
+
+#[derive(Clone, Copy)]
+#[repr(C, packed)]
+struct EddInfo([u8; 0x52]);
+
+impl Default for BootParams {
+    fn default() -> Self {
+        unsafe { core::mem::zeroed() }
+    }
+}
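// Aside: compile-time layout checks (not part of this patch) implied by the
// offset comments above and the Linux boot protocol's 4 KiB zero page:
const _: () = assert!(core::mem::size_of::<SetupHeader>() == 0x77);
const _: () = assert!(core::mem::size_of::<BootParams>() == 0x1000);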
diff --git a/td-shim/src/bin/td-shim/linux/mod.rs b/td-shim/src/bin/td-shim/linux/mod.rs
new file mode 100644
index 00000000..63a6b18a
--- /dev/null
+++ b/td-shim/src/bin/td-shim/linux/mod.rs
@@ -0,0 +1,6 @@
+// Copyright (c) 2021 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+pub mod boot;
+pub mod kernel_param;
diff --git a/td-shim/src/bin/td-shim/main.rs b/td-shim/src/bin/td-shim/main.rs
new file mode 100644
index 00000000..24e213e4
--- /dev/null
+++ b/td-shim/src/bin/td-shim/main.rs
@@ -0,0 +1,546 @@
+// Copyright (c) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+#![allow(unused)]
+#![feature(global_asm)]
+#![feature(asm)]
+#![feature(alloc_error_handler)]
+#![cfg_attr(not(test), no_std)]
+#![cfg_attr(not(test), no_main)]
+#![allow(unused_imports)]
+
+use core::ffi::c_void;
+use core::mem::size_of;
+use core::panic::PanicInfo;
+
+use r_efi::efi;
+use scroll::{Pread, Pwrite};
+use zerocopy::{AsBytes, ByteSlice, FromBytes};
+
+use td_layout::build_time::{self, *};
+use td_layout::memslice;
+use td_layout::runtime::{self, *};
+use td_layout::RuntimeMemoryLayout;
+use td_shim::acpi::GenericSdtHeader;
+use td_shim::event_log::{
+    self, TdHandoffTable, TdHandoffTablePointers, EV_EFI_HANDOFF_TABLES2, EV_PLATFORM_CONFIG_FLAGS,
+    TD_LOG_EFI_HANDOFF_TABLE_GUID,
+};
+use td_shim::{
+    HobTemplate, PayloadInfo, TdKernelInfoHobType, TD_ACPI_TABLE_HOB_GUID, TD_KERNEL_INFO_HOB_GUID,
+};
+use td_uefi_pi::{fv, hob, pi};
+
+use crate::tcg::TdEventLog;
+
+mod acpi;
+mod asm;
+mod e820;
+mod heap;
+mod ipl;
+mod linux;
+mod memory;
+mod mp;
+mod stack_guard;
+mod tcg;
+mod td;
+
+#[cfg(feature = "cet-ss")]
+mod cet_ss;
+#[cfg(feature = "secure-boot")]
+mod verifier;
+
+extern "win64" {
+    fn switch_stack_call(entry_point: usize, stack_top: usize, P1: usize, P2: usize);
+}
+
+#[cfg(not(test))]
+#[panic_handler]
+#[allow(clippy::empty_loop)]
+fn panic(_info: &PanicInfo) -> ! {
+    log::info!("panic ... {:?}\n", _info);
+    loop {}
+}
+
+#[cfg(not(test))]
+#[alloc_error_handler]
+#[allow(clippy::empty_loop)]
+fn alloc_error(_info: core::alloc::Layout) -> ! {
+    log::info!("alloc_error ... {:?}\n", _info);
+    loop {}
+}
+
+/// Main entry point of the td-shim, and the bootstrap code should jump here.
+///
+/// The bootstrap should prepare the context to satisfy `_start()`'s expectation:
+/// - the memory is in 1:1 identity mapping mode with paging enabled
+/// - the stack is ready for use
+///
+/// # Arguments
+/// - `boot_fv`: pointer to the boot firmware volume
+/// - `top_of_stack`: top address of the stack
+/// - `init_vp`: [31:0] TDINITVP - Untrusted Configuration
+/// - `info`: [6:0] CPU supported GPA width, [7:7] 5 level page table support, [23:16] VCPUID,
+///           [32:24] VCPU_Index
+#[cfg(not(test))]
+#[no_mangle]
+#[export_name = "efi_main"]
+pub extern "win64" fn _start(
+    boot_fv: *const c_void,
+    top_of_stack: *const c_void,
+    init_vp: *const c_void,
+    info: usize,
+) -> ! {
+    // The bootstrap code has setup the stack, but only the stack is available now...
+    let _ = td_logger::init();
+    log::info!("Starting RUST Based TdShim boot_fv - {:p}, Top of stack - {:p}, init_vp - {:p}, info - 0x{:x} \n",
+        boot_fv, top_of_stack, init_vp, info);
+    td_exception::setup_exception_handlers();
+    log::info!("setup_exception_handlers done\n");
+
+    // First initialize the heap allocator so that we have a normal rust world to live in...
+    heap::init();
+
+    // Get HOB list
+    let hob_list = memslice::get_mem_slice(memslice::SliceType::ShimHob);
+    let hob_size = hob::get_hob_total_size(hob_list).expect("failed to get size of hob list");
+    let hob_list = &hob_list[0..hob_size];
+    hob::dump_hob(hob_list);
+
+    // Initialize memory subsystem.
+    let num_vcpus = td::get_num_vcpus();
+    accept_memory_resources(hob_list, num_vcpus);
+    td_paging::init();
+    let memory_top_below_4gb = hob::get_system_memory_size_below_4gb(hob_list)
+        .expect("failed to figure out memory below 4G from hob list");
+    let runtime_memory_layout = RuntimeMemoryLayout::new(memory_top_below_4gb);
+    let memory_all = memory::get_memory_size(hob_list);
+    let mut mem = memory::Memory::new(&runtime_memory_layout, memory_all);
+    mem.setup_paging();
+
+    // Relocate the mailbox alongside the AP wakeup function
+    td::relocate_mailbox(runtime_memory_layout.runtime_mailbox_base as u32);
+
+    // Set up the TD event log buffer.
+    // Safe because it's used to initialize the EventLog subsystem which ensures safety.
+    let event_log_buf = unsafe {
+        memslice::get_dynamic_mem_slice_mut(
+            memslice::SliceType::EventLog,
+            runtime_memory_layout.runtime_event_log_base as usize,
+        )
+    };
+    let mut td_event_log = tcg::TdEventLog::new(event_log_buf);
+    log_hob_list(hob_list, &mut td_event_log);
+
+    // If the Kernel Information GUID HOB is present, try to boot the Linux kernel.
+    if let Some(kernel_hob) = hob::get_next_extension_guid_hob(hob_list, &TD_KERNEL_INFO_HOB_GUID) {
+        boot_linux_kernel(
+            kernel_hob,
+            hob_list,
+            &runtime_memory_layout,
+            &mut td_event_log,
+            num_vcpus,
+        );
+    }
+
+    // Get and parse image file from the payload firmware volume.
+    let fv_buffer = memslice::get_mem_slice(memslice::SliceType::ShimPayload);
+    let mut payload = fv::get_image_from_fv(
+        fv_buffer,
+        pi::fv::FV_FILETYPE_DXE_CORE,
+        pi::fv::SECTION_PE32,
+    )
+    .expect("Failed to get image file from Firmware Volume");
+
+    #[cfg(feature = "secure-boot")]
+    {
+        payload = secure_boot_verify_payload(payload, &mut td_event_log);
+    }
+
+    let (entry, basefw, basefwsize) =
+        ipl::find_and_report_entry_point(&mut mem, payload).expect("Entry point not found!");
+    let entry = entry as usize;
+
+    // Initialize the stack to run the image
+    stack_guard::stack_guard_enable(&mut mem);
+    #[cfg(feature = "cet-ss")]
+    cet_ss::enable_cet_ss(
+        runtime_memory_layout.runtime_shadow_stack_base,
+        runtime_memory_layout.runtime_shadow_stack_top,
+    );
+    let stack_top =
+        (runtime_memory_layout.runtime_stack_base + TD_PAYLOAD_STACK_SIZE as u64) as usize;
+
+    // Prepare the HOB list to run the image
+    let hob_base = prepare_hob_list(
+        hob_list,
+        &runtime_memory_layout,
+        basefw,
+        basefwsize,
+        memory_top_below_4gb,
+    );
+
+    // Finally let's switch stack and jump to the image entry point...
+    log::info!(
+        "start launching payload {:p} and switch stack {:p}...\n",
+        entry as *const usize,
+        stack_top as *const usize
+    );
+    unsafe { switch_stack_call(entry, stack_top, hob_base as usize, 0) };
+    panic!("payload entry() should not return here, deadloop!!!");
+}
+
+fn log_hob_list(hob_list: &[u8], td_event_log: &mut tcg::TdEventLog) {
+    let hand_off_table_pointers = TdHandoffTablePointers {
+        table_descripion_size: 8,
+        table_description: [b't', b'd', b'_', b'h', b'o', b'b', 0, 0],
+        number_of_tables: 1,
+        table_entry: [TdHandoffTable {
+            guid: TD_LOG_EFI_HANDOFF_TABLE_GUID,
+            table: hob_list as *const _ as *const c_void as u64,
+        }],
+    };
+    let mut tdx_handofftable_pointers_buffer = [0u8; size_of::<TdHandoffTablePointers>()];
+
+    tdx_handofftable_pointers_buffer
+        .pwrite(hand_off_table_pointers, 0)
+        .expect("Failed to log HOB list to the td event log");
+    td_event_log.create_event_log(
+        1,
+        EV_EFI_HANDOFF_TABLES2,
+        &tdx_handofftable_pointers_buffer,
+        hob_list,
+    );
+}
+
+fn accept_memory_resources(hob_list: &[u8], num_vcpus: u32) {
+    let mut offset: usize = 0;
+    loop {
+        let hob = &hob_list[offset..];
+        let header: pi::hob::Header = hob.pread(0).expect("Failed to read HOB header");
+
+        match header.r#type {
+            pi::hob::HOB_TYPE_RESOURCE_DESCRIPTOR => {
+                let resource_hob: pi::hob::ResourceDescription = hob.pread(0).unwrap();
+                if resource_hob.resource_type == pi::hob::RESOURCE_SYSTEM_MEMORY {
+                    td::accept_memory_resource_range(
+                        num_vcpus,
+                        resource_hob.physical_start,
+                        resource_hob.resource_length,
+                    );
+                }
+            }
+            pi::hob::HOB_TYPE_END_OF_HOB_LIST => {
+                break;
+            }
+            _ => {}
+        }
+
+        offset = hob::align_to_next_hob_offset(hob_list.len(), offset, header.length)
+            .expect("Failed to find next HOB entry");
+    }
+}
+
+fn boot_linux_kernel(
+    kernel_hob: &[u8],
+    hob_list: &[u8],
+    layout: &RuntimeMemoryLayout,
+    td_event_log: &mut TdEventLog,
+    vcpus: u32,
+) {
+    let kernel_info = hob::get_guid_data(kernel_hob)
+        .expect("Can not fetch kernel data from the Kernel Info GUID HOB!!!");
+    let vmm_kernel = kernel_info
+        .pread::<PayloadInfo>(0)
+        .expect("Can not fetch PayloadInfo structure from the Kernel Info GUID HOB");
+
+    let image_type = TdKernelInfoHobType::from(vmm_kernel.image_type);
+    match image_type {
+        TdKernelInfoHobType::ExecutablePayload => return,
+        TdKernelInfoHobType::BzImage | TdKernelInfoHobType::RawVmLinux => {}
+        _ => panic!("Unknown kernel image type {}!!!", vmm_kernel.image_type),
+    };
+
+    let rsdp = prepare_acpi_tables(hob_list, layout, td_event_log, vcpus);
+    let e820_table = e820::create_e820_entries(layout);
+    // Safe because we hand this buffer off to the Linux kernel.
+    let payload = unsafe { memslice::get_mem_slice_mut(memslice::SliceType::Payload) };
+
+    linux::boot::boot_kernel(payload, rsdp, e820_table.as_slice(), &vmm_kernel);
+    panic!("Linux kernel should not return here!!!");
+}
+
+// Prepare ACPI tables for the virtual machine, panicking on any error.
+fn prepare_acpi_tables(
+    hob_list: &[u8],
+    layout: &RuntimeMemoryLayout,
+    td_event_log: &mut TdEventLog,
+    vcpus: u32,
+) -> u64 {
+    // Safe because BSP is the only active vCPU so it's single-threaded context.
+    let acpi_slice = unsafe {
+        memslice::get_dynamic_mem_slice_mut(
+            memslice::SliceType::Acpi,
+            layout.runtime_acpi_base as usize,
+        )
+    };
+    let mut acpi_tables = acpi::AcpiTables::new(acpi_slice, acpi_slice.as_ptr() as *const _ as u64);
+
+    let mut vmm_madt = None;
+    let mut next_hob = hob_list;
+    while let Some(hob) = hob::get_next_extension_guid_hob(next_hob, &TD_ACPI_TABLE_HOB_GUID) {
+        let table = hob::get_guid_data(hob).expect("Failed to get data from ACPI GUID HOB");
+        let header = GenericSdtHeader::read_from(&table[..size_of::<GenericSdtHeader>()])
+            .expect("Failed to read table header from ACPI GUID HOB");
+        // Protect MADT and TDEL from being overwritten by the VMM.
+        if &header.signature != b"APIC" && &header.signature != b"TDEL" {
+            acpi_tables.install(table);
+        }
+        if &header.signature == b"APIC" {
+            vmm_madt = Some(table);
+        }
+        next_hob = hob::seek_to_next_hob(hob).unwrap();
+    }
+
+    let madt = if let Some(vmm_madt) = vmm_madt {
+        mp::create_madt(vmm_madt, layout.runtime_mailbox_base as u64)
+            .expect("Failed to create ACPI MADT table")
+    } else {
+        mp::create_madt_default(vcpus, layout.runtime_mailbox_base as u64)
+            .expect("Failed to create ACPI MADT table")
+    };
+
+    acpi_tables.install(madt.as_bytes());
+    let tdel = td_event_log.create_tdel();
+    acpi_tables.install(tdel.as_bytes());
+
+    acpi_tables.finish()
+}
+
+fn prepare_hob_list(
+    hob_list: &[u8],
+    layout: &RuntimeMemoryLayout,
+    basefw: u64,
+    basefwsize: u64,
+    memory_top_below_4gb: u64,
+) -> u64 {
+    let hob_base = layout.runtime_hob_base;
+    let memory_bottom = layout.runtime_memory_bottom;
+
+    let handoff_info_table = pi::hob::HandoffInfoTable {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_HANDOFF,
+            length: size_of::<pi::hob::HandoffInfoTable>() as u16,
+            reserved: 0,
+        },
+        version: 9u32,
+        boot_mode: pi::boot_mode::BOOT_WITH_FULL_CONFIGURATION,
+        efi_memory_top: memory_top_below_4gb,
+        efi_memory_bottom: memory_bottom,
+        efi_free_memory_top: memory_top_below_4gb,
+        efi_free_memory_bottom: memory_bottom
+            + ipl::efi_page_to_size(ipl::efi_size_to_page(size_of::<HobTemplate>() as u64)),
+        efi_end_of_hob_list: hob_base + size_of::<HobTemplate>() as u64,
+    };
+
+    let cpu = pi::hob::Cpu {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_CPU,
+            length: size_of::<pi::hob::Cpu>() as u16,
+            reserved: 0,
+        },
+        size_of_memory_space: memory::cpu_get_memory_space_size(),
+        size_of_io_space: 16u8,
+        reserved: [0u8; 6],
+    };
+
+    let firmware_volume = pi::hob::FirmwareVolume {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_FV,
+            length: size_of::<pi::hob::FirmwareVolume>() as u16,
+            reserved: 0,
+        },
+        base_address: TD_SHIM_PAYLOAD_BASE as u64,
+        length: TD_SHIM_PAYLOAD_SIZE as u64,
+    };
+
+    const MEMORY_ALLOCATION_STACK_GUID: efi::Guid = efi::Guid::from_fields(
+        0x4ED4BF27,
+        0x4092,
+        0x42E9,
+        0x80,
+        0x7D,
+        &[0x52, 0x7B, 0x1D, 0x00, 0xC9, 0xBD],
+    );
+    let stack = pi::hob::MemoryAllocation {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_MEMORY_ALLOCATION,
+            length: size_of::<pi::hob::MemoryAllocation>() as u16,
+            reserved: 0,
+        },
+        alloc_descriptor: pi::hob::MemoryAllocationHeader {
+            name: *MEMORY_ALLOCATION_STACK_GUID.as_bytes(),
+            memory_base_address: layout.runtime_stack_base,
+            memory_length: TD_PAYLOAD_STACK_SIZE as u64
+                - (stack_guard::STACK_GUARD_PAGE_SIZE + stack_guard::STACK_EXCEPTION_PAGE_SIZE)
+                    as u64,
+            memory_type: efi::MemoryType::BootServicesData as u32,
+            reserved: [0u8; 4],
+        },
+    };
+
+    // Enable host Paging
+    const PAGE_TABLE_NAME_GUID: efi::Guid = efi::Guid::from_fields(
+        0xF8E21975,
+        0x0899,
+        0x4F58,
+        0xA4,
+        0xBE,
+        &[0x55, 0x25, 0xA9, 0xC6, 0xD7, 0x7A],
+    );
+
+    let page_table = pi::hob::MemoryAllocation {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_MEMORY_ALLOCATION,
+            length: size_of::<pi::hob::MemoryAllocation>() as u16,
+            reserved: 0,
+        },
+        alloc_descriptor: pi::hob::MemoryAllocationHeader {
+            name: *PAGE_TABLE_NAME_GUID.as_bytes(),
+            memory_base_address: TD_PAYLOAD_PAGE_TABLE_BASE,
+            memory_length: td_paging::PAGE_TABLE_SIZE as u64,
+            memory_type: efi::MemoryType::BootServicesData as u32,
+            reserved: [0u8; 4],
+        },
+    };
+
+    let lowmemory = hob::get_system_memory_size_below_4gb(hob_list).unwrap();
+
+    let memory_above_1m = pi::hob::ResourceDescription {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_RESOURCE_DESCRIPTOR,
+            length: size_of::<pi::hob::ResourceDescription>() as u16,
+            reserved: 0,
+        },
+        owner: *efi::Guid::from_fields(
+            0x4ED4BF27,
+            0x4092,
+            0x42E9,
+            0x80,
+            0x7D,
+            &[0x52, 0x7B, 0x1D, 0x00, 0xC9, 0xBD],
+        )
+        .as_bytes(),
+        resource_type: pi::hob::RESOURCE_SYSTEM_MEMORY,
+        resource_attribute: pi::hob::RESOURCE_ATTRIBUTE_PRESENT
+            | pi::hob::RESOURCE_ATTRIBUTE_INITIALIZED
+            | pi::hob::RESOURCE_ATTRIBUTE_UNCACHEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_WRITE_COMBINEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_WRITE_THROUGH_CACHEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_TESTED,
+        physical_start: 0x100000u64,
+        resource_length: lowmemory - 0x100000u64,
+    };
+
+    let memory_below_1m = pi::hob::ResourceDescription {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_RESOURCE_DESCRIPTOR,
+            length: size_of::<pi::hob::ResourceDescription>() as u16,
+            reserved: 0,
+        },
+        owner: *efi::Guid::from_fields(
+            0x4ED4BF27,
+            0x4092,
+            0x42E9,
+            0x80,
+            0x7D,
+            &[0x52, 0x7B, 0x1D, 0x00, 0xC9, 0xBD],
+        )
+        .as_bytes(),
+        resource_type: pi::hob::RESOURCE_SYSTEM_MEMORY,
+        resource_attribute: pi::hob::RESOURCE_ATTRIBUTE_PRESENT
+            | pi::hob::RESOURCE_ATTRIBUTE_INITIALIZED
+            | pi::hob::RESOURCE_ATTRIBUTE_UNCACHEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_WRITE_COMBINEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_WRITE_THROUGH_CACHEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_WRITE_BACK_CACHEABLE
+            | pi::hob::RESOURCE_ATTRIBUTE_TESTED,
+        physical_start: 0u64,
+        resource_length: 0x80000u64 + 0x20000u64,
+    };
+
+    const PAYLOAD_NAME_GUID: efi::Guid = efi::Guid::from_fields(
+        0x6948d4a,
+        0xd359,
+        0x4721,
+        0xad,
+        0xf6,
+        &[0x52, 0x25, 0x48, 0x5a, 0x6a, 0x3a],
+    );
+
+    let payload = pi::hob::MemoryAllocation {
+        header: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_MEMORY_ALLOCATION,
+            length: size_of::<pi::hob::MemoryAllocation>() as u16,
+            reserved: 0,
+        },
+        alloc_descriptor: pi::hob::MemoryAllocationHeader {
+            name: *PAYLOAD_NAME_GUID.as_bytes(),
+            memory_base_address: basefw,
+            memory_length: ipl::efi_page_to_size(ipl::efi_size_to_page(basefwsize)),
+            memory_type: efi::MemoryType::BootServicesCode as u32,
+            reserved: [0u8; 4],
+        },
+    };
+
+    let hob_template = HobTemplate {
+        handoff_info_table,
+        firmware_volume,
+        cpu,
+        payload,
+        page_table,
+        stack,
+        memory_above_1m,
+        memory_blow_1m: memory_below_1m,
+        end_off_hob: pi::hob::Header {
+            r#type: pi::hob::HOB_TYPE_END_OF_HOB_LIST,
+            length: size_of::<pi::hob::Header>() as u16,
+            reserved: 0,
+        },
+    };
+
+    // Safe because we are the only consumer.
+    let hob_slice = unsafe {
+        memslice::get_dynamic_mem_slice_mut(memslice::SliceType::PayloadHob, hob_base as usize)
+    };
+    let _res = hob_slice.pwrite(hob_template, 0);
+
+    hob_base
+}
+
+#[cfg(feature = "secure-boot")]
+fn secure_boot_verify_payload<'a>(payload: &'a [u8], td_event_log: &mut TdEventLog) -> &'a [u8] {
+    let cfv = memslice::get_mem_slice(memslice::SliceType::Config);
+    let verifier = verifier::PayloadVerifier::new(payload, cfv)
+        .expect("Secure Boot: Cannot read verify header from payload binary");
+
+    td_event_log.create_event_log(
+        4,
+        EV_PLATFORM_CONFIG_FLAGS,
+        b"td payload",
+        verifier::PayloadVerifier::get_trust_anchor(cfv).unwrap(),
+    );
+    verifier.verify().expect("Verification failed");
+    td_event_log.create_event_log(4, EV_PLATFORM_CONFIG_FLAGS, b"td payload", payload);
+    td_event_log.create_event_log(
+        4,
+        EV_PLATFORM_CONFIG_FLAGS,
+        b"td payload svn",
+        &u64::to_le_bytes(verifier.get_payload_svn()),
+    );
+    // Parse out the image from signed payload
+    return verifier::PayloadVerifier::get_payload_image(payload)
+        .expect("Unable to get payload image from signed binary");
+}
diff --git a/td-shim/src/bin/td-shim/memory.rs b/td-shim/src/bin/td-shim/memory.rs
new file mode 100644
index 00000000..8f5295a5
--- /dev/null
+++ b/td-shim/src/bin/td-shim/memory.rs
@@ -0,0 +1,162 @@
+// Copyright (c) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use td_layout::runtime::{TD_PAYLOAD_EVENT_LOG_SIZE, TD_PAYLOAD_SIZE};
+use td_layout::RuntimeMemoryLayout;
+use td_uefi_pi::hob;
+use x86_64::{
+    structures::paging::PageTableFlags as Flags,
+    structures::paging::{OffsetPageTable, PageTable},
+    PhysAddr, VirtAddr,
+};
+
+use crate::td;
+
+const EXTENDED_FUNCTION_INFO: u32 = 0x80000000;
+const VIRT_PHYS_MEM_SIZES: u32 = 0x80000008;
+
+pub struct Memory<'a> {
+    pub layout: &'a RuntimeMemoryLayout,
+    pt: OffsetPageTable<'a>,
+    memory_size: u64,
+}
+
+impl<'a> Memory<'a> {
+    pub fn new(layout: &RuntimeMemoryLayout, memory_size: u64) -> Memory {
+        let pt = unsafe {
+            OffsetPageTable::new(
+                &mut *(layout.runtime_page_table_base as *mut PageTable),
+                VirtAddr::new(td_paging::PHYS_VIRT_OFFSET as u64),
+            )
+        };
+
+        Memory {
+            pt,
+            layout,
+            memory_size,
+        }
+    }
+
+    pub fn setup_paging(&mut self) {
+        let shared_page_flag = td::get_shared_page_mask();
+        let flags = Flags::PRESENT | Flags::WRITABLE;
+        let with_s_flags = unsafe { Flags::from_bits_unchecked(flags.bits() | shared_page_flag) };
+        let with_nx_flags = flags | Flags::NO_EXECUTE;
+        log::info!(
+            "shared page flags - smask: {:#x} flags: {:?}\n",
+            shared_page_flag,
+            with_s_flags
+        );
+
+        // 0..runtime_payload_base
+        td_paging::create_mapping(
+            &mut self.pt,
+            PhysAddr::new(0),
+            VirtAddr::new(0),
+            td_paging::PAGE_SIZE_DEFAULT as u64,
+            self.layout.runtime_payload_base, // self.layout.runtime_payload_base - 0
+        );
+
+        // runtime_payload_base..runtime_payload_end
+        td_paging::create_mapping(
+            &mut self.pt,
+            PhysAddr::new(self.layout.runtime_payload_base),
+            VirtAddr::new(self.layout.runtime_payload_base),
+            td_paging::PAGE_SIZE_4K as u64,
+            TD_PAYLOAD_SIZE as u64,
+        );
+
+        let runtime_payload_end = self.layout.runtime_payload_base + TD_PAYLOAD_SIZE as u64;
+        // runtime_payload_end..runtime_dma_base
+        td_paging::create_mapping(
+            &mut self.pt,
+            PhysAddr::new(runtime_payload_end),
+            VirtAddr::new(runtime_payload_end),
+            td_paging::PAGE_SIZE_DEFAULT as u64,
+            self.layout.runtime_dma_base - runtime_payload_end,
+        );
+
+        // runtime_dma_base..runtime_heap_base with Shared flag
+        td_paging::create_mapping_with_flags(
+            &mut self.pt,
+            PhysAddr::new(self.layout.runtime_dma_base),
+            VirtAddr::new(self.layout.runtime_dma_base),
+            td_paging::PAGE_SIZE_DEFAULT as u64,
+            self.layout.runtime_heap_base - self.layout.runtime_dma_base,
+            with_s_flags | with_nx_flags,
+        );
+
+        let runtime_memory_top =
+            self.layout.runtime_event_log_base + TD_PAYLOAD_EVENT_LOG_SIZE as u64;
+        // runtime_heap_base..memory_top with NX flag
+        td_paging::create_mapping_with_flags(
+            &mut self.pt,
+            PhysAddr::new(self.layout.runtime_heap_base),
+            VirtAddr::new(self.layout.runtime_heap_base),
+            td_paging::PAGE_SIZE_4K as u64,
+            runtime_memory_top - self.layout.runtime_heap_base,
+            with_nx_flags,
+        );
+
+        // runtime_memory_top..memory_size (end)
+        td_paging::create_mapping(
+            &mut self.pt,
+            PhysAddr::new(runtime_memory_top),
+            VirtAddr::new(runtime_memory_top),
+            td_paging::PAGE_SIZE_DEFAULT as u64,
+            self.memory_size - runtime_memory_top,
+        );
+
+        td_paging::cr3_write();
+    }
+
+    pub fn set_write_protect(&mut self, address: u64, size: u64) {
+        let flags = Flags::PRESENT | Flags::USER_ACCESSIBLE;
+
+        td_paging::set_page_flags(&mut self.pt, VirtAddr::new(address), size as i64, flags);
+    }
+
+    pub fn set_nx_bit(&mut self, address: u64, size: u64) {
+        let flags = Flags::PRESENT | Flags::WRITABLE | Flags::USER_ACCESSIBLE | Flags::NO_EXECUTE;
+
+        td_paging::set_page_flags(&mut self.pt, VirtAddr::new(address), size as i64, flags);
+    }
+
+    pub fn set_not_present(&mut self, address: u64, size: u64) {
+        let flags: Flags = Flags::empty();
+
+        td_paging::set_page_flags(&mut self.pt, VirtAddr::new(address), size as i64, flags);
+    }
+}
+
+/// Get the maximum physical memory addressability of the processor.
+pub fn cpu_get_memory_space_size() -> u8 {
+    let cpuid = unsafe { core::arch::x86_64::__cpuid(EXTENDED_FUNCTION_INFO) };
+    let size_of_mem_space = if cpuid.eax >= VIRT_PHYS_MEM_SIZES {
+        let cpuid = unsafe { core::arch::x86_64::__cpuid(VIRT_PHYS_MEM_SIZES) };
+        // CPUID.80000008H:EAX[bits 7-0]: the size of the physical address range
+        cpuid.eax as u8
+    } else {
+        // fallback value according to edk2 core
+        36
+    };
+
+    log::info!(
+        "Maximum physical memory addressability of the processor - {}\n",
+        size_of_mem_space
+    );
+
+    // TBD: currently we only map 64GB of memory; change back to size_of_mem_space
+    // once the page table allocator is ready.
+    core::cmp::min(36, size_of_mem_space)
+}
+
+pub fn get_memory_size(hob: &[u8]) -> u64 {
+    let cpu_mem_space_size = cpu_get_memory_space_size() as u32;
+    let cpu_memory_size = 2u64.pow(cpu_mem_space_size);
+    let hob_memory_size = hob::get_total_memory_top(hob).unwrap();
+    let mem_size = core::cmp::min(cpu_memory_size, hob_memory_size);
+    log::info!("memory_size: 0x{:x}\n", mem_size);
+    mem_size
+}
diff --git a/td-shim/src/bin/td-shim/mp.rs b/td-shim/src/bin/td-shim/mp.rs
new file mode 100644
index 00000000..2e1af689
--- /dev/null
+++ b/td-shim/src/bin/td-shim/mp.rs
@@ -0,0 +1,163 @@
+// Copyright (c) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use core::convert::TryInto;
+use core::mem::size_of;
+use zerocopy::{AsBytes, FromBytes};
+
+use td_shim::acpi::{self, GenericSdtHeader};
+
+// 255 vCPUs need 2278 bytes, refer to create_madt().
+const MADT_MAX_SIZE: usize = 0xc00;
+const NUM_8259_IRQS: usize = 16;
+
+const ACPI_1_0_PROCESSOR_LOCAL_APIC: u8 = 0x00;
+const ACPI_MADT_MPWK_STRUCT_TYPE: u8 = 0x10;
+
+pub struct Madt {
+    pub data: [u8; MADT_MAX_SIZE],
+    pub size: usize,
+}
+
+impl Madt {
+    fn default() -> Self {
+        Madt {
+            data: [0; MADT_MAX_SIZE],
+            size: 0,
+        }
+    }
+
+    fn write(&mut self, data: &[u8]) {
+        self.data[self.size..self.size + data.len()].copy_from_slice(data);
+        self.size += data.len();
+
+        // Update the length field in header
+        self.data[4..8].copy_from_slice(&u32::to_le_bytes(self.size as u32));
+        self.update_checksum()
+    }
+
+    fn update_checksum(&mut self) {
+        self.data[9] = 0;
+        self.data[9] = acpi::calculate_checksum(&self.data[0..self.size]);
+    }
+
+    pub fn as_bytes(&self) -> &[u8] {
+        &self.data[..self.size]
+    }
+}
+
+#[repr(packed)]
+#[derive(Default, AsBytes, FromBytes)]
+struct LocalApic {
+    pub r#type: u8,
+    pub length: u8,
+    pub processor_id: u8,
+    pub apic_id: u8,
+    pub flags: u32,
+}
+
+#[repr(packed)]
+#[derive(Default, AsBytes, FromBytes)]
+struct MadtMpwkStruct {
+    r#type: u8,
+    length: u8,
+    mail_box_version: u16,
+    reserved: u32,
+    mail_box_address: u64,
+}
+
+// Create ACPI MADT table based on the one from VMM
+// APIC / IRQ information should be provided by VMM
+// TD-Shim appends the MP wakeup structure to the table
+pub fn create_madt(vmm_madt: &[u8], mailbox_base: u64) -> Option<Madt> {
+    if &vmm_madt[0..4] != b"APIC" || vmm_madt.len() < size_of::<GenericSdtHeader>() {
+        return None;
+    }
+
+    // Safe since we have checked the length
+    let len = u32::from_le_bytes(vmm_madt[4..8].try_into().unwrap());
+
+    let mut madt = Madt::default();
+    madt.write(&vmm_madt[..len as usize]);
+
+    let mpwk = MadtMpwkStruct {
+        r#type: ACPI_MADT_MPWK_STRUCT_TYPE,
+        length: size_of::<MadtMpwkStruct>() as u8,
+        mail_box_version: 1,
+        reserved: 0,
+        mail_box_address: mailbox_base,
+    };
+    madt.write(mpwk.as_bytes());
+
+    Some(madt)
+}
+
+// If there is no MADT passed from VMM, construct the default
+// one which contains the APIC base / version, local APIC and
+// MP wakeup structure
+pub fn create_madt_default(cpu_num: u32, mailbox_base: u64) -> Option<Madt> {
+    log::info!("create_madt_default(): cpu_num: {:x}\n", cpu_num);
+
+    let table_length = size_of::<GenericSdtHeader>()
+        + 8
+        + cpu_num as usize * size_of::<LocalApic>()
+        + size_of::<MadtMpwkStruct>();
+    if cpu_num == 0 || table_length > MADT_MAX_SIZE {
+        return None;
+    }
+
+    let mut madt = Madt::default();
+    let header = GenericSdtHeader::new(b"APIC", table_length as u32, 1);
+
+    // Write generic header
+    madt.write(header.as_bytes());
+
+    // Write APIC base and version
+    madt.write(&0xfee00000u32.to_le_bytes());
+    madt.write(&1u32.to_le_bytes());
+
+    for cpu in 0..cpu_num {
+        let lapic = LocalApic {
+            r#type: ACPI_1_0_PROCESSOR_LOCAL_APIC,
+            length: size_of::<LocalApic>() as u8,
+            processor_id: cpu as u8,
+            apic_id: cpu as u8,
+            flags: 1,
+        };
+        madt.write(lapic.as_bytes());
+    }
+
+    let mpwk = MadtMpwkStruct {
+        r#type: ACPI_MADT_MPWK_STRUCT_TYPE,
+        length: size_of::<MadtMpwkStruct>() as u8,
+        mail_box_version: 1,
+        reserved: 0,
+        mail_box_address: mailbox_base,
+    };
+    madt.write(mpwk.as_bytes());
+
+    assert_eq!(madt.size, table_length);
+    madt.update_checksum();
+
+    Some(madt)
+}
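// Aside: a sketch (not part of this patch) of the size bound behind MADT_MAX_SIZE,
// derived from the struct sizes in this file (36-byte SDT header, 8 bytes of APIC
// base/version, an 8-byte LocalApic per vCPU, and the 16-byte MP wakeup structure):
fn madt_len(cpu_num: usize) -> usize {
    36 + 8 + cpu_num * 8 + 16
}
// madt_len(255) == 2100, which fits comfortably in MADT_MAX_SIZE (0xc00 == 3072).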
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_create_madt() {
+        assert!(create_madt_default(0, 0x1000).is_none());
+        let madt = create_madt_default(255, 0x1000).unwrap();
+        assert!(madt.size < MADT_MAX_SIZE);
+
+        let mut vmm_madt = [0u8; size_of::<GenericSdtHeader>()];
+        assert!(create_madt(&vmm_madt, 0x1000).is_none());
+
+        vmm_madt[0..4].copy_from_slice(b"APIC");
+        // Set the header length field so create_madt() copies the whole input table.
+        vmm_madt[4..8].copy_from_slice(&u32::to_le_bytes(size_of::<GenericSdtHeader>() as u32));
+        let madt = create_madt(&vmm_madt, 0x1000).unwrap();
+        assert_eq!(madt.size, vmm_madt.len() + size_of::<MadtMpwkStruct>());
+    }
+}
diff --git a/td-shim/src/bin/td-shim/stack_guard.rs b/td-shim/src/bin/td-shim/stack_guard.rs
new file mode 100644
index 00000000..7d64f8e3
--- /dev/null
+++ b/td-shim/src/bin/td-shim/stack_guard.rs
@@ -0,0 +1,207 @@
+// Copyright (c) 2021 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+use core::{mem::size_of, slice::from_raw_parts};
+use lazy_static::lazy_static;
+use spin::Mutex;
+use td_exception::idt;
+use x86::{
+    bits64::task::TaskStateSegment, dtables::DescriptorTablePointer, segmentation::SegmentSelector,
+    Ring,
+};
+
+use crate::memory::Memory;
+
+//
+// stack guard feature
+//
+// +-----------------+ <-- Stack Top
+// |                 |
+// |  Normal Stack   |
+// |                 |
+// +-----------------+
+// |   Guard Page    | // Not-Present in page table
+// +-----------------+
+// | Exception Stack | // Used in exception handler, when normal stack overflows to guard page.
+// +-----------------+
+//
+
+pub const STACK_GUARD_PAGE_SIZE: usize = 0x1000;
+pub const STACK_EXCEPTION_PAGE_SIZE: usize = 0x1000;
+
+// TSS occupies two GDT entries.
+const TSS_DESC_SIZE: u16 = 2 * size_of::<GdtEntry>() as u16;
+// For x86_64, a GDT with eight entries is defined in `ResetVector/Ia32/ReloadFlat32.asm`.
+// And the TSS needs two GDT entries, so at least 10 GDT entries.
+const MAX_GDT_SIZE: usize = 10;
+// Available present TSS
+const IA32_GDT_TYPE_TSS: u8 = 0x89;
+
+lazy_static! {
+    static ref GDT: Mutex<Gdt> = Mutex::new(Gdt {
+        entries: [GdtEntry(0); MAX_GDT_SIZE],
+    });
+    static ref TSS: Mutex<TaskStateSegment> = Mutex::new(TaskStateSegment::new());
+}
+
+#[derive(Debug, Clone, Copy)]
+struct GdtEntry(u64);
+
+#[repr(align(8))]
+struct Gdt {
+    entries: [GdtEntry; MAX_GDT_SIZE],
+}
+
+#[repr(C, packed)]
+#[derive(Debug, Clone, Copy)]
+struct TssDescriptor {
+    limit15_0: u16,
+    base15_0: u16,
+    base23_16: u8,
+    r#type: u8,
+    limit19_16_and_flags: u8,
+    base31_24: u8,
+    base63_32: u32,
+    reserved: u32,
+}
+
+impl TssDescriptor {
+    fn new(base: u64, limit: u32, r#type: u8) -> Self {
+        TssDescriptor {
+            limit15_0: (limit & 0xffff) as u16,
+            base15_0: (base & 0xffff) as u16,
+            base23_16: (base >> 16 & 0xff) as u8,
+            r#type,
+            limit19_16_and_flags: (limit >> 16 & 0xf) as u8,
+            base31_24: (base >> 24 & 0xff) as u8,
+            base63_32: (base >> 32 & 0xffff_ffff) as u32,
+            reserved: 0,
+        }
+    }
+
+    fn low(&self) -> u64 {
+        (self.limit15_0 as u64)
+            | (self.base15_0 as u64) << 16
+            | (self.base23_16 as u64) << 32
+            | (self.r#type as u64) << 40
+            | (self.limit19_16_and_flags as u64) << 48
+            | (self.base31_24 as u64) << 56
+    }
+
+    fn high(&self) -> u64 {
+        self.base63_32 as u64
+    }
+}
+
+fn store_gdtr() -> DescriptorTablePointer<GdtEntry> {
+    let mut gdtr: DescriptorTablePointer<GdtEntry> = Default::default();
+    unsafe { x86::dtables::sgdt(&mut gdtr) };
+    gdtr
+}
+
+/// Get the Global Descriptor Table from the DescriptorTablePointer.
+///
+/// ### Safety
+///
+/// The caller needs to ensure/protect from:
+/// - the DescriptorTablePointer is valid
+/// - the lifetime of the return reference
+/// - concurrent access to the returned reference
+unsafe fn read_gdt(gdtr: &DescriptorTablePointer<GdtEntry>) -> &'static [GdtEntry] {
+    let gdt_addr = gdtr.base;
+    let gdt_size = (gdtr.limit + 1) as usize / size_of::<GdtEntry>();
+
+    unsafe { from_raw_parts(gdt_addr, gdt_size) }
+}
+
+/// Load DescriptorTablePointer `gdtr` into the Global Descriptor Table Register.
+///
+/// ### Safety
+///
+/// Caller needs to ensure that `gdtr` is valid, otherwise behavior is undefined.
+unsafe fn load_gdtr(gdtr: &DescriptorTablePointer) { + x86::dtables::lgdt(gdtr); +} + +fn setup_tss(exception_page_top: u64) { + // Read the original GDT + let mut gdtr = store_gdtr(); + let gdt_size = gdtr.limit + 1; + let origin_gdt_table_size = (gdt_size / 8) as usize; + assert_ne!(gdtr.base as *const _ as usize, 0); + assert!(origin_gdt_table_size + TSS_DESC_SIZE as usize <= MAX_GDT_SIZE * size_of::()); + + let mut gdt = GDT.lock(); + // Safe because the bootstrap code has initialized GDT and we have verified it just now. + unsafe { + let original_gdt_entries = read_gdt(&gdtr); + // Copy the original GDT to the new GDT + gdt.entries[0..origin_gdt_table_size as usize].copy_from_slice(original_gdt_entries); + } + + // Setup the TSS and append the TSS desc to the GDT + let mut tss = &mut *TSS.lock(); + tss.set_ist(0, exception_page_top); + let tss_desc: TssDescriptor = TssDescriptor::new( + tss as *const _ as u64, + size_of::() as u32 - 1, + IA32_GDT_TYPE_TSS, + ); + let mut tss_desc_entry = &mut gdt.entries[origin_gdt_table_size..origin_gdt_table_size + 2]; + tss_desc_entry[0].0 = tss_desc.low(); + tss_desc_entry[1].0 = tss_desc.high(); + + gdtr.base = &gdt.entries as *const _; + gdtr.limit = gdt_size + TSS_DESC_SIZE - 1; + // Safe because the `gdtr` is valid. + unsafe { load_gdtr(&gdtr) }; + + // load the tss selector into the task register + let tss_sel = SegmentSelector::new(origin_gdt_table_size as u16, Ring::Ring0); + unsafe { x86::task::load_tr(tss_sel) }; +} + +fn setup_idt() { + let mut idtr = idt::store_idtr(); + // Safe because _start() ensures that td_exception::setup_exception_handlers() get called + // before stack_guard_enable(). + unsafe { + let mut idt_entries = idt::read_idt(&idtr); + idt_entries[14].set_ist(1); + idt::load_idtr(&idtr); + } +} + +/// Turn on the stack red zone to guard from stack overflow. +/// +/// The GDT/IDT must have been initialized when calling this function. 
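+///
+/// Call sketch (hypothetical `mem` value; the real caller builds `Memory` from
+/// the runtime layout first):
+///
+/// ```ignore
+/// td_exception::setup_exception_handlers(); // IDT must be live beforehand
+/// stack_guard_enable(&mut mem);             // guard page becomes not-present
+/// ```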
+pub fn stack_guard_enable(mem: &mut Memory) { + let stack_addr = mem.layout.runtime_stack_base; + let guard_page_addr = stack_addr + STACK_EXCEPTION_PAGE_SIZE as u64; + let exception_page_top = guard_page_addr; + + assert!(guard_page_addr + (STACK_GUARD_PAGE_SIZE as u64) < mem.layout.runtime_stack_top); + log::info!( + "Stack Guard: guard page top {:x}, known good stack top {:x}\n", + guard_page_addr, + exception_page_top + ); + mem.set_not_present(guard_page_addr, STACK_GUARD_PAGE_SIZE as u64); + + setup_idt(); + setup_tss(exception_page_top); +} + +#[cfg(test)] +mod tests { + use super::*; + use td_layout::runtime::TD_PAYLOAD_STACK_SIZE; + + #[test] + fn test_stack_guard_struct_size() { + assert_eq!(size_of::(), 8); + assert_eq!(size_of::(), TSS_DESC_SIZE as usize); + assert!(STACK_EXCEPTION_PAGE_SIZE + STACK_GUARD_PAGE_SIZE < TD_PAYLOAD_STACK_SIZE as usize); + } +} diff --git a/td-shim/src/bin/td-shim/tcg.rs b/td-shim/src/bin/td-shim/tcg.rs new file mode 100644 index 00000000..42caf320 --- /dev/null +++ b/td-shim/src/bin/td-shim/tcg.rs @@ -0,0 +1,113 @@ +// Copyright (c) 2020 Intel Corporation +// +// SPDX-License-Identifier: BSD-2-Clause-Patent + +use core::convert::TryInto; +use scroll::{Pread, Pwrite}; +use td_shim::event_log::{ + TcgPcrEvent2Header, Tdel, TpmlDigestValues, TpmtHa, TpmuHa, PCR_EVENT_HEADER_SIZE, + SHA384_DIGEST_SIZE, TPML_ALG_SHA384, +}; + +#[allow(unused)] +pub struct TdEventLog { + area: &'static mut [u8], + format: i32, + lasa: u64, + laml: usize, + size: usize, + last: u64, + started: bool, + truncated: bool, +} + +impl TdEventLog { + pub fn new(td_event_mem: &'static mut [u8]) -> TdEventLog { + let laml = td_event_mem.len(); + + TdEventLog { + area: td_event_mem, + format: 0x02, + lasa: 0, + laml, + size: 0, + last: 0, + started: false, + truncated: false, + } + } + + pub fn create_tdel(&self) -> Tdel { + Tdel::new(self.laml as u64, self.lasa as u64) + } + + pub fn create_event_log( + &mut self, + pcr_index: u32, + event_type: u32, + event_data: &[u8], + hash_data: &[u8], + ) { + log::info!("calc td_hob digest ...\n"); + + let event_data_size = event_data.len(); + let hash_value = ring::digest::digest(&ring::digest::SHA384, hash_data); + let hash_value = hash_value.as_ref(); + assert_eq!(hash_value.len(), SHA384_DIGEST_SIZE); + // Safe to unwrap() because we have checked the size. 
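+        // (SHA-384 output is always SHA384_DIGEST_SIZE == 48 bytes, which the
+        // assert above confirms, so this fixed-size conversion cannot fail.)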
+ let hash384_value: [u8; SHA384_DIGEST_SIZE] = hash_value.try_into().unwrap(); + + crate::td::extend_rtmr(&hash384_value, pcr_index); + + let event2_header = TcgPcrEvent2Header { + pcr_index, + event_type, + digest: TpmlDigestValues { + count: 1, + digests: [TpmtHa { + hash_alg: TPML_ALG_SHA384, + digest: TpmuHa { + sha384: hash384_value, + }, + }], + }, + event_size: event_data_size as u32, + }; + let new_log_size = PCR_EVENT_HEADER_SIZE + event2_header.event_size as usize; + if self.size + new_log_size > self.laml { + return; + } + + self.write_header(&event2_header, self.size); + self.write_data(event_data, self.size + PCR_EVENT_HEADER_SIZE); + + self.last = self.lasa + self.size as u64; + self.size += new_log_size; + } + + fn write_header(&mut self, header: &TcgPcrEvent2Header, offset: usize) { + let _ = self.area.pwrite(header, offset); + } + + fn write_data(&mut self, data: &[u8], offset: usize) { + self.area[offset..offset + data.len()].copy_from_slice(data); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_create_event_log() { + let mut buf = [0u8; 128]; + let slice = + unsafe { &mut *core::ptr::slice_from_raw_parts_mut(buf.as_mut_ptr(), buf.len()) }; + let mut logger = TdEventLog::new(slice); + let tdel = logger.create_tdel(); + assert_eq!(tdel.laml as u64, 128); + assert_eq!(tdel.lasa as u64, 0); + + logger.create_event_log(1, 2, &[0u8], &[08u8]); + } +} diff --git a/td-shim/src/bin/td-shim/td/dummy.rs b/td-shim/src/bin/td-shim/td/dummy.rs new file mode 100644 index 00000000..9e36df0b --- /dev/null +++ b/td-shim/src/bin/td-shim/td/dummy.rs @@ -0,0 +1,15 @@ +// Copyright (c) 2022 Alibaba Cloud +// +// SPDX-License-Identifier: BSD-2-Clause-Patent + +pub fn get_shared_page_mask() -> u64 { + 0 +} + +pub fn accept_memory_resource_range(_cpu_num: u32, _address: u64, _size: u64) {} + +pub fn get_num_vcpus() -> u32 { + 1 +} + +pub fn extend_rtmr(_data: &[u8], _pcr_index: u32) {} diff --git a/td-shim/src/bin/td-shim/td/mod.rs b/td-shim/src/bin/td-shim/td/mod.rs new file mode 100644 index 00000000..c20a1473 --- /dev/null +++ b/td-shim/src/bin/td-shim/td/mod.rs @@ -0,0 +1,15 @@ +// Copyright (c) 2022 Alibaba Cloud +// +// SPDX-License-Identifier: BSD-2-Clause-Patent + +#[cfg(feature = "tdx")] +mod tdx; +#[cfg(feature = "tdx")] +mod tdx_mailbox; +#[cfg(feature = "tdx")] +pub use tdx::*; + +#[cfg(not(feature = "tdx"))] +mod dummy; +#[cfg(not(feature = "tdx"))] +pub use dummy::*; diff --git a/td-shim/src/bin/td-shim/td/tdx.rs b/td-shim/src/bin/td-shim/td/tdx.rs new file mode 100644 index 00000000..fae2901e --- /dev/null +++ b/td-shim/src/bin/td-shim/td/tdx.rs @@ -0,0 +1,67 @@ +// Copyright (c) 2022 Alibaba Cloud +// +// SPDX-License-Identifier: BSD-2-Clause-Patent + +use tdx_tdcall::tdx; + +extern "win64" { + fn asm_read_msr64(index: u32) -> u64; + fn asm_write_msr64(index: u32, value: u64) -> u64; +} + +const EXTENDED_FUNCTION_INFO: u32 = 0x80000000; +const EXTENDED_PROCESSOR_INFO: u32 = 0x80000001; + +const SHA384_DIGEST_SIZE: usize = 48; + +pub fn get_shared_page_mask() -> u64 { + tdx_tdcall::tdx::td_shared_page_mask() +} + +pub fn accept_memory_resource_range(mut cpu_num: u32, address: u64, size: u64) { + super::tdx_mailbox::accept_memory_resource_range(cpu_num, address, size) +} + +pub fn relocate_mailbox(address: u32) { + super::tdx_mailbox::relocate_mailbox(address).expect("Unable to relocate mailbox"); +} + +pub fn get_num_vcpus() -> u32 { + let mut td_info = tdx::TdInfoReturnData { + gpaw: 0, + attributes: 0, + max_vcpus: 0, + num_vcpus: 0, + rsvd: [0; 3], + 
};
+
+    tdx::tdcall_get_td_info(&mut td_info);
+    log::info!("gpaw - {:?}\n", td_info.gpaw);
+    log::info!("num_vcpus - {:?}\n", td_info.num_vcpus);
+
+    td_info.num_vcpus
+}
+
+pub fn extend_rtmr(data: &[u8; SHA384_DIGEST_SIZE], pcr_index: u32) {
+    let digest = tdx::TdxDigest { data: *data };
+
+    log::info!("extend_rtmr ...\n");
+    let mr_index = match pcr_index {
+        0 => {
+            log::info!("PCR[0] should be extended with RDMR\n");
+            0xFF
+        }
+        1 | 7 => 0,
+        2..=6 => 1,
+        8..=15 => 2,
+        _ => {
+            log::info!("invalid pcr_index 0x{:x}\n", pcr_index);
+            0xFF
+        }
+    };
+    if mr_index >= 3 {
+        return;
+    }
+
+    tdx::tdcall_extend_rtmr(&digest, mr_index);
+}
diff --git a/td-shim/src/bin/td-shim/td/tdx_mailbox.rs b/td-shim/src/bin/td-shim/td/tdx_mailbox.rs
new file mode 100644
index 00000000..d07d1f06
--- /dev/null
+++ b/td-shim/src/bin/td-shim/td/tdx_mailbox.rs
@@ -0,0 +1,315 @@
+// Copyright (c) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+
+extern crate alloc;
+
+use alloc::vec::Vec;
+use core::cmp::min;
+use core::ops::RangeInclusive;
+use td_layout::memslice::{get_dynamic_mem_slice_mut, get_mem_slice_mut, SliceType};
+use tdx_tdcall::tdx;
+
+use crate::asm::{ap_relocated_func, ap_relocated_func_size};
+
+// The number of APs to wake up is limited by the heap size available for stack allocation.
+// The maximum size of memory used for AP stacks is 30 KB.
+const MAX_WORKING_AP_COUNT: u32 = 15;
+const AP_TEMP_STACK_SIZE: usize = 0x800;
+
+const ACCEPT_CHUNK_SIZE: u64 = 0x2000000;
+const ACCEPT_PAGE_SIZE: u64 = 0x200000;
+const PAGE_SIZE_2M: u64 = 0x200000;
+const PAGE_SIZE_4K: u64 = 0x1000;
+const MAILBOX_SIZE: usize = 0x1000;
+
+#[derive(Debug)]
+pub enum MailboxError {
+    Relocation,
+}
+
+mod spec {
+    pub type Field = ::core::ops::Range<usize>;
+
+    pub const COMMAND: Field = 0..4;
+    pub const APIC_ID: Field = 4..8;
+    pub const WAKEUP_VECTOR: Field = 0x08..0x10;
+    pub const FW_ARGS: usize = 0x800;
+    pub const CPU_ARRIVAL: Field = 0x900..0x904;
+    pub const CPU_EXITING: Field = 0xa00..0xa04;
+
+    pub const MP_WAKEUP_COMMAND_NOOP: u32 = 0;
+    pub const MP_WAKEUP_COMMAND_WAKEUP: u32 = 1;
+    pub const MP_WAKEUP_COMMAND_SLEEP: u32 = 2;
+    pub const MP_WAKEUP_COMMAND_ACCEPT_PAGES: u32 = 3;
+    pub const MP_WAKEUP_COMMAND_AVAILABLE: u32 = 4;
+
+    pub const MAILBOX_APICID_INVALID: u32 = 0xffffffff;
+    pub const MAILBOX_APICID_BROADCAST: u32 = 0xfffffffe;
+}
+
+struct MailBox<'a> {
+    buffer: &'a mut [u8],
+}
+
+impl MailBox<'_> {
+    fn read_volatile<T>(src: *const T) -> T {
+        // Safety: mailbox memory is always valid.
+        // Mailbox memory must be read/written with volatile operations, since
+        // it may be changed by an AP at any time.
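+        // A plain load here could legally be hoisted out of a polling loop by
+        // the compiler; read_volatile forces a fresh load on every call (see
+        // the spin loop in wait_for_ap_response below).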
+        unsafe { core::ptr::read_volatile(src) }
+    }
+
+    fn write_volatile<T>(dst: *mut T, src: T) {
+        // Safety: mailbox memory is always valid
+        unsafe { core::ptr::write_volatile(dst, src) }
+    }
+
+    fn new(buffer: &mut [u8]) -> MailBox {
+        MailBox { buffer }
+    }
+
+    fn apic_id(&self) -> u32 {
+        let p_apic_id = self.buffer[spec::APIC_ID].as_ptr() as *const u32;
+        MailBox::read_volatile(p_apic_id)
+    }
+
+    fn fw_arg(&self, index: usize) -> u64 {
+        let offset = spec::FW_ARGS + index * 8;
+        let p_fw_arg = self.buffer[offset..offset + 8].as_ptr() as *const u64;
+        MailBox::read_volatile(p_fw_arg)
+    }
+
+    fn cpu_arrival(&self) -> u32 {
+        let p_cpu_arrival = self.buffer[spec::CPU_ARRIVAL].as_ptr() as *const u32;
+        MailBox::read_volatile(p_cpu_arrival)
+    }
+
+    fn cpu_exiting(&self) -> u32 {
+        let p_cpu_exiting = self.buffer[spec::CPU_EXITING].as_ptr() as *const u32;
+        MailBox::read_volatile(p_cpu_exiting)
+    }
+
+    fn set_command(&mut self, command: u32) {
+        let p_command = self.buffer[spec::COMMAND].as_ptr() as *mut u32;
+        MailBox::write_volatile(p_command, command);
+    }
+
+    fn set_apic_id(&mut self, apic_id: u32) {
+        let p_apic_id = self.buffer[spec::APIC_ID].as_ptr() as *mut u32;
+        MailBox::write_volatile(p_apic_id, apic_id);
+    }
+
+    fn set_wakeup_vector(&mut self, wakeup_vector: u32) {
+        let p_wakeup_vector = self.buffer[spec::WAKEUP_VECTOR].as_ptr() as *mut u32;
+        MailBox::write_volatile(p_wakeup_vector, wakeup_vector);
+    }
+
+    fn set_fw_arg(&mut self, index: usize, fw_arg: u64) {
+        let offset = spec::FW_ARGS + index * 8;
+        let p_fw_arg = self.buffer[offset..offset + 8].as_ptr() as *mut u64;
+        MailBox::write_volatile(p_fw_arg, fw_arg);
+    }
+}
+
+fn cpu_pause() {
+    unsafe { asm!("pause") };
+}
+
+fn make_apic_range(end: u32) -> RangeInclusive<u32> {
+    // 0 is the bootstrap processor running this code
+    let start = 1;
+
+    RangeInclusive::new(start, end)
+}
+
+// Wait for the AP to respond to the command set by the BSP, if needed.
+// Typically the AP sets the APIC ID field in the mailbox back to invalid.
+fn wait_for_ap_response(mail_box: &mut MailBox) {
+    loop {
+        if mail_box.apic_id() == spec::MAILBOX_APICID_INVALID {
+            x86::fence::mfence();
+            mail_box.set_command(spec::MP_WAKEUP_COMMAND_NOOP);
+            break;
+        } else {
+            cpu_pause();
+        }
+    }
+}
+
+// Wait for APs to arrive by checking whether they are available
+fn wait_for_ap_arrive(ap_num: u32) {
+    // Safety:
+    // BSP is the owner of the mailbox area, and APs cooperate with BSP to access the mailbox area.
+    let mut mail_box = unsafe { MailBox::new(get_mem_slice_mut(SliceType::MailBox)) };
+    for i in make_apic_range(ap_num) {
+        mail_box.set_command(spec::MP_WAKEUP_COMMAND_AVAILABLE);
+        x86::fence::mfence();
+        mail_box.set_apic_id(i);
+        wait_for_ap_response(&mut mail_box);
+    }
+}
+
+pub fn ap_assign_work(cpu_index: u32, stack: u64, entry: u32) {
+    // Safety:
+    // BSP is the owner of the mailbox area, and APs cooperate with BSP to access the mailbox area.
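+    // Publication order matters below: the wakeup vector, stack argument and
+    // command are written first, an mfence makes them visible, and the APIC ID
+    // is written last, since that is the field the target AP polls.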
+ let mut mail_box = unsafe { MailBox::new(get_mem_slice_mut(SliceType::MailBox)) }; + + mail_box.set_wakeup_vector(entry); + mail_box.set_fw_arg(0, stack); + mail_box.set_command(spec::MP_WAKEUP_COMMAND_ACCEPT_PAGES); + x86::fence::mfence(); + mail_box.set_apic_id(cpu_index); + + wait_for_ap_response(&mut mail_box); +} + +fn td_accept_pages(address: u64, pages: u64, page_size: u64) { + for i in 0..pages { + let mut accept_addr = address + i * page_size; + let accept_level = if page_size == PAGE_SIZE_2M { 1 } else { 0 }; + let res = tdx::tdcall_accept_page(accept_addr | accept_level).map_err(|e| { + if e == tdx::TdCallError::TdxExitReasonPageSizeMismatch { + if page_size == PAGE_SIZE_4K { + log::error!( + "Accept Page Error: 0x{:x}, page_size: {}\n", + accept_addr, + page_size + ); + } else { + td_accept_pages(accept_addr, 512, PAGE_SIZE_4K); + } + } + // TODO: what happens to other error code? + }); + } +} + +fn parallel_accept_memory(cpu_index: u64) { + // Safety: + // During this state, all the BSPs/APs are accessing the mailbox in shared immutable mode. + let mail_box = unsafe { MailBox::new(get_mem_slice_mut(SliceType::MailBox)) }; + + // The cpu number, start and end address of memory to be accepted is + // set to mailbox fw arguments by mp_accept_memory_resource_range() + let cpu_num = mail_box.fw_arg(1); + let start = mail_box.fw_arg(2); + let end = mail_box.fw_arg(3); + + let stride = ACCEPT_CHUNK_SIZE * cpu_num; + let mut phys_addr = start + ACCEPT_CHUNK_SIZE * cpu_index; + + while phys_addr < end { + let page_num = min(ACCEPT_CHUNK_SIZE, end - phys_addr) / ACCEPT_PAGE_SIZE; + td_accept_pages(phys_addr, page_num, ACCEPT_PAGE_SIZE); + phys_addr += stride; + } +} + +pub fn accept_memory_resource_range(mut cpu_num: u32, address: u64, size: u64) { + log::info!( + "mp_accept_memory_resource_range: 0x{:x} - 0x{:x} ... (wait for seconds)\n", + address, + size + ); + + let active_ap_cnt = if cpu_num - 1 > MAX_WORKING_AP_COUNT { + MAX_WORKING_AP_COUNT + } else { + cpu_num - 1 + }; + + let mut align_low = if address & (ACCEPT_PAGE_SIZE - 1) == 0 { + 0 + } else { + min(size, ACCEPT_PAGE_SIZE - (address & (ACCEPT_PAGE_SIZE - 1))) + }; + let mut major_part = size - align_low; + let mut align_high = 0u64; + + if size > ACCEPT_PAGE_SIZE { + major_part = (size - align_low) & !(ACCEPT_PAGE_SIZE - 1); + if major_part < ACCEPT_PAGE_SIZE { + align_low += major_part; + major_part = 0; + } else { + align_high = size - align_low - major_part; + } + } + + wait_for_ap_arrive(active_ap_cnt); + + // Safety: + // BSP is the owner of the mailbox area, and APs cooperate with BSP to access the mailbox area. + let mut mail_box = unsafe { MailBox::new(get_mem_slice_mut(SliceType::MailBox)) }; + let mut stacks: Vec = Vec::with_capacity(AP_TEMP_STACK_SIZE * active_ap_cnt as usize); + + // BSP calles the same function parallel_accept_memory to accept memory, + // so set the firmware arguments here. + // To do: Set these parameter only in ap_assign_work() when there's + // multiple cpus. 
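+    // Worked example with hypothetical values: address = 0x1f0000 and size =
+    // 0x430000 split into align_low = 0x10000 (4K pages up to the first 2M
+    // boundary), major_part = 0x400000 (2M pages, shared with the APs) and
+    // align_high = 0x20000 (trailing 4K pages); the fw args below hand the
+    // worker count and overall range to parallel_accept_memory().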
+ mail_box.set_fw_arg(1, active_ap_cnt as u64 + 1); + mail_box.set_fw_arg(2, address + align_low); + mail_box.set_fw_arg(3, address + size); + + if major_part > 0 { + // 0 is the bootstrap processor running this code + for i in make_apic_range(active_ap_cnt) { + let ap_stack = stacks.as_ptr() as u64 + (i - 1) as u64 * 0x800; + ap_assign_work(i, ap_stack, parallel_accept_memory as *const () as u32); + } + } + + parallel_accept_memory(0); + + td_accept_pages(address, align_low / PAGE_SIZE_4K, PAGE_SIZE_4K); + td_accept_pages( + address + align_low + major_part, + align_high / PAGE_SIZE_4K, + PAGE_SIZE_4K, + ); + + wait_for_ap_arrive(active_ap_cnt); + log::info!("mp_accept_memory_resource_range: done\n"); +} + +pub fn relocate_mailbox(address: u32) -> Result<(), MailboxError> { + // Safety: + // During this state, all the BSPs/APs are accessing the mailbox in shared immutable mode. + let mut mail_box = unsafe { MailBox::new(get_mem_slice_mut(SliceType::MailBox)) }; + + // Safe because the relocated mailbox is statically reserved + // in runtime memory layout + let mut mailbox = + unsafe { get_dynamic_mem_slice_mut(SliceType::RelocatedMailbox, address as usize) }; + + // Get the new AP function and its size + let func_addr = ap_relocated_func as *const fn() as u64; + let mut func_size = 0u64; + unsafe { ap_relocated_func_size(&mut func_size as *mut u64) }; + + // Ensure that the Mailbox memory can hold the AP loop function + if func_size as usize > mailbox.len() { + return Err(MailboxError::Relocation); + } + + // Safety: + // the code size is calculated according to the ASM symbol address + // in the code section + let ap_func = + unsafe { core::slice::from_raw_parts(func_addr as *const u8, func_size as usize) }; + + // Copy AP function into Mailbox memory + // The layout of Mailbox memory: |---Mailbox---|---Relocated function---| + mailbox[MAILBOX_SIZE..MAILBOX_SIZE + ap_func.len()].copy_from_slice(ap_func); + + // Wakeup APs to complete the relocation of mailbox and AP function + mail_box.set_wakeup_vector(address + MAILBOX_SIZE as u32); + // Put new mailbox base address to the first FW arg + mail_box.set_fw_arg(0, address as u64); + + // Broadcast the wakeup command to all the APs + mail_box.set_command(spec::MP_WAKEUP_COMMAND_WAKEUP); + mail_box.set_apic_id(spec::MAILBOX_APICID_BROADCAST); + + Ok(()) +} diff --git a/td-shim/src/bin/td-shim/verifier.rs b/td-shim/src/bin/td-shim/verifier.rs new file mode 100644 index 00000000..31e73414 --- /dev/null +++ b/td-shim/src/bin/td-shim/verifier.rs @@ -0,0 +1,311 @@ +// Copyright (c) 2020 Intel Corporation +// +// SPDX-License-Identifier: BSD-2-Clause-Patent + +extern crate alloc; + +use alloc::vec::Vec; +use core::mem::size_of; +use der::{asn1::UIntBytes, Decodable, Encodable, Message}; +use ring::{ + digest, + signature::{self, UnparsedPublicKey, VerificationAlgorithm}, +}; +use scroll::{Pread, Pwrite}; +use td_shim::secure_boot::{ + CfvPubKeyFileHeader, PayloadSignHeader, CFV_FFS_HEADER_TRUST_ANCHOR_GUID, + CFV_FILE_HEADER_PUBKEY_GUID, PAYLOAD_SIGN_ECDSA_NIST_P384_SHA384, + PAYLOAD_SIGN_RSA_PSS_3072_SHA384, SIGNED_PAYLOAD_FILE_HEADER_GUID, +}; +use td_uefi_pi::{fv, pi}; + +#[derive(Debug)] +pub enum VerifyErr { + UnknownAlgorithm, + InvalidContent, + InvalidPublicKey, + InvalidSignature, +} + +// rfc3279#section-2.3.1 RSA Keys +// The RSA public key is encoded using the ASN.1 type RSAPublicKey: +// +// RSAPublicKey ::= SEQUENCE { +// modulus INTEGER, -- n +// publicExponent INTEGER } -- e +// +#[derive(Copy, Clone, Debug, Eq, PartialEq, 
Message)] +struct RsaPublicKeyDer<'a> { + pub modulus: UIntBytes<'a>, + pub exponents: UIntBytes<'a>, +} + +pub struct PayloadVerifier<'a> { + header: PayloadSignHeader, + config: &'a [u8], + image: &'a [u8], + public_key: &'a [u8], + formated_public_key: Vec, + signature: &'a [u8], + verify_alg: &'static dyn VerificationAlgorithm, +} + +impl<'a> PayloadVerifier<'a> { + pub fn new(signed_payload: &'a [u8], config: &'a [u8]) -> Result { + let header = signed_payload + .pread::(0) + .map_err(|_e| VerifyErr::InvalidContent)?; + let mut offset = header.length as usize; + if + /*offset <= size_of::() || offset >= signed_payload.len() || */ + &header.type_guid != SIGNED_PAYLOAD_FILE_HEADER_GUID.as_bytes() { + return Err(VerifyErr::InvalidContent); + } + + // The image to be verified contains signing header and payload ELF/PE image + let image = &signed_payload[0..offset]; + + let mut formated_public_key: Vec = Vec::new(); + let verify_alg: &'static dyn VerificationAlgorithm; + let signature; + let public_key; + match header.signing_algorithm { + PAYLOAD_SIGN_ECDSA_NIST_P384_SHA384 => { + if signed_payload.len() < offset + 192 { + return Err(VerifyErr::InvalidContent); + } + + // Public key (X: first 48 bytes, Y: second 48 bytes) + public_key = &signed_payload[offset..offset + 96]; + offset += 96; + + // Signature: (R: first 48 bytes, S: second 48 byts) + signature = &signed_payload[offset..offset + 96]; + + // Uncompressed public key + formated_public_key.push(0x04); + formated_public_key.extend_from_slice(public_key); + + verify_alg = &signature::ECDSA_P384_SHA384_FIXED; + } + PAYLOAD_SIGN_RSA_PSS_3072_SHA384 => { + if signed_payload.len() < offset + 776 { + return Err(VerifyErr::InvalidContent); + } + + // Store the Mod(384 bytes)||Exponent(8 bytes) to the public_key to verify hash. 
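+                // Layout after the signing header (776 bytes total, as checked
+                // above): modulus (384) | exponent (8) | signature (384).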
+ public_key = &signed_payload[offset..offset + 392]; + + // Public Mod (384 bytes) + let modulus = &signed_payload[offset..offset + 384]; + offset += 384; + + // Public Exponent (8 bytes) + let exp = &signed_payload[offset..offset + 8]; + offset += 8; + + // Signature (384 bytes) + signature = &signed_payload[offset..offset + 384]; + + let der = RsaPublicKeyDer { + modulus: UIntBytes::new(modulus).map_err(|_e| VerifyErr::InvalidContent)?, + exponents: UIntBytes::new(exp).map_err(|_e| VerifyErr::InvalidContent)?, + }; + der.encode_to_vec(&mut formated_public_key) + .map_err(|_e| VerifyErr::InvalidContent)?; + + verify_alg = &signature::RSA_PSS_2048_8192_SHA384; + } + _ => return Err(VerifyErr::UnknownAlgorithm), + } + + Ok(PayloadVerifier { + header, + image, + config, + public_key, + formated_public_key, + signature, + verify_alg, + }) + } + + pub fn get_payload_svn(&self) -> u64 { + self.header.payload_svn + } + + pub fn get_trust_anchor(cfv: &'a [u8]) -> Result<&'a [u8], VerifyErr> { + fv::get_file_from_fv( + cfv, + pi::fv::FV_FILETYPE_RAW, + CFV_FFS_HEADER_TRUST_ANCHOR_GUID, + ) + .ok_or(VerifyErr::InvalidContent) + } + + pub fn get_payload_image(signed_payload: &'a [u8]) -> Result<&'a [u8], VerifyErr> { + let header = signed_payload + .pread::(0) + .map_err(|_e| VerifyErr::InvalidContent)?; + let mut offset = header.length as usize; + + if offset <= size_of::() || offset > signed_payload.len() { + Err(VerifyErr::InvalidContent) + } else { + Ok(&signed_payload[size_of::()..offset]) + } + } + + fn verify_signature(&self) -> Result<(), VerifyErr> { + let signature_verifier = + UnparsedPublicKey::new(self.verify_alg, self.formated_public_key.as_slice()); + signature_verifier + .verify(self.image, self.signature) + .map_err(|_e| VerifyErr::InvalidSignature) + } + + // Calculate the hash of public key read from signed payload, and + // compare with the one enrolled in the CFV. + // + // The contents in CFV are stored as the below layout: + // CFV header | FFS header | data file (header | data) + // The public key hash is stored in the data field. 
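+    // What gets hashed is exactly the `public_key` slice captured in `new()`:
+    // X||Y (96 bytes) for ECDSA, or modulus||exponent (392 bytes) for RSA, so
+    // the enrolled CFV hash is assumed to be computed over the same encoding.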
+ // + fn verify_public_key(&self) -> Result<(), VerifyErr> { + let file = fv::get_file_from_fv( + self.config, + pi::fv::FV_FILETYPE_RAW, + CFV_FFS_HEADER_TRUST_ANCHOR_GUID, + ) + .ok_or(VerifyErr::InvalidPublicKey)?; + + let mut readlen = 0; + let header = file.gread::(&mut readlen).unwrap(); + if &header.type_guid != CFV_FILE_HEADER_PUBKEY_GUID.as_bytes() + || header.length as usize > file.len() + { + return Err(VerifyErr::InvalidPublicKey); + } + + let trusted_hash = &file[readlen..header.length as usize]; + let real_hash = digest::digest(&digest::SHA384, self.public_key); + if real_hash.as_ref() != trusted_hash { + return Err(VerifyErr::InvalidPublicKey); + } + + Ok(()) + } + + pub fn verify(&self) -> Result<(), VerifyErr> { + self.verify_public_key()?; + self.verify_signature()?; + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_payload_verifier_new() { + assert!(PayloadVerifier::new(&[], &[]).is_err()); + + let mut hdr = PayloadSignHeader { + type_guid: *TD_PAYLOAD_SIGN_HEADER_GUID.as_bytes(), + struct_version: 1, + length: 0, + payload_version: 1, + payload_svn: 1, + signing_algorithm: 0, + reserved: 0, + }; + assert!(PayloadVerifier::new(hdr.as_bytes(), &[]).is_err()); + hdr.length = size_of::() as u32; + assert!(PayloadVerifier::new(hdr.as_bytes(), &[]).is_err()); + + hdr.length = size_of::() as u32 + 1; + let mut buf = [0u8; 2048]; + buf[0..size_of::()].copy_from_slice(hdr.as_bytes()); + assert!(PayloadVerifier::new(&buf[0..size_of::() + 1], &[]).is_err()); + + hdr.signing_algorithm = TD_PAYLOAD_SIGN_RSA_PSS_3072_SHA384; + buf[0..size_of::()].copy_from_slice(hdr.as_bytes()); + assert!(PayloadVerifier::new(&buf[0..size_of::() + 1], &[]).is_err()); + assert!(PayloadVerifier::new(&buf[0..size_of::() + 777], &[]).is_ok()); + + hdr.signing_algorithm = TD_PAYLOAD_SIGN_ECDSA_NIST_P384_SHA384; + buf[0..size_of::()].copy_from_slice(hdr.as_bytes()); + assert!(PayloadVerifier::new(&buf[0..size_of::() + 1], &[]).is_err()); + assert!(PayloadVerifier::new(&buf[0..size_of::() + 193], &[]).is_ok()); + } + + #[test] + fn test_get_payload_image() { + assert!(PayloadVerifier::get_payload_image(&[]).is_err()); + + let mut hdr = PayloadSignHeader { + type_guid: *TD_PAYLOAD_SIGN_HEADER_GUID.as_bytes(), + struct_version: 1, + length: 0, + payload_version: 1, + payload_svn: 1, + signing_algorithm: 0, + reserved: 0, + }; + assert!(PayloadVerifier::get_payload_image(hdr.as_bytes()).is_err()); + hdr.length = size_of::() as u32; + assert!(PayloadVerifier::get_payload_image(hdr.as_bytes()).is_err()); + + hdr.length = size_of::() as u32 + 1; + let mut buf = [0u8; 2048]; + buf[0..size_of::()].copy_from_slice(hdr.as_bytes()); + assert!( + PayloadVerifier::get_payload_image(&buf[0..size_of::()]).is_err() + ); + assert_eq!( + PayloadVerifier::get_payload_image(&buf[0..size_of::() + 1]) + .unwrap(), + &[0u8] + ); + assert_eq!( + PayloadVerifier::get_payload_image(&buf[0..size_of::() + 2]) + .unwrap(), + &[0u8] + ); + } + + /* + #[test] + fn test() { + let bin = include_bytes!("../unit-test/input/final.sb.bin"); + + let pstart = TD_SHIM_PAYLOAD_OFFSET as usize; + let pend = pstart + TD_SHIM_PAYLOAD_SIZE as usize; + let payload_fv = &bin[pstart..pend]; + + let mut offset = 0; + let payload = fv::get_image_from_fv( + payload_fv, + pi::fv::FV_FILETYPE_DXE_CORE, + pi::fv::SECTION_PE32, + ) + .unwrap(); + + let cstart = TD_SHIM_CONFIG_OFFSET as usize; + let cend = cstart + TD_SHIM_CONFIG_SIZE as usize; + let cfv = &bin[cstart..cend]; + + let verifier = PayloadVerifier::new(payload, 
cfv);
+        assert!(
+            verifier.is_ok(),
+            "Cannot get verify header from payload binary"
+        );
+        assert!(
+            verifier.unwrap().verify().is_ok(),
+            "Payload verification failed"
+        );
+    }
+    */
+}