From 912349fe611c2bd389f0d3d3083333f506f70aed Mon Sep 17 00:00:00 2001 From: Thomas Leroy Date: Tue, 3 Dec 2024 15:36:26 +0100 Subject: [PATCH] utils/util: make zero_mem_region unsafe As advised by @Freax13 in #359, if misused, zero_mem_region() can corrupt valid memory by writing zeroes to it. Therefore, it should be unsafe so that callers verify that it points to the intended memory. Signed-off-by: Thomas Leroy --- kernel/src/fw_meta.rs | 6 +++++- kernel/src/mm/alloc.rs | 16 ++++++++++++++-- kernel/src/protocols/core.rs | 9 ++++++++- kernel/src/svsm.rs | 15 +++++++++++---- kernel/src/utils/util.rs | 20 ++++++++++++++++---- 5 files changed, 54 insertions(+), 12 deletions(-) diff --git a/kernel/src/fw_meta.rs b/kernel/src/fw_meta.rs index 001de192c..30ada96a3 100644 --- a/kernel/src/fw_meta.rs +++ b/kernel/src/fw_meta.rs @@ -342,7 +342,11 @@ fn validate_fw_mem_region( PageSize::Regular, )?; - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + // SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a + // valid pointer to a correctly mapped region of size PAGE_SIZE. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } } Ok(()) diff --git a/kernel/src/mm/alloc.rs b/kernel/src/mm/alloc.rs index 793cbaeff..e389bf4d6 100644 --- a/kernel/src/mm/alloc.rs +++ b/kernel/src/mm/alloc.rs @@ -589,7 +589,12 @@ impl MemoryRegion { fn allocate_zeroed_page(&mut self) -> Result { let vaddr = self.allocate_page()?; - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + // SAFETY: we trust allocate_page() to return a pointer to a valid + // page. vaddr + PAGE_SIZE also correctly points to the end of the + // page. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } Ok(vaddr) } @@ -1091,7 +1096,14 @@ pub fn allocate_zeroed_page() -> Result { /// `SvsmError` if allocation fails. 
pub fn allocate_file_page() -> Result { let vaddr = ROOT_MEM.lock().allocate_file_page()?; - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + + // SAFETY: we trust allocate_file_page() to return a pointer to a valid + // page. vaddr + PAGE_SIZE also correctly points to the end of the + // page. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } + Ok(vaddr) } diff --git a/kernel/src/protocols/core.rs b/kernel/src/protocols/core.rs index 123b48fe8..b93f7a679 100644 --- a/kernel/src/protocols/core.rs +++ b/kernel/src/protocols/core.rs @@ -312,7 +312,14 @@ fn core_pvalidate_one(entry: u64, flush: &mut bool) -> Result<(), SvsmReqError> // FIXME: This check leaves a window open for the attack described // above. Remove the check once OVMF and Linux have been fixed and // no longer try to pvalidate MMIO memory. - zero_mem_region(vaddr, vaddr + page_size_bytes); + + // SAFETY: paddr is validated at the beginning of the function, and + // we trust PerCPUPageMappingGuard::create() to return a valid + // vaddr pointing to a mapped region of at least page_size_bytes + // size. + unsafe { + zero_mem_region(vaddr, vaddr + page_size_bytes); + } } else { log::warn!("Not clearing possible read-only page at PA {:#x}", paddr); } diff --git a/kernel/src/svsm.rs b/kernel/src/svsm.rs index 2b96eca5b..e7b254a1b 100755 --- a/kernel/src/svsm.rs +++ b/kernel/src/svsm.rs @@ -123,7 +123,11 @@ fn copy_secrets_page_to_fw(fw_addr: PhysAddr, caa_addr: PhysAddr) -> Result<(), let start = guard.virt_addr(); // Zero target - zero_mem_region(start, start + PAGE_SIZE); + // SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a + // valid pointer to a correctly mapped region of size PAGE_SIZE. 
+ unsafe { + zero_mem_region(start, start + PAGE_SIZE); + } // Copy secrets page let mut fw_secrets_page = secrets_page().copy_for_vmpl(GUEST_VMPL); @@ -148,7 +152,11 @@ fn zero_caa_page(fw_addr: PhysAddr) -> Result<(), SvsmError> { let guard = PerCPUPageMappingGuard::create_4k(fw_addr)?; let vaddr = guard.virt_addr(); - zero_mem_region(vaddr, vaddr + PAGE_SIZE); + // SAFETY: we trust PerCPUPageMappingGuard::create_4k() to return a + // valid pointer to a correctly mapped region of size PAGE_SIZE. + unsafe { + zero_mem_region(vaddr, vaddr + PAGE_SIZE); + } Ok(()) } @@ -308,10 +316,9 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: usize) { // We trust stage 2 to give the value provided by IGVM. unsafe { secrets_page_mut().copy_from(secrets_page_virt); + zero_mem_region(secrets_page_virt, secrets_page_virt + PAGE_SIZE); } - zero_mem_region(secrets_page_virt, secrets_page_virt + PAGE_SIZE); - cr0_init(); cr4_init(&*platform); determine_cet_support(); diff --git a/kernel/src/utils/util.rs b/kernel/src/utils/util.rs index 45aa5c827..abfc78625 100644 --- a/kernel/src/utils/util.rs +++ b/kernel/src/utils/util.rs @@ -52,14 +52,22 @@ where x1 <= y2 && y1 <= x2 } -pub fn zero_mem_region(start: VirtAddr, end: VirtAddr) { - let size = end - start; +/// # Safety +/// +/// Caller should ensure [`core::ptr::write_bytes`] safety rules. +pub unsafe fn zero_mem_region(start: VirtAddr, end: VirtAddr) { if start.is_null() { panic!("Attempted to zero out a NULL pointer"); } + let count = end + .checked_sub(start.as_usize()) + .expect("Invalid size calculation") + .as_usize(); + // Zero region - unsafe { start.as_mut_ptr::().write_bytes(0, size) } + // SAFETY: the safety rules must be upheld by the caller. 
+ unsafe { start.as_mut_ptr::().write_bytes(0, count) } } /// Obtain bit for a given position @@ -117,7 +125,11 @@ mod tests { let start = VirtAddr::from(data.as_mut_ptr()); let end = start + core::mem::size_of_val(&data); - zero_mem_region(start, end); + // SAFETY: start and end correctly point respectively to the start and + // end of data. + unsafe { + zero_mem_region(start, end); + } for byte in &data { assert_eq!(*byte, 0);