diff --git a/src/arch/aarch64/kernel/processor.rs b/src/arch/aarch64/kernel/processor.rs
index 34b0e3aa7a..4dedd765aa 100644
--- a/src/arch/aarch64/kernel/processor.rs
+++ b/src/arch/aarch64/kernel/processor.rs
@@ -152,6 +152,7 @@ pub fn get_timestamp() -> u64 {
 }
 
 #[inline]
+#[allow(dead_code)]
 pub fn supports_1gib_pages() -> bool {
 	false
 }
diff --git a/src/arch/aarch64/mm/paging.rs b/src/arch/aarch64/mm/paging.rs
index e68d63409c..4f6ca55011 100644
--- a/src/arch/aarch64/mm/paging.rs
+++ b/src/arch/aarch64/mm/paging.rs
@@ -601,7 +601,9 @@ pub fn map<S: PageSize>(
 	root_pagetable.map_pages(range, physical_address, flags);
 }
 
-pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
+/// Maps `count` pages at address `virt_addr`. If the allocation of physical memory fails,
+/// the number of successfully mapped pages is returned as the error value.
+pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) -> Result<(), usize> {
 	let flags = {
 		let mut flags = PageTableEntryFlags::empty();
 		flags.normal().writable().execute_disable();
@@ -610,10 +612,13 @@ pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
 
 	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
 
-	for virt_addr in virt_addrs {
-		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+	for (map_counter, virt_addr) in virt_addrs.enumerate() {
+		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize)
+			.map_err(|_| map_counter)?;
 		map::<S>(virt_addr, phys_addr, 1, flags);
 	}
+
+	Ok(())
 }
 
 pub fn unmap(virtual_address: VirtAddr, count: usize) {
diff --git a/src/arch/x86_64/mm/paging.rs b/src/arch/x86_64/mm/paging.rs
index f6ae7d6881..51158027f1 100644
--- a/src/arch/x86_64/mm/paging.rs
+++ b/src/arch/x86_64/mm/paging.rs
@@ -138,7 +138,9 @@ pub fn map<S>(
 	}
 }
 
-pub fn map_heap<S>(virt_addr: VirtAddr, count: usize)
+/// Maps `count` pages at address `virt_addr`. If the allocation of physical memory fails,
+/// the number of successfully mapped pages is returned as the error value.
+pub fn map_heap<S>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
 where
 	S: PageSize + Debug,
 	RecursivePageTable<'static>: Mapper<S>,
@@ -151,10 +153,13 @@ where
 
 	let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);
 
-	for virt_addr in virt_addrs {
-		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+	for (map_counter, virt_addr) in virt_addrs.enumerate() {
+		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize)
+			.map_err(|_| map_counter)?;
 		map::<S>(virt_addr, phys_addr, 1, flags);
 	}
+
+	Ok(())
 }
 
 #[cfg(feature = "acpi")]
diff --git a/src/mm/mod.rs b/src/mm/mod.rs
index 77e77f9c8a..3ecb05c41e 100644
--- a/src/mm/mod.rs
+++ b/src/mm/mod.rs
@@ -9,11 +9,11 @@ use hermit_sync::Lazy;
 #[cfg(feature = "newlib")]
 use hermit_sync::OnceCell;
 
+#[cfg(target_arch = "x86_64")]
+use crate::arch::mm::paging::HugePageSize;
 #[cfg(target_arch = "x86_64")]
 use crate::arch::mm::paging::PageTableEntryFlagsExt;
-use crate::arch::mm::paging::{
-	BasePageSize, HugePageSize, LargePageSize, PageSize, PageTableEntryFlags,
-};
+use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize, PageTableEntryFlags};
 use crate::arch::mm::physicalmem::total_memory_size;
 #[cfg(feature = "newlib")]
 use crate::arch::mm::virtualmem::kernel_heap_end;
@@ -82,6 +82,7 @@ pub(crate) fn init() {
 	let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
 		* BasePageSize::SIZE as usize
 		+ LargePageSize::SIZE as usize;
+	#[cfg(target_arch = "x86_64")]
 	let has_1gib_pages = arch::processor::supports_1gib_pages();
 	let has_2mib_pages = arch::processor::supports_2mib_pages();
 
@@ -154,10 +155,15 @@ pub(crate) fn init() {
 	#[cfg(target_arch = "x86_64")]
 	if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
 		// Mount large pages to the next huge page boundary
-		map_addr = virt_addr.align_up_to_huge_page();
-		map_size = virt_size - (map_addr - virt_addr).as_usize();
-		let npages = (map_addr - virt_addr).as_usize() / LargePageSize::SIZE as usize;
-		paging::map_heap::<LargePageSize>(virt_addr, npages);
+		let npages = (virt_addr.align_up_to_huge_page().as_usize() - virt_addr.as_usize())
+			/ LargePageSize::SIZE as usize;
+		if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
+			map_addr = virt_addr + n * LargePageSize::SIZE as usize;
+			map_size = virt_size - (map_addr - virt_addr).as_usize();
+		} else {
+			map_addr = virt_addr.align_up_to_huge_page();
+			map_size = virt_size - (map_addr - virt_addr).as_usize();
+		}
 	} else {
 		map_addr = virt_addr;
 		map_size = virt_size;
@@ -170,28 +176,50 @@ pub(crate) fn init() {
 		}
 	}
 
+	#[cfg(target_arch = "x86_64")]
 	if has_1gib_pages
 		&& map_size > HugePageSize::SIZE as usize
-		&& map_addr.as_usize().align_down(HugePageSize::SIZE as usize) == 0
+		&& map_addr.is_aligned(HugePageSize::SIZE)
 	{
 		let size = map_size.align_down(HugePageSize::SIZE as usize);
-		paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize);
-		map_size -= size;
-		map_addr += size;
+		if let Err(num_pages) =
+			paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
+		{
+			map_size -= num_pages * HugePageSize::SIZE as usize;
+			map_addr += num_pages * HugePageSize::SIZE as usize;
+		} else {
+			map_size -= size;
+			map_addr += size;
+		}
 	}
 
-	if has_2mib_pages && map_size > LargePageSize::SIZE as usize {
+	if has_2mib_pages
+		&& map_size > LargePageSize::SIZE as usize
+		&& map_addr.is_aligned(LargePageSize::SIZE)
+	{
 		let size = map_size.align_down(LargePageSize::SIZE as usize);
-		paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize);
-		map_size -= size;
-		map_addr += size;
+		if let Err(num_pages) =
+			paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
+		{
+			map_size -= num_pages * LargePageSize::SIZE as usize;
+			map_addr += num_pages * LargePageSize::SIZE as usize;
+		} else {
+			map_size -= size;
+			map_addr += size;
+		}
 	}
 
-	if map_size > BasePageSize::SIZE as usize {
+	if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned(BasePageSize::SIZE) {
		let size = map_size.align_down(BasePageSize::SIZE as usize);
-		paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize);
-		map_size -= size;
-		map_addr += size;
+		if let Err(num_pages) =
+			paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
+		{
+			map_size -= num_pages * BasePageSize::SIZE as usize;
+			map_addr += num_pages * BasePageSize::SIZE as usize;
+		} else {
+			map_size -= size;
+			map_addr += size;
+		}
 	}
 
 	let heap_end_addr = map_addr;
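Note on the contract introduced above: `map_heap` now either maps all `count` pages (`Ok(())`) or stops at the first failed physical allocation and reports how many pages it did map (`Err(n)`), so the caller can advance `map_addr`/`map_size` by exactly the mapped portion and leave the remainder for a smaller page size. The following is a minimal, self-contained sketch of that pattern; `map_heap_sketch`, `PAGE_SIZE`, and `available_frames` are illustrative stand-ins, not Hermit APIs.

// Illustrative sketch only: stand-ins for paging::map_heap::<S>() and the physical allocator.

const PAGE_SIZE: usize = 4096;

/// Maps `count` pages starting at `virt_addr`. If a physical frame cannot be
/// allocated, returns the number of pages that were successfully mapped.
fn map_heap_sketch(virt_addr: usize, count: usize, available_frames: &mut usize) -> Result<(), usize> {
    for mapped in 0..count {
        if *available_frames == 0 {
            // Out of physical memory: report how far we got, like map_heap's Err(map_counter).
            return Err(mapped);
        }
        *available_frames -= 1;
        let _page = virt_addr + mapped * PAGE_SIZE; // here the real code would call map::<S>()
    }
    Ok(())
}

fn main() {
    let mut frames = 3; // pretend only 3 physical frames remain
    let mut map_addr = 0x1000_0000usize;
    let mut map_size = 8 * PAGE_SIZE;

    // Caller-side pattern from mm::init(): on partial success, advance by the
    // pages that were actually mapped and keep the rest for a later attempt.
    match map_heap_sketch(map_addr, map_size / PAGE_SIZE, &mut frames) {
        Ok(()) => {
            map_addr += map_size;
            map_size = 0;
        }
        Err(num_pages) => {
            map_addr += num_pages * PAGE_SIZE;
            map_size -= num_pages * PAGE_SIZE;
        }
    }
    println!("heap end = {map_addr:#x}, unmapped remainder = {map_size:#x} bytes");
}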