Commit 2fda1c6: Merge pull request #832 from stlankes/talc

fix mapping of 1GB pages

stlankes authored Aug 9, 2023
2 parents e1a0707 + 9fcce94
Showing 4 changed files with 64 additions and 25 deletions.
1 change: 1 addition & 0 deletions src/arch/aarch64/kernel/processor.rs
@@ -152,6 +152,7 @@ pub fn get_timestamp() -> u64 {
 }

 #[inline]
+#[allow(dead_code)]
 pub fn supports_1gib_pages() -> bool {
     false
 }
11 changes: 8 additions & 3 deletions src/arch/aarch64/mm/paging.rs
@@ -601,7 +601,9 @@ pub fn map<S: PageSize>(
     root_pagetable.map_pages(range, physical_address, flags);
 }

-pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {
+/// Maps `count` pages at address `virt_addr`. If the allocation of physical memory fails,
+/// the number of successfully mapped pages is returned as the error value.
+pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) -> Result<(), usize> {
     let flags = {
         let mut flags = PageTableEntryFlags::empty();
         flags.normal().writable().execute_disable();
@@ -610,10 +612,13 @@ pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) {

     let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);

-    for virt_addr in virt_addrs {
-        let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+    for (map_counter, virt_addr) in virt_addrs.enumerate() {
+        let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize)
+            .map_err(|_| map_counter)?;
         map::<S>(virt_addr, phys_addr, 1, flags);
     }
+
+    Ok(())
 }

 pub fn unmap<S: PageSize>(virtual_address: VirtAddr, count: usize) {
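With the new return contract, a caller can recover from a partial mapping instead of panicking. The following fragment is a sketch, not part of this commit: it reuses the kernel's own paging::map_heap, LargePageSize, and BasePageSize under assumed imports, and applies the same fallback arithmetic that src/mm/mod.rs uses below.

// Hypothetical caller-side fallback: if the allocator runs dry after `n`
// large pages, remap the remainder with 4 KiB base pages, resuming right
// after the last successfully mapped page.
if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, count) {
    let resume_addr = virt_addr + n * LargePageSize::SIZE as usize;
    let remaining_pages = (count - n) * (LargePageSize::SIZE / BasePageSize::SIZE) as usize;
    paging::map_heap::<BasePageSize>(resume_addr, remaining_pages)
        .expect("out of physical memory even for 4 KiB pages");
}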
11 changes: 8 additions & 3 deletions src/arch/x86_64/mm/paging.rs
@@ -138,7 +138,9 @@ pub fn map<S>(
     }
 }

-pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize)
+/// Maps `count` pages at address `virt_addr`. If the allocation of physical memory fails,
+/// the number of successfully mapped pages is returned as the error value.
+pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
 where
     S: PageSize + Debug,
     RecursivePageTable<'static>: Mapper<S>,
@@ -151,10 +153,13 @@ where

     let virt_addrs = (0..count).map(|n| virt_addr + n * S::SIZE as usize);

-    for virt_addr in virt_addrs {
-        let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize).unwrap();
+    for (map_counter, virt_addr) in virt_addrs.enumerate() {
+        let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize)
+            .map_err(|_| map_counter)?;
         map::<S>(virt_addr, phys_addr, 1, flags);
     }
+
+    Ok(())
 }

 #[cfg(feature = "acpi")]
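Both ports report partial progress with the same idiom: enumerate() keeps a running count of pages mapped so far, and map_err(|_| map_counter)? turns the first failed allocation into an early return carrying that count. Below is a self-contained sketch of the pattern; try_alloc is a hypothetical stub standing in for physicalmem::allocate_aligned, and in the real code the iterator yields virtual addresses rather than indices.

fn try_alloc() -> Result<usize, ()> {
    // Hypothetical allocator stub; the real code calls
    // physicalmem::allocate_aligned(S::SIZE, S::SIZE).
    Err(())
}

/// Maps `count` pages; on failure, Err(n) says n pages were already mapped.
fn map_pages(count: usize) -> Result<(), usize> {
    for (mapped, _page) in (0..count).enumerate() {
        // The closure discards the allocation error and substitutes the
        // number of pages mapped so far; `?` propagates it to the caller.
        let _frame = try_alloc().map_err(|_| mapped)?;
        // ... insert the page-table entry here ...
    }
    Ok(())
}

fn main() {
    assert_eq!(map_pages(4), Err(0)); // the stub fails on the first allocation
}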
66 changes: 47 additions & 19 deletions src/mm/mod.rs
@@ -9,11 +9,11 @@ use hermit_sync::Lazy;
 #[cfg(feature = "newlib")]
 use hermit_sync::OnceCell;

+#[cfg(target_arch = "x86_64")]
+use crate::arch::mm::paging::HugePageSize;
 #[cfg(target_arch = "x86_64")]
 use crate::arch::mm::paging::PageTableEntryFlagsExt;
-use crate::arch::mm::paging::{
-    BasePageSize, HugePageSize, LargePageSize, PageSize, PageTableEntryFlags,
-};
+use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize, PageTableEntryFlags};
 use crate::arch::mm::physicalmem::total_memory_size;
 #[cfg(feature = "newlib")]
 use crate::arch::mm::virtualmem::kernel_heap_end;
@@ -82,6 +82,7 @@ pub(crate) fn init() {
     let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
         * BasePageSize::SIZE as usize
         + LargePageSize::SIZE as usize;
+    #[cfg(target_arch = "x86_64")]
     let has_1gib_pages = arch::processor::supports_1gib_pages();
     let has_2mib_pages = arch::processor::supports_2mib_pages();

@@ -154,10 +155,15 @@ pub(crate) fn init() {
         #[cfg(target_arch = "x86_64")]
         if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
             // Mount large pages to the next huge page boundary
-            map_addr = virt_addr.align_up_to_huge_page();
-            map_size = virt_size - (map_addr - virt_addr).as_usize();
-            let npages = (map_addr - virt_addr).as_usize() / LargePageSize::SIZE as usize;
-            paging::map_heap::<LargePageSize>(virt_addr, npages);
+            let npages = (virt_addr.align_up_to_huge_page().as_usize() - virt_addr.as_usize())
+                / LargePageSize::SIZE as usize;
+            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
+                map_addr = virt_addr + n * LargePageSize::SIZE as usize;
+                map_size = virt_size - (map_addr - virt_addr).as_usize();
+            } else {
+                map_addr = virt_addr.align_up_to_huge_page();
+                map_size = virt_size - (map_addr - virt_addr).as_usize();
+            }
         } else {
             map_addr = virt_addr;
             map_size = virt_size;
@@ -170,28 +176,50 @@
         }
     }

+    #[cfg(target_arch = "x86_64")]
     if has_1gib_pages
         && map_size > HugePageSize::SIZE as usize
-        && map_addr.as_usize().align_down(HugePageSize::SIZE as usize) == 0
+        && map_addr.is_aligned(HugePageSize::SIZE)
     {
         let size = map_size.align_down(HugePageSize::SIZE as usize);
-        paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize);
-        map_size -= size;
-        map_addr += size;
+        if let Err(num_pages) =
+            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
+        {
+            map_size -= num_pages * HugePageSize::SIZE as usize;
+            map_addr += num_pages * HugePageSize::SIZE as usize;
+        } else {
+            map_size -= size;
+            map_addr += size;
+        }
     }

-    if has_2mib_pages && map_size > LargePageSize::SIZE as usize {
+    if has_2mib_pages
+        && map_size > LargePageSize::SIZE as usize
+        && map_addr.is_aligned(LargePageSize::SIZE)
+    {
         let size = map_size.align_down(LargePageSize::SIZE as usize);
-        paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize);
-        map_size -= size;
-        map_addr += size;
+        if let Err(num_pages) =
+            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
+        {
+            map_size -= num_pages * LargePageSize::SIZE as usize;
+            map_addr += num_pages * LargePageSize::SIZE as usize;
+        } else {
+            map_size -= size;
+            map_addr += size;
+        }
     }

-    if map_size > BasePageSize::SIZE as usize {
+    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned(BasePageSize::SIZE) {
         let size = map_size.align_down(BasePageSize::SIZE as usize);
-        paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize);
-        map_size -= size;
-        map_addr += size;
+        if let Err(num_pages) =
+            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
+        {
+            map_size -= num_pages * BasePageSize::SIZE as usize;
+            map_addr += num_pages * BasePageSize::SIZE as usize;
+        } else {
+            map_size -= size;
+            map_addr += size;
+        }
     }

     let heap_end_addr = map_addr;
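Taken together, the three rewritten blocks form a cascade: try the largest page size whose alignment and remaining length allow it, and on a partial failure advance only past the pages that were actually mapped before falling through to the next smaller size. The following self-contained sketch shows that control flow; every name in it, including the deliberately half-failing map_heap stub, is hypothetical rather than the kernel's API.

const HUGE: usize = 0x4000_0000; // 1 GiB
const LARGE: usize = 0x20_0000; // 2 MiB
const BASE: usize = 0x1000; // 4 KiB

// Stub: pretend the physical allocator runs dry halfway through each request.
fn map_heap(_addr: usize, pages: usize) -> Result<(), usize> {
    if pages > 1 { Err(pages / 2) } else { Ok(()) }
}

// Returns the final heap end address, mirroring `heap_end_addr` in init().
fn map_cascade(mut addr: usize, mut len: usize) -> usize {
    for size in [HUGE, LARGE, BASE] {
        if len > size && addr % size == 0 {
            let chunk = len - len % size; // align_down(len, size)
            match map_heap(addr, chunk / size) {
                // Everything mapped: consume the whole aligned chunk.
                Ok(()) => { addr += chunk; len -= chunk; }
                // Partial failure: skip only the n pages that were mapped.
                Err(n) => { addr += n * size; len -= n * size; }
            }
        }
    }
    addr
}

fn main() {
    let end = map_cascade(0x4000_0000, 3 * HUGE + 5 * LARGE);
    println!("heap ends at {end:#x}");
}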
