diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs
index d2b4d02e49..c990a9f8ec 100644
--- a/kernel/kernel_config/src/memory.rs
+++ b/kernel/kernel_config/src/memory.rs
@@ -5,12 +5,10 @@
 //! Current P4 (top-level page table) mappings:
 //! * 511: kernel text sections
-//! * 510: recursive mapping to top of P4
+//! * 510: recursive mapping for accessing the current P4 root page table frame.
 //! * 509: kernel heap
-//! * 508: kernel stacks
-//! * 507: userspace stacks
-//! * 506 down to 0: available for user processes
-
+//! * 508: recursive mapping for accessing the P4 root page table frame of an upcoming (new) page table.
+//! * 507 down to 0: available for general usage
 
 /// 64-bit architecture results in 8 bytes per address.
 pub const BYTES_PER_ADDR: usize = core::mem::size_of::<usize>();
 
@@ -36,18 +34,18 @@ pub const MAX_VIRTUAL_ADDRESS: usize = usize::MAX;
 pub const TEMPORARY_PAGE_VIRT_ADDR: usize = MAX_VIRTUAL_ADDRESS;
 
-/// Value: 512.
+/// Value: 512. 
 pub const ENTRIES_PER_PAGE_TABLE: usize = PAGE_SIZE / BYTES_PER_ADDR;
 /// Value: 511. The 511th entry is used for kernel text sections
 pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1;
-/// Value: 510. The 510th entry is used for the recursive P4 mapping.
+/// Value: 510. The 510th entry is used to recursively map the current P4 root page table frame
+/// such that it can be accessed and modified just like any other level of page table.
 pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2;
 /// Value: 509. The 509th entry is used for the kernel heap
 pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3;
-/// Value: 508. The 508th entry is used for all kernel stacks
-pub const KERNEL_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4;
-/// Value: 507. The 507th entry is used for all userspace stacks
-pub const USER_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 5;
+/// Value: 508. The 508th entry is used to temporarily recursively map the P4 root page table frame
+/// of an upcoming (new) page table such that it can be accessed and modified.
+pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4;
 
 pub const MAX_PAGE_NUMBER: usize = MAX_VIRTUAL_ADDRESS / PAGE_SIZE;
 
@@ -88,17 +86,5 @@ pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug
 /// the kernel heap gets the whole 509th P4 entry.
 pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY;
-
-/// the kernel stack allocator gets the 508th P4 entry of addressability.
-/// actual value: 0o177777_774_000_000_000_0000, or 0xFFFF_FE00_0000_0000
-pub const KERNEL_STACK_ALLOCATOR_BOTTOM: usize = 0xFFFF_0000_0000_0000 | (KERNEL_STACK_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT));
-/// the highest actually usuable address in the kernel stack allocator
-pub const KERNEL_STACK_ALLOCATOR_TOP_ADDR: usize = KERNEL_STACK_ALLOCATOR_BOTTOM + ADDRESSABILITY_PER_P4_ENTRY - BYTES_PER_ADDR;
-
-
-/// the userspace stack allocators (one per userspace task) each get the 507th P4 entry of addressability.
-/// actual value: 0o177777_773_000_000_000_0000, or 0xFFFF_FD80_0000_0000
-pub const USER_STACK_ALLOCATOR_BOTTOM: usize = 0xFFFF_0000_0000_0000 | (USER_STACK_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT));
-/// the highest actually usuable address in each userspace stack allocator
-pub const USER_STACK_ALLOCATOR_TOP_ADDR: usize = USER_STACK_ALLOCATOR_BOTTOM + ADDRESSABILITY_PER_P4_ENTRY - BYTES_PER_ADDR;
-
+/// The system (page allocator) must not use addresses at or above this address.
+pub const UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT));
diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs
index cb79ca7010..bc515ebd43 100644
--- a/kernel/memory/src/paging/mapper.rs
+++ b/kernel/memory/src/paging/mapper.rs
@@ -10,7 +10,6 @@
 use core::{
     borrow::{Borrow, BorrowMut},
     cmp::Ordering,
-    fmt::{self, Write},
     hash::{Hash, Hasher},
     marker::PhantomData,
     mem,
@@ -23,7 +22,7 @@ use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page,
 use crate::paging::{
     get_current_p4, PageRange,
-    table::{P4, Table, Level4},
+    table::{P4, UPCOMING_P4, Table, Level4},
 };
 use pte_flags::PteFlagsArch;
 use spin::Once;
@@ -59,21 +58,41 @@ pub struct Mapper {
 }
 
 impl Mapper {
+    /// Creates (bootstraps) a `Mapper` based on the
+    /// currently-active P4 page table root.
     pub(crate) fn from_current() -> Mapper {
         Self::with_p4_frame(get_current_p4())
     }
 
+    /// Creates a new `Mapper` that uses the recursive entry in the current P4 page table
+    /// to map the given `p4` frame.
+    ///
+    /// The given `p4` frame is the root frame of the page table to be mapped.
     pub(crate) fn with_p4_frame(p4: Frame) -> Mapper {
         Mapper {
-            p4: Unique::new(P4).unwrap(), // cannot panic because we know the P4 value is valid
+            p4: Unique::new(P4).unwrap(), // cannot panic; the P4 value is valid
+            target_p4: p4,
+        }
+    }
+
+    /// Creates a new mapper for an upcoming (soon-to-be-initialized) page table
+    /// that uses the `UPCOMING_P4` recursive entry in the current P4 table
+    /// to map that new page table.
+    ///
+    /// The given `p4` frame is the root frame of that upcoming page table.
+    pub(crate) fn upcoming(p4: Frame) -> Mapper {
+        Mapper {
+            p4: Unique::new(UPCOMING_P4).unwrap(),
             target_p4: p4,
         }
     }
 
+    /// Returns a reference to this `Mapper`'s root page table as a P4-level table.
     pub(crate) fn p4(&self) -> &Table<Level4> {
         unsafe { self.p4.as_ref() }
     }
 
+    /// Returns a mutable reference to this `Mapper`'s root page table as a P4-level table.
     pub(crate) fn p4_mut(&mut self) -> &mut Table<Level4> {
         unsafe { self.p4.as_mut() }
     }
@@ -81,15 +100,14 @@ impl Mapper {
     /// Dumps all page table entries at all four page table levels for the given `VirtualAddress`,
     /// and also shows their `PteFlags`.
     ///
-    /// The page table details are written to the the given `writer`.
-    pub fn dump_pte<W: Write>(&self, writer: &mut W, virtual_address: VirtualAddress) -> fmt::Result {
+    /// The page table details are written to the log as an `info` message.
+    pub fn dump_pte(&self, virtual_address: VirtualAddress) {
         let page = Page::containing_address(virtual_address);
         let p4 = self.p4();
         let p3 = p4.next_table(page.p4_index());
         let p2 = p3.and_then(|p3| p3.next_table(page.p3_index()));
         let p1 = p2.and_then(|p2| p2.next_table(page.p2_index()));
-        write!(
-            writer,
+        log::info!(
             "VirtualAddress: {:#X}:
                 P4 entry:        {:#X}   ({:?})
                 P3 entry:        {:#X}   ({:?})
@@ -104,7 +122,7 @@ impl Mapper {
             p2.map(|p2| &p2[page.p2_index()]).map(|p2_entry| p2_entry.flags()),
             p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.value()).unwrap_or(0x0),
             p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.flags()),
-        )
+        );
     }
 
     /// Translates a `VirtualAddress` to a `PhysicalAddress` by walking the page tables.
diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs
index 1ec51da0b7..e08335b361 100644
--- a/kernel/memory/src/paging/mod.rs
+++ b/kernel/memory/src/paging/mod.rs
@@ -34,7 +34,7 @@ use super::{Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress,
 use pte_flags::PteFlagsArch;
 use no_drop::NoDrop;
 use boot_info::BootInformation;
-use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE};
+use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE, UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX};
 
 
 /// A top-level root (P4) page table.
@@ -114,10 +114,13 @@ impl PageTable {
         })
     }
 
-    /// Temporarily maps the given other `PageTable` to the recursive entry (510th entry)
-    /// so that the given closure `f` can set up new mappings on the new `other_table` without actually switching to it yet.
-    /// Accepts a closure `f` that is passed a `Mapper`, such that it can set up new mappings on the other table.
-    /// Consumes the given `temporary_page` and automatically unmaps it afterwards.
+    /// Temporarily maps the given other `PageTable` to the temporary recursive
+    /// index (508th entry).
+    ///
+    /// Accepts a closure `f` that is passed a mutable reference to the other
+    /// table's mapper, and an immutable reference to the current table's
+    /// mapper.
+    ///
     /// # Note
     /// This does not perform any task switching or changing of the current page table register (e.g., cr3).
     pub fn with<F>(
@@ -125,47 +128,48 @@ impl PageTable {
         other_table: &mut PageTable,
         f: F,
     ) -> Result<(), &'static str>
-        where F: FnOnce(&mut Mapper) -> Result<(), &'static str>
+        where F: FnOnce(&mut Mapper, &Mapper) -> Result<(), &'static str>
     {
         let active_p4_frame = get_current_p4();
         if self.p4_table.start() != &active_p4_frame || self.p4_table.end() != &active_p4_frame {
             return Err("PageTable::with(): this PageTable ('self') must be the currently active page table.");
         }
 
-        // Temporarily take ownership of this page table's p4 allocated frame and
+        // Temporarily take ownership of the other page table's p4 allocated frame and
         // create a new temporary page that maps to that frame.
-        let this_p4 = core::mem::replace(&mut self.p4_table, AllocatedFrames::empty());
-        let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?;
-
-        // overwrite recursive mapping
-        self.p4_mut()[RECURSIVE_P4_INDEX].set_entry(
-            other_table.p4_table.as_allocated_frame(),
-            PteFlagsArch::new().valid(true).writable(true),
-        );
-        tlb_flush_all();
-
-        // set mapper's target frame to reflect that future mappings will be mapped into the other_table
-        self.mapper.target_p4 = *other_table.p4_table.start();
-
-        // execute `f` in the new context, in which the new page table is considered "active"
-        let ret = f(self);
-
-        // restore mapper's target frame to reflect that future mappings will be mapped using the currently-active (original) PageTable
-        self.mapper.target_p4 = active_p4_frame;
+        let other_p4 = core::mem::replace(&mut other_table.p4_table, AllocatedFrames::empty());
+        let other_p4_frame = *other_p4.start();
+        let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, other_p4, self)?;
 
-        // restore recursive mapping to original p4 table
-        temporary_page.with_table_and_frame(|p4_table, frame| {
-            p4_table[RECURSIVE_P4_INDEX].set_entry(
+        // Overwrite upcoming page table recursive mapping.
+        temporary_page.with_table_and_frame(|table, frame| {
+            self.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry(
+                frame.as_allocated_frame(),
+                PteFlagsArch::new().valid(true).writable(true),
+            );
+            table[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry(
                 frame.as_allocated_frame(),
                 PteFlagsArch::new().valid(true).writable(true),
             );
         })?;
         tlb_flush_all();
 
-        // Here, recover the current page table's p4 frame and restore it into this current page table,
+        // This mapper will modify the `other_table` using the upcoming P4 recursive entry
+        // that is set for the currently active page table.
+        let mut mapper = Mapper::upcoming(other_p4_frame);
+
+        // Execute `f` in the new context, in which the new page table is considered "active"
+        let ret = f(&mut mapper, self);
+
+        // Clear both page tables' upcoming recursive mapping entries.
+        self.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].zero();
+        other_table.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].zero();
+        tlb_flush_all();
+
+        // Here, recover the other page table's p4 frame and restore it into the other page table,
         // since we removed it earlier at the top of this function and gave it to the temporary page.
         let (_temp_page, p4_frame) = temporary_page.unmap_into_parts(self)?;
-        self.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?;
+        other_table.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?;
 
         ret
     }
@@ -221,7 +225,7 @@ pub fn init(
     let boot_info_start_vaddr = boot_info.start().ok_or("boot_info start virtual address was invalid")?;
     let boot_info_start_paddr = page_table.translate(boot_info_start_vaddr).ok_or("Couldn't get boot_info start physical address")?;
     let boot_info_size = boot_info.len();
-    debug!("multiboot vaddr: {:#X}, multiboot paddr: {:#X}, size: {:#X}\n", boot_info_start_vaddr, boot_info_start_paddr, boot_info_size);
+    debug!("multiboot vaddr: {:#X}, multiboot paddr: {:#X}, size: {:#X}", boot_info_start_vaddr, boot_info_start_paddr, boot_info_size);
 
     let new_p4_frame = frame_allocator::allocate_frames(1).ok_or("couldn't allocate frame for new page table")?;
     let mut new_table = PageTable::new_table(&mut page_table, new_p4_frame, None)?;
@@ -234,8 +238,7 @@ pub fn init(
     let mut higher_half_mapped_pages: [Option<NoDrop<MappedPages>>; 32] = Default::default();
    let mut identity_mapped_pages: [Option<NoDrop<MappedPages>>; 32] = Default::default();
-    // Stack frames are not guaranteed to be contiguous.
-    let mut stack_mappings = [None; 34];
+    // Stack frames are not guaranteed to be contiguous in physical memory.
     let stack_size = boot_info.stack_size()?;
     let stack_page_range = PageRange::from_virt_addr(
         // `PAGE_SIZE` accounts for the guard page, which does not have a corresponding frame.
         stack_start_virt + PAGE_SIZE,
         stack_size - PAGE_SIZE,
     );
     debug!("Initial stack start {stack_start_virt:#X}, size: {stack_size:#X} bytes, {stack_page_range:X?}");
-    for (i, page) in stack_page_range.into_iter().enumerate() {
-        let frame = page_table.translate_page(page).ok_or("couldn't translate stack page")?;
-        stack_mappings[i] = Some((page, frame));
-    }
-    // Boot info frames are not guaranteed to be contiguous.
-    let mut boot_info_mappings = [None; 10];
+    // Boot info frames are not guaranteed to be contiguous in physical memory.
     let boot_info_page_range = PageRange::from_virt_addr(boot_info_start_vaddr, boot_info_size);
     debug!("Boot info start: {boot_info_start_vaddr:#X}, size: {boot_info_size:#X}, {boot_info_page_range:#X?}");
-    for (i, page) in boot_info_page_range.into_iter().enumerate() {
-        let frame = page_table.translate_page(page).ok_or("couldn't translate boot info page")?;
-        boot_info_mappings[i] = Some((page, frame));
-    }
     // Create and initialize a new page table with the same contents as the currently-executing kernel code/data sections.
-    page_table.with(&mut new_table, |mapper| {
-
+    page_table.with(&mut new_table, |new_mapper, current_mapper| {
         // Map every section found in the kernel image (given by the boot information above) into our new page table.
         // To allow the APs to boot up, we must identity map those kernel sections too, i.e.,
         // map the same physical frames to both lower-half and higher-half virtual addresses.
@@ -295,18 +288,18 @@ pub fn init(
             init_end_phys.value() - init_start_phys.value(),
         )?;
         identity_mapped_pages[index] = Some(NoDrop::new( unsafe {
-            Mapper::map_to_non_exclusive(mapper, init_pages_identity, &init_frames, init_flags)?
+            Mapper::map_to_non_exclusive(new_mapper, init_pages_identity, &init_frames, init_flags)?
         }));
-        let mut init_mapped_pages = mapper.map_allocated_pages_to(init_pages, init_frames, init_flags)?;
+        let mut init_mapped_pages = new_mapper.map_allocated_pages_to(init_pages, init_frames, init_flags)?;
         index += 1;
 
         let text_pages = page_allocator::allocate_pages_by_bytes_at(text_start_virt, text_end_virt.value() - text_start_virt.value())?;
         let text_frames = frame_allocator::allocate_frames_by_bytes_at(text_start_phys, text_end_phys.value() - text_start_phys.value())?;
         let text_pages_identity = page_allocator::allocate_pages_by_bytes_at(text_start_virt - KERNEL_OFFSET, text_end_virt.value() - text_start_virt.value())?;
         identity_mapped_pages[index] = Some(NoDrop::new( unsafe {
-            Mapper::map_to_non_exclusive(mapper, text_pages_identity, &text_frames, text_flags)?
+            Mapper::map_to_non_exclusive(new_mapper, text_pages_identity, &text_frames, text_flags)?
         }));
-        init_mapped_pages.merge(mapper.map_allocated_pages_to(text_pages, text_frames, text_flags)?).map_err(|(error, _)| error)?;
+        init_mapped_pages.merge(new_mapper.map_allocated_pages_to(text_pages, text_frames, text_flags)?).map_err(|(error, _)| error)?;
         text_mapped_pages = Some(NoDrop::new(init_mapped_pages));
         index += 1;
 
@@ -314,28 +307,33 @@ pub fn init(
         let rodata_frames = frame_allocator::allocate_frames_by_bytes_at(rodata_start_phys, rodata_end_phys.value() - rodata_start_phys.value())?;
         let rodata_pages_identity = page_allocator::allocate_pages_by_bytes_at(rodata_start_virt - KERNEL_OFFSET, rodata_end_virt.value() - rodata_start_virt.value())?;
         identity_mapped_pages[index] = Some(NoDrop::new( unsafe {
-            Mapper::map_to_non_exclusive(mapper, rodata_pages_identity, &rodata_frames, rodata_flags)?
+            Mapper::map_to_non_exclusive(new_mapper, rodata_pages_identity, &rodata_frames, rodata_flags)?
         }));
-        rodata_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(rodata_pages, rodata_frames, rodata_flags)?));
+        rodata_mapped_pages = Some(NoDrop::new(new_mapper.map_allocated_pages_to(rodata_pages, rodata_frames, rodata_flags)?));
         index += 1;
 
         let data_pages = page_allocator::allocate_pages_by_bytes_at(data_start_virt, data_end_virt.value() - data_start_virt.value())?;
         let data_frames = frame_allocator::allocate_frames_by_bytes_at(data_start_phys, data_end_phys.value() - data_start_phys.value())?;
         let data_pages_identity = page_allocator::allocate_pages_by_bytes_at(data_start_virt - KERNEL_OFFSET, data_end_virt.value() - data_start_virt.value())?;
         identity_mapped_pages[index] = Some(NoDrop::new( unsafe {
-            Mapper::map_to_non_exclusive(mapper, data_pages_identity, &data_frames, data_flags)?
+            Mapper::map_to_non_exclusive(new_mapper, data_pages_identity, &data_frames, data_flags)?
         }));
-        data_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(data_pages, data_frames, data_flags)?));
+        data_mapped_pages = Some(NoDrop::new(new_mapper.map_allocated_pages_to(data_pages, data_frames, data_flags)?));
         index += 1;
 
-        // Handle the stack (a separate data section), which consists of one guard page followed by the real stack pages.
-        // It does not need to be identity mapped because each AP core will have its own stack.
+        // Handle the stack (a separate data section), which consists of one guard page (unmapped)
+        // followed by the real (mapped) stack pages.
+        // The stack does not need to be identity mapped, because each secondary CPU will get its own stack.
         let stack_guard_page = page_allocator::allocate_pages_at(stack_start_virt, 1)?;
         let mut stack_mapped_pages: Option<MappedPages> = None;
-        for (page, frame) in stack_mappings.into_iter().flatten() {
+        for page in stack_page_range.into_iter() {
+            // The stack is not guaranteed to be contiguous in physical memory,
+            // so we use the `current_mapper` to translate each page into its backing physical frame,
+            // and then reproduce the same mapping in the `new_mapper`.
+            let frame = current_mapper.translate_page(page).ok_or("couldn't translate stack page")?;
             let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?;
             let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?;
-            let mapped_pages = mapper.map_allocated_pages_to(allocated_page, allocated_frame, data_flags)?;
+            let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, data_flags)?;
             if let Some(ref mut stack_mapped_pages) = stack_mapped_pages {
                 stack_mapped_pages.merge(mapped_pages).map_err(|_| "failed to merge stack mapped pages")?;
             } else {
@@ -348,23 +346,28 @@ pub fn init(
         ));
 
         // Map the VGA display memory as writable.
-        // We do an identity mapping for the VGA display too, because the AP cores may access it while booting.
+        // We do an identity mapping for the VGA display too, because secondary CPUs may access it while booting.
         let (vga_phys_addr, vga_size_in_bytes, vga_flags) = get_vga_mem_addr()?;
         let vga_virt_addr_identity = VirtualAddress::new_canonical(vga_phys_addr.value());
         let vga_display_pages = page_allocator::allocate_pages_by_bytes_at(vga_virt_addr_identity + KERNEL_OFFSET, vga_size_in_bytes)?;
         let vga_display_frames = frame_allocator::allocate_frames_by_bytes_at(vga_phys_addr, vga_size_in_bytes)?;
         let vga_display_pages_identity = page_allocator::allocate_pages_by_bytes_at(vga_virt_addr_identity, vga_size_in_bytes)?;
         identity_mapped_pages[index] = Some(NoDrop::new( unsafe {
-            Mapper::map_to_non_exclusive(mapper, vga_display_pages_identity, &vga_display_frames, vga_flags)?
+            Mapper::map_to_non_exclusive(new_mapper, vga_display_pages_identity, &vga_display_frames, vga_flags)?
         }));
-        higher_half_mapped_pages[index] = Some(NoDrop::new(mapper.map_allocated_pages_to(vga_display_pages, vga_display_frames, vga_flags)?));
+        higher_half_mapped_pages[index] = Some(NoDrop::new(new_mapper.map_allocated_pages_to(vga_display_pages, vga_display_frames, vga_flags)?));
         index += 1;
 
-        let mut iter = boot_info_mappings.iter();
-        while let Some(Some((page, frame))) = iter.next() {
+        // Map the bootloader info, a separate region of read-only memory, so that we can access it later.
+        // This does not need to be identity mapped.
+        for page in boot_info_page_range.into_iter() {
+            // The boot info is not guaranteed to be contiguous in physical memory,
+            // so we use the `current_mapper` to translate each page into its backing physical frame,
+            // and then reproduce the same mapping in the `new_mapper`.
+            let frame = current_mapper.translate_page(page).ok_or("couldn't translate boot info page")?;
             let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?;
             let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?;
-            let mapped_pages = mapper.map_allocated_pages_to(allocated_page, allocated_frame, PteFlags::new())?;
+            let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, PteFlags::new())?;
             if let Some(ref mut boot_info_mapped_pages) = boot_info_mapped_pages {
                 boot_info_mapped_pages.merge(mapped_pages).map_err(|_| "failed to merge boot info pages")?;
             } else {
@@ -375,10 +378,8 @@ pub fn init(
         debug!("identity_mapped_pages: {:?}", &identity_mapped_pages[..index]);
         debug!("higher_half_mapped_pages: {:?}", &higher_half_mapped_pages[..index]);
 
-        Ok(()) // mapping closure completed successfully
-
-    })?; // TemporaryPage is dropped here
-
+        Ok(())
+    })?;
 
     let text_mapped_pages = text_mapped_pages .ok_or("Couldn't map .text section")?;
     let rodata_mapped_pages = rodata_mapped_pages .ok_or("Couldn't map .rodata section")?;
diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs
index 935ec6a6b0..bb52f975c2 100644
--- a/kernel/memory/src/paging/table.rs
+++ b/kernel/memory/src/paging/table.rs
@@ -30,6 +30,13 @@ pub const P4: *mut Table<Level4> = 0o177777_776_776_776_776_0000 as *mut _;
 //                                          ^p4  ^p3  ^p2  ^p1  ^offset
 //                                          ^ 0o776 means that we're always looking at the 510th entry recursively
 
+/// By default this is an invalid address unless the upcoming page table recursive entry is modified.
+///
+/// NOTE: this must be kept in sync with the upcoming page table recursive index in `kernel_config/memory.rs`.
+///
+/// All four table indexes need to be set to 0o774 so that `Table::next_table_address` works properly.
+pub const UPCOMING_P4: *mut Table<Level4> = 0o177777_774_774_774_774_0000 as *mut _;
+
 #[derive(FromBytes)]
 pub struct Table<L: TableLevel> {
     entries: [PageTableEntry; ENTRIES_PER_PAGE_TABLE],
diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs
index fe2e2e2425..236fd30b15 100644
--- a/kernel/page_allocator/src/lib.rs
+++ b/kernel/page_allocator/src/lib.rs
@@ -47,10 +47,13 @@ static DESIGNATED_PAGES_LOW_END: Once<Page> = Once::new();
 
 /// Defines the upper part of the address space that's designated, similar to `DESIGNATED_PAGES_LOW_END`.
 /// Any virtual addresses **greater than or equal to** this address is considered "designated".
-/// This higher part of the address range covers from the beginning of the heap area to the end of the address space.
+/// This higher part of the address range covers from:
+/// the beginning of the recursive P4 entry used for modifying upcoming page tables
+/// to the very end of the address space.
 ///
-/// TODO: once the heap is fully dynamic and not dependent on constant addresses, we can move this up to KERNEL_TEXT_START (511th entry of P4).
-static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(KERNEL_HEAP_START));
+/// TODO: once the heap is fully dynamic and not dependent on static addresses,
+/// we can exclude the heap from the designated region.
+static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START));
 
 const MIN_PAGE: Page = Page::containing_address(VirtualAddress::zero());
 const MAX_PAGE: Page = Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS));
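The address constants this diff introduces all fall out of the recursive-mapping trick: a virtual address whose P4, P3, P2, and P1 indices all equal the index of a P4 entry that points back at the P4 frame resolves to the P4 frame itself. The standalone sketch below (hosted Rust, not kernel code; the helper name `recursive_table_addr` is ours, not Theseus's) re-derives the `P4`, `UPCOMING_P4`, and `UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START` values used above, assuming x86_64 4-level paging with 9-bit indices, 4 KiB pages, and sign-extension of bit 47.

```rust
/// Illustrative helper (not in the Theseus codebase): builds the virtual address
/// whose P4, P3, P2, and P1 indices are all `index`. With `index` pointing at a
/// P4 entry that maps the P4 frame itself, this address resolves to that P4 frame.
const fn recursive_table_addr(index: u64) -> u64 {
    0xFFFF_0000_0000_0000   // canonical sign-extension of bit 47
        | (index << 39)     // P4 index
        | (index << 30)     // P3 index
        | (index << 21)     // P2 index
        | (index << 12)     // P1 index
}

fn main() {
    // 510 (0o776): the current page table's recursive entry -> `P4`.
    assert_eq!(recursive_table_addr(510), 0o177777_776_776_776_776_0000);
    // 508 (0o774): the upcoming page table's temporary recursive entry -> `UPCOMING_P4`.
    assert_eq!(recursive_table_addr(508), 0o177777_774_774_774_774_0000);
    // The 508th P4 entry's region starts at 508 << (P4_INDEX_SHIFT + PAGE_SHIFT), i.e. 508 << 39;
    // everything at or above it is off-limits to the page allocator
    // (UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START).
    assert_eq!(0xFFFF_0000_0000_0000u64 | (508 << 39), 0xFFFF_FE00_0000_0000);
    println!("recursive-mapping address constants check out");
}
```

Note that 0xFFFF_FE00_0000_0000 is the same base address the removed `KERNEL_STACK_ALLOCATOR_BOTTOM` constant used, since both describe the start of the 508th P4 entry's 512 GiB region; this change simply repurposes that slot.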
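The comment on `UPCOMING_P4` notes that all four table indexes must be 0o774 for `Table::next_table_address` to work properly. The sketch below (again hosted Rust, not the kernel's actual implementation; `canonicalize` and `next_table_address` are simplified stand-ins, and the child indices 3, 2, 1 are arbitrary examples) illustrates why: recursive page-table walking typically derives a child table's address as `(table_address << 9) | (index << 12)` followed by re-canonicalization, so each step promotes the old P3 index into the P4 slot. Only when the recursive index is repeated in all four positions does every derived table address keep routing through the same recursive P4 entry.

```rust
/// Simplified stand-in for canonical-address fix-up: sign-extend bit 47 into bits 48..=63.
fn canonicalize(addr: u64) -> u64 {
    (((addr << 16) as i64) >> 16) as u64
}

/// Simplified stand-in for the recursive next-table address calculation.
fn next_table_address(table_addr: u64, index: u64) -> u64 {
    canonicalize((table_addr << 9) | (index << 12))
}

fn main() {
    let upcoming_p4: u64 = 0o177777_774_774_774_774_0000;
    // Walk down to the P3 table at P4 index 3, then the P2 at P3 index 2, then the P1 at P2 index 1.
    let p3 = next_table_address(upcoming_p4, 3);
    let p2 = next_table_address(p3, 2);
    let p1 = next_table_address(p2, 1);
    // Every derived address still has 0o774 in its upper index fields,
    // i.e., it is still routed through the upcoming table's recursive entry.
    assert_eq!(p3, 0o177777_774_774_774_003_0000);
    assert_eq!(p2, 0o177777_774_774_003_002_0000);
    assert_eq!(p1, 0o177777_774_003_002_001_0000);
    println!("p3 = {p3:#o}\np2 = {p2:#o}\np1 = {p1:#o}");
}
```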