From 9732fc901523bd1649cde5ec98e3adc7dc09d8d4 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Tue, 3 Jan 2023 17:56:10 +1100 Subject: [PATCH 01/18] Don't reuse recursive entry in `PageTable::with` Signed-off-by: Klim Tsoutsman --- Makefile | 1 + kernel/kernel_config/src/memory.rs | 4 ++- kernel/memory/src/paging/mapper.rs | 16 +++++++---- kernel/memory/src/paging/mod.rs | 43 +++++++++++------------------- kernel/memory/src/paging/table.rs | 8 ++++++ tools/uefi_builder/src/main.rs | 2 +- 6 files changed, 40 insertions(+), 34 deletions(-) diff --git a/Makefile b/Makefile index 49a4c033dd..7b509152d4 100644 --- a/Makefile +++ b/Makefile @@ -803,6 +803,7 @@ endif QEMU_FLAGS += -no-reboot -no-shutdown ## Enable a GDB stub so we can connect GDB to the QEMU instance QEMU_FLAGS += -s +QEMU_FLAGS += -monitor telnet:localhost:1235,server,nowait ## Enable the first serial port (the default log) to be redirected to the host terminal's stdio. ## Optionally, use the below `mon:` prefix to have the host terminal forward escape/control sequences to this serial port. diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index d2b4d02e49..5284b8c495 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -36,7 +36,7 @@ pub const MAX_VIRTUAL_ADDRESS: usize = usize::MAX; pub const TEMPORARY_PAGE_VIRT_ADDR: usize = MAX_VIRTUAL_ADDRESS; -/// Value: 512. +/// Value: 512. pub const ENTRIES_PER_PAGE_TABLE: usize = PAGE_SIZE / BYTES_PER_ADDR; /// Value: 511. The 511th entry is used for kernel text sections pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1; @@ -48,6 +48,8 @@ pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3; pub const KERNEL_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; /// Value: 507. The 507th entry is used for all userspace stacks pub const USER_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 5; +// Value: 506. The 506th entry is used as a temporary recursive entry when mapping a new page table. +pub const TEMPORARY_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 6; pub const MAX_PAGE_NUMBER: usize = MAX_VIRTUAL_ADDRESS / PAGE_SIZE; diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 357197b596..3c4fe15f90 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -28,7 +28,7 @@ use crate::paging::{ use pte_flags::PteFlagsArch; use spin::Once; use kernel_config::memory::{PAGE_SIZE, ENTRIES_PER_PAGE_TABLE}; -use super::tlb_flush_virt_addr; +use super::{tlb_flush_virt_addr, table::TEMP_P4}; use zerocopy::FromBytes; use page_table_entry::UnmapResult; use owned_borrowed_trait::{OwnedOrBorrowed, Owned, Borrowed}; @@ -70,6 +70,13 @@ impl Mapper { } } + pub(crate) fn temp(p4: Frame) -> Mapper { + Mapper { + p4: Unique::new(TEMP_P4).unwrap(), + target_p4: p4, + } + } + pub(crate) fn p4(&self) -> &Table { unsafe { self.p4.as_ref() } } @@ -82,14 +89,13 @@ impl Mapper { /// and also shows their `PteFlags`. /// /// The page table details are written to the the given `writer`. 
- pub fn dump_pte(&self, writer: &mut W, virtual_address: VirtualAddress) -> fmt::Result { + pub fn dump_pte(&self,virtual_address: VirtualAddress) { let page = Page::containing_address(virtual_address); let p4 = self.p4(); let p3 = p4.next_table(page.p4_index()); let p2 = p3.and_then(|p3| p3.next_table(page.p3_index())); let p1 = p2.and_then(|p2| p2.next_table(page.p2_index())); - write!( - writer, + log::info!( "VirtualAddress: {:#X}: P4 entry: {:#X} ({:?}) P3 entry: {:#X} ({:?}) @@ -104,7 +110,7 @@ impl Mapper { p2.map(|p2| &p2[page.p2_index()]).map(|p2_entry| p2_entry.flags()), p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.value()).unwrap_or(0x0), p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.flags()), - ) + ); } /// Translates a `VirtualAddress` to a `PhysicalAddress` by walking the page tables. diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index d43d768ffb..7d5fe5b0ad 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -34,7 +34,7 @@ use super::{Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress, use pte_flags::PteFlagsArch; use no_drop::NoDrop; use boot_info::BootInformation; -use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE}; +use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE, TEMPORARY_RECURSIVE_P4_INDEX}; /// A top-level root (P4) page table. @@ -104,6 +104,10 @@ impl PageTable { frame.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); + table[TEMPORARY_RECURSIVE_P4_INDEX].set_entry( + frame.as_allocated_frame(), + PteFlagsArch::new().valid(true).writable(true), + ); })?; let (_temp_page, inited_new_p4_frame) = temporary_page.unmap_into_parts(current_page_table)?; @@ -132,40 +136,27 @@ impl PageTable { return Err("PageTable::with(): this PageTable ('self') must be the currently active page table."); } - // Temporarily take ownership of this page table's p4 allocated frame and - // create a new temporary page that maps to that frame. - let this_p4 = core::mem::replace(&mut self.p4_table, AllocatedFrames::empty()); - let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?; + // // Temporarily take ownership of this page table's p4 allocated frame and + // // create a new temporary page that maps to that frame. 
+ // let this_p4 = core::mem::replace(&mut self.p4_table, AllocatedFrames::empty()); + // let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?; // overwrite recursive mapping - self.p4_mut()[RECURSIVE_P4_INDEX].set_entry( + self.p4_mut()[TEMPORARY_RECURSIVE_P4_INDEX].set_entry( other_table.p4_table.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); tlb_flush_all(); - // set mapper's target frame to reflect that future mappings will be mapped into the other_table - self.mapper.target_p4 = *other_table.p4_table.start(); + let mut mapper = Mapper::temp(*other_table.p4_table.start()); // execute `f` in the new context, in which the new page table is considered "active" - let ret = f(self); - - // restore mapper's target frame to reflect that future mappings will be mapped using the currently-active (original) PageTable - self.mapper.target_p4 = active_p4_frame; - - // restore recursive mapping to original p4 table - temporary_page.with_table_and_frame(|p4_table, frame| { - p4_table[RECURSIVE_P4_INDEX].set_entry( - frame.as_allocated_frame(), - PteFlagsArch::new().valid(true).writable(true), - ); - })?; - tlb_flush_all(); + let ret = f(&mut mapper); // Here, recover the current page table's p4 frame and restore it into this current page table, // since we removed it earlier at the top of this function and gave it to the temporary page. - let (_temp_page, p4_frame) = temporary_page.unmap_into_parts(self)?; - self.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?; + // let (_temp_page, p4_frame) = temporary_page.unmap_into_parts(self)?; + // self.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?; ret } @@ -359,10 +350,8 @@ pub fn init( debug!("identity_mapped_pages: {:?}", &identity_mapped_pages[..index]); debug!("higher_half_mapped_pages: {:?}", &higher_half_mapped_pages[..index]); - Ok(()) // mapping closure completed successfully - - })?; // TemporaryPage is dropped here - + Ok(()) + })?; let text_mapped_pages = text_mapped_pages .ok_or("Couldn't map .text section")?; let rodata_mapped_pages = rodata_mapped_pages .ok_or("Couldn't map .rodata section")?; diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs index b2ecdca006..d328cf67a2 100644 --- a/kernel/memory/src/paging/table.rs +++ b/kernel/memory/src/paging/table.rs @@ -29,6 +29,8 @@ pub const P4: *mut Table = 0o177777_776_776_776_776_0000 as *mut _; // ^p4 ^p3 ^p2 ^p1 ^offset // ^ 0o776 means that we're always looking at the 510th entry recursively +pub const TEMP_P4: *mut Table = 0o177777_772_772_772_772_0000 as *mut _; + #[derive(FromBytes)] pub struct Table { entries: [PageTableEntry; ENTRIES_PER_PAGE_TABLE], @@ -87,16 +89,22 @@ impl Table { index: usize, flags: PteFlagsArch, ) -> &mut Table { + // log::debug!("NEXT TABLE CREATE"); if self.next_table(index).is_none() { + // log::debug!("creating next table"); assert!(!self[index].flags().is_huge(), "mapping code does not support huge pages"); + // log::debug!("allocating frames"); let af = frame_allocator::allocate_frames(1).expect("next_table_create(): no frames available"); + // log::debug!("setting entry"); self[index].set_entry( af.as_allocated_frame(), flags.valid(true).writable(true), // must be valid and writable on x86_64 ); + // log::debug!("zeroing next table"); self.next_table_mut(index).unwrap().zero(); core::mem::forget(af); // we currently forget frames allocated as page table frames 
since we don't yet have a way to track them. } + // log::debug!("getting next table"); self.next_table_mut(index).unwrap() } } diff --git a/tools/uefi_builder/src/main.rs b/tools/uefi_builder/src/main.rs index 9aaffd56f7..198f4fc980 100644 --- a/tools/uefi_builder/src/main.rs +++ b/tools/uefi_builder/src/main.rs @@ -48,7 +48,7 @@ fn main() { bootloader .create_disk_image(&efi_image) - .expect("failed to create uefi disk image"); + .expect("failed to create UEFI disk image"); std::fs::copy(ovmf_prebuilt::ovmf_pure_efi(), efi_firmware) .expect("couldn't copy efi firmware"); From c555e8fca3ed148c122226e010029578265230f2 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Tue, 3 Jan 2023 18:00:47 +1100 Subject: [PATCH 02/18] Add current mapper to `PageTable::with` closure Signed-off-by: Klim Tsoutsman --- kernel/memory/src/paging/mod.rs | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 7d5fe5b0ad..030b0d0ddd 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -129,7 +129,7 @@ impl PageTable { other_table: &mut PageTable, f: F, ) -> Result<(), &'static str> - where F: FnOnce(&mut Mapper) -> Result<(), &'static str> + where F: FnOnce(&mut Mapper, &Mapper) -> Result<(), &'static str> { let active_p4_frame = get_current_p4(); if self.p4_table.start() != &active_p4_frame || self.p4_table.end() != &active_p4_frame { @@ -151,7 +151,7 @@ impl PageTable { let mut mapper = Mapper::temp(*other_table.p4_table.start()); // execute `f` in the new context, in which the new page table is considered "active" - let ret = f(&mut mapper); + let ret = f(&mut mapper, self); // Here, recover the current page table's p4 frame and restore it into this current page table, // since we removed it earlier at the top of this function and gave it to the temporary page. @@ -249,8 +249,7 @@ pub fn init( } // Create and initialize a new page table with the same contents as the currently-executing kernel code/data sections. - page_table.with(&mut new_table, |mapper| { - + page_table.with(&mut new_table, |new_mapper, _current_mapper| { // Map every section found in the kernel image (given by the boot information above) into our new page table. // To allow the APs to boot up, we must identity map those kernel sections too, i.e., // map the same physical frames to both lower-half and higher-half virtual addresses. @@ -280,27 +279,27 @@ pub fn init( let text_frames = frame_allocator::allocate_frames_by_bytes_at(text_start_phys, text_end_phys.value() - text_start_phys.value())?; let text_pages_identity = page_allocator::allocate_pages_by_bytes_at(text_start_virt - KERNEL_OFFSET, text_end_virt.value() - text_start_virt.value())?; identity_mapped_pages[index] = Some(NoDrop::new( unsafe { - Mapper::map_to_non_exclusive(mapper, text_pages_identity, &text_frames, text_flags)? + Mapper::map_to_non_exclusive(new_mapper, text_pages_identity, &text_frames, text_flags)? 
})); - text_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(text_pages, text_frames, text_flags)?)); + text_mapped_pages = Some(NoDrop::new(new_mapper.map_allocated_pages_to(text_pages, text_frames, text_flags)?)); index += 1; let rodata_pages = page_allocator::allocate_pages_by_bytes_at(rodata_start_virt, rodata_end_virt.value() - rodata_start_virt.value())?; let rodata_frames = frame_allocator::allocate_frames_by_bytes_at(rodata_start_phys, rodata_end_phys.value() - rodata_start_phys.value())?; let rodata_pages_identity = page_allocator::allocate_pages_by_bytes_at(rodata_start_virt - KERNEL_OFFSET, rodata_end_virt.value() - rodata_start_virt.value())?; identity_mapped_pages[index] = Some(NoDrop::new( unsafe { - Mapper::map_to_non_exclusive(mapper, rodata_pages_identity, &rodata_frames, rodata_flags)? + Mapper::map_to_non_exclusive(new_mapper, rodata_pages_identity, &rodata_frames, rodata_flags)? })); - rodata_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(rodata_pages, rodata_frames, rodata_flags)?)); + rodata_mapped_pages = Some(NoDrop::new(new_mapper.map_allocated_pages_to(rodata_pages, rodata_frames, rodata_flags)?)); index += 1; let data_pages = page_allocator::allocate_pages_by_bytes_at(data_start_virt, data_end_virt.value() - data_start_virt.value())?; let data_frames = frame_allocator::allocate_frames_by_bytes_at(data_start_phys, data_end_phys.value() - data_start_phys.value())?; let data_pages_identity = page_allocator::allocate_pages_by_bytes_at(data_start_virt - KERNEL_OFFSET, data_end_virt.value() - data_start_virt.value())?; identity_mapped_pages[index] = Some(NoDrop::new( unsafe { - Mapper::map_to_non_exclusive(mapper, data_pages_identity, &data_frames, data_flags)? + Mapper::map_to_non_exclusive(new_mapper, data_pages_identity, &data_frames, data_flags)? })); - data_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(data_pages, data_frames, data_flags)?)); + data_mapped_pages = Some(NoDrop::new(new_mapper.map_allocated_pages_to(data_pages, data_frames, data_flags)?)); index += 1; // Handle the stack (a separate data section), which consists of one guard page followed by the real stack pages. @@ -310,7 +309,7 @@ pub fn init( for (page, frame) in stack_mappings.into_iter().flatten() { let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?; let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?; - let mapped_pages = mapper.map_allocated_pages_to(allocated_page, allocated_frame, data_flags)?; + let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, data_flags)?; if let Some(ref mut stack_mapped_pages) = stack_mapped_pages { stack_mapped_pages.merge(mapped_pages).map_err(|_| "failed to merge stack mapped pages")?; } else { @@ -330,16 +329,16 @@ pub fn init( let vga_display_frames = frame_allocator::allocate_frames_by_bytes_at(vga_phys_addr, vga_size_in_bytes)?; let vga_display_pages_identity = page_allocator::allocate_pages_by_bytes_at(vga_virt_addr_identity, vga_size_in_bytes)?; identity_mapped_pages[index] = Some(NoDrop::new( unsafe { - Mapper::map_to_non_exclusive(mapper, vga_display_pages_identity, &vga_display_frames, vga_flags)? + Mapper::map_to_non_exclusive(new_mapper, vga_display_pages_identity, &vga_display_frames, vga_flags)? 
})); - higher_half_mapped_pages[index] = Some(NoDrop::new(mapper.map_allocated_pages_to(vga_display_pages, vga_display_frames, vga_flags)?)); + higher_half_mapped_pages[index] = Some(NoDrop::new(new_mapper.map_allocated_pages_to(vga_display_pages, vga_display_frames, vga_flags)?)); index += 1; let mut iter = boot_info_mappings.iter(); while let Some(Some((page, frame))) = iter.next() { let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?; let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?; - let mapped_pages = mapper.map_allocated_pages_to(allocated_page, allocated_frame, PteFlags::new())?; + let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, PteFlags::new())?; if let Some(ref mut boot_info_mapped_pages) = boot_info_mapped_pages { boot_info_mapped_pages.merge(mapped_pages).map_err(|_| "failed to merge boot info pages")?; } else { From 1600a23fcff95cf975dd968f09025d1a8e91ca74 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Tue, 3 Jan 2023 18:07:49 +1100 Subject: [PATCH 03/18] Use current page table to calculate stack and boot info frames Signed-off-by: Klim Tsoutsman --- kernel/memory/src/paging/mod.rs | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 030b0d0ddd..46f67e1bcf 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -226,7 +226,6 @@ pub fn init( let mut identity_mapped_pages: [Option>; 32] = Default::default(); // Stack frames are not guaranteed to be contiguous. - let mut stack_mappings = [None; 34]; let stack_size = boot_info.stack_size()?; let stack_page_range = PageRange::from_virt_addr( // `PAGE_SIZE` accounts for the guard page, which does not have a corresponding frame. @@ -234,22 +233,13 @@ pub fn init( stack_size - PAGE_SIZE, ); debug!("Initial stack start {stack_start_virt:#X}, size: {stack_size:#X} bytes, {stack_page_range:X?}"); - for (i, page) in stack_page_range.into_iter().enumerate() { - let frame = page_table.translate_page(page).ok_or("couldn't translate stack page")?; - stack_mappings[i] = Some((page, frame)); - } // Boot info frames are not guaranteed to be contiguous. - let mut boot_info_mappings = [None; 10]; let boot_info_page_range = PageRange::from_virt_addr(boot_info_start_vaddr, boot_info_size); debug!("Boot info start: {boot_info_start_vaddr:#X}, size: {boot_info_size:#X}, {boot_info_page_range:#X?}"); - for (i, page) in boot_info_page_range.into_iter().enumerate() { - let frame = page_table.translate_page(page).ok_or("couldn't translate boot info page")?; - boot_info_mappings[i] = Some((page, frame)); - } // Create and initialize a new page table with the same contents as the currently-executing kernel code/data sections. - page_table.with(&mut new_table, |new_mapper, _current_mapper| { + page_table.with(&mut new_table, |new_mapper, current_mapper| { // Map every section found in the kernel image (given by the boot information above) into our new page table. // To allow the APs to boot up, we must identity map those kernel sections too, i.e., // map the same physical frames to both lower-half and higher-half virtual addresses. @@ -306,7 +296,9 @@ pub fn init( // It does not need to be identity mapped because each AP core will have its own stack. 
        let stack_guard_page = page_allocator::allocate_pages_at(stack_start_virt, 1)?;
         let mut stack_mapped_pages: Option<MappedPages> = None;
-        for (page, frame) in stack_mappings.into_iter().flatten() {
+        for page in stack_page_range.into_iter() {
+            let frame = current_mapper.translate_page(page).ok_or("couldn't translate stack page")?;
+
             let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?;
             let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?;
             let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, data_flags)?;
             if let Some(ref mut stack_mapped_pages) = stack_mapped_pages {
                 stack_mapped_pages.merge(mapped_pages).map_err(|_| "failed to merge stack mapped pages")?;
             } else {
@@ -334,8 +326,9 @@ pub fn init(
         higher_half_mapped_pages[index] = Some(NoDrop::new(new_mapper.map_allocated_pages_to(vga_display_pages, vga_display_frames, vga_flags)?));
         index += 1;
 
-        let mut iter = boot_info_mappings.iter();
-        while let Some(Some((page, frame))) = iter.next() {
+        for page in boot_info_page_range.into_iter() {
+            let frame = current_mapper.translate_page(page).ok_or("couldn't translate boot info page")?;
+
             let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?;
             let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?;
             let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, PteFlags::new())?;
             if let Some(ref mut boot_info_mapped_pages) = boot_info_mapped_pages {
                 boot_info_mapped_pages.merge(mapped_pages).map_err(|_| "failed to merge boot info pages")?;
             } else {

From 82e6da9d9f57a4ca72a01272c99f199c5e7c3cb3 Mon Sep 17 00:00:00 2001
From: Klim Tsoutsman
Date: Tue, 3 Jan 2023 18:22:26 +1100
Subject: [PATCH 04/18] Cleanup

Signed-off-by: Klim Tsoutsman

---
 Makefile                           |  1 -
 kernel/memory/src/paging/mapper.rs |  6 +++---
 kernel/memory/src/paging/mod.rs    | 26 ++++++++++++++++++--------
 kernel/memory/src/paging/table.rs  | 11 +++++------
 tools/uefi_builder/src/main.rs     |  2 +-
 5 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/Makefile b/Makefile
index 7b509152d4..49a4c033dd 100644
--- a/Makefile
+++ b/Makefile
@@ -803,7 +803,6 @@ endif
 QEMU_FLAGS += -no-reboot -no-shutdown
 ## Enable a GDB stub so we can connect GDB to the QEMU instance
 QEMU_FLAGS += -s
-QEMU_FLAGS += -monitor telnet:localhost:1235,server,nowait
 
 ## Enable the first serial port (the default log) to be redirected to the host terminal's stdio.
 ## Optionally, use the below `mon:` prefix to have the host terminal forward escape/control sequences to this serial port.
diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs
index 3c4fe15f90..1242300c0f 100644
--- a/kernel/memory/src/paging/mapper.rs
+++ b/kernel/memory/src/paging/mapper.rs
@@ -10,7 +10,6 @@
 use core::{
     borrow::{Borrow, BorrowMut},
     cmp::Ordering,
-    fmt::{self, Write},
     hash::{Hash, Hasher},
     marker::PhantomData,
     mem,
@@ -23,12 +22,12 @@ use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page,
 use crate::paging::{
     get_current_p4,
     PageRange,
-    table::{P4, Table, Level4},
+    table::{P4, TEMP_P4, Table, Level4},
 };
 use pte_flags::PteFlagsArch;
 use spin::Once;
 use kernel_config::memory::{PAGE_SIZE, ENTRIES_PER_PAGE_TABLE};
-use super::{tlb_flush_virt_addr, table::TEMP_P4};
+use super::tlb_flush_virt_addr;
 use zerocopy::FromBytes;
 use page_table_entry::UnmapResult;
 use owned_borrowed_trait::{OwnedOrBorrowed, Owned, Borrowed};
@@ -70,6 +69,7 @@ impl Mapper {
         }
     }
 
+    /// Creates a new mapper that uses the temporary recursive P4 address.
pub(crate) fn temp(p4: Frame) -> Mapper { Mapper { p4: Unique::new(TEMP_P4).unwrap(), diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 46f67e1bcf..4684b3eb7f 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -136,12 +136,12 @@ impl PageTable { return Err("PageTable::with(): this PageTable ('self') must be the currently active page table."); } - // // Temporarily take ownership of this page table's p4 allocated frame and - // // create a new temporary page that maps to that frame. - // let this_p4 = core::mem::replace(&mut self.p4_table, AllocatedFrames::empty()); - // let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?; + // Temporarily take ownership of this page table's p4 allocated frame and + // create a new temporary page that maps to that frame. + let this_p4 = core::mem::replace(&mut self.p4_table, AllocatedFrames::empty()); + let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?; - // overwrite recursive mapping + // Overwrite temporary recursive mapping self.p4_mut()[TEMPORARY_RECURSIVE_P4_INDEX].set_entry( other_table.p4_table.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), @@ -150,13 +150,23 @@ impl PageTable { let mut mapper = Mapper::temp(*other_table.p4_table.start()); - // execute `f` in the new context, in which the new page table is considered "active" + // Execute `f` in the new context, in which the new page table is considered "active" let ret = f(&mut mapper, self); + // Restore temporary recursive mapping to original p4 table. This isn't strictly necessary, + // but ensures the current table is returned to its original state. + temporary_page.with_table_and_frame(|p4_table, frame| { + p4_table[TEMPORARY_RECURSIVE_P4_INDEX].set_entry( + frame.as_allocated_frame(), + PteFlagsArch::new().valid(true).writable(true), + ); + })?; + tlb_flush_all(); + // Here, recover the current page table's p4 frame and restore it into this current page table, // since we removed it earlier at the top of this function and gave it to the temporary page. - // let (_temp_page, p4_frame) = temporary_page.unmap_into_parts(self)?; - // self.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?; + let (_temp_page, p4_frame) = temporary_page.unmap_into_parts(self)?; + self.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?; ret } diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs index d328cf67a2..c33b4c1bf5 100644 --- a/kernel/memory/src/paging/table.rs +++ b/kernel/memory/src/paging/table.rs @@ -29,6 +29,11 @@ pub const P4: *mut Table = 0o177777_776_776_776_776_0000 as *mut _; // ^p4 ^p3 ^p2 ^p1 ^offset // ^ 0o776 means that we're always looking at the 510th entry recursively +/// By default this is equivalent to [`P4`] unless the temporary recursive entry is modified. +/// +/// NOTE: this must be kept in sync with the recursive index in `kernel_config/memory.rs`. +/// +/// All four table indexes need to be set to 0o772 so that `Table::next_table_address` works properly. 
pub const TEMP_P4: *mut Table = 0o177777_772_772_772_772_0000 as *mut _; #[derive(FromBytes)] @@ -89,22 +94,16 @@ impl Table { index: usize, flags: PteFlagsArch, ) -> &mut Table { - // log::debug!("NEXT TABLE CREATE"); if self.next_table(index).is_none() { - // log::debug!("creating next table"); assert!(!self[index].flags().is_huge(), "mapping code does not support huge pages"); - // log::debug!("allocating frames"); let af = frame_allocator::allocate_frames(1).expect("next_table_create(): no frames available"); - // log::debug!("setting entry"); self[index].set_entry( af.as_allocated_frame(), flags.valid(true).writable(true), // must be valid and writable on x86_64 ); - // log::debug!("zeroing next table"); self.next_table_mut(index).unwrap().zero(); core::mem::forget(af); // we currently forget frames allocated as page table frames since we don't yet have a way to track them. } - // log::debug!("getting next table"); self.next_table_mut(index).unwrap() } } diff --git a/tools/uefi_builder/src/main.rs b/tools/uefi_builder/src/main.rs index 198f4fc980..9aaffd56f7 100644 --- a/tools/uefi_builder/src/main.rs +++ b/tools/uefi_builder/src/main.rs @@ -48,7 +48,7 @@ fn main() { bootloader .create_disk_image(&efi_image) - .expect("failed to create UEFI disk image"); + .expect("failed to create uefi disk image"); std::fs::copy(ovmf_prebuilt::ovmf_pure_efi(), efi_firmware) .expect("couldn't copy efi firmware"); From c7cc8001f23377ecdb6033ef5c73965d4dafb93b Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 4 Jan 2023 12:59:03 +1100 Subject: [PATCH 05/18] Remove kernel and user stack references Signed-off-by: Klim Tsoutsman --- kernel/kernel_config/src/memory.rs | 22 +++------------------- kernel/page_allocator/src/lib.rs | 2 +- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index 5284b8c495..48f3cffc53 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -44,11 +44,7 @@ pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1; pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2; /// Value: 509. The 509th entry is used for the kernel heap pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3; -/// Value: 508. The 508th entry is used for all kernel stacks -pub const KERNEL_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; -/// Value: 507. The 507th entry is used for all userspace stacks -pub const USER_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 5; -// Value: 506. The 506th entry is used as a temporary recursive entry when mapping a new page table. +// Value: 508. The 508th entry is used as a temporary recursive entry when mapping a new page table. pub const TEMPORARY_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 6; @@ -90,17 +86,5 @@ pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug /// the kernel heap gets the whole 509th P4 entry. pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY; - -/// the kernel stack allocator gets the 508th P4 entry of addressability. 
-/// actual value: 0o177777_774_000_000_000_0000, or 0xFFFF_FE00_0000_0000 -pub const KERNEL_STACK_ALLOCATOR_BOTTOM: usize = 0xFFFF_0000_0000_0000 | (KERNEL_STACK_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); -/// the highest actually usuable address in the kernel stack allocator -pub const KERNEL_STACK_ALLOCATOR_TOP_ADDR: usize = KERNEL_STACK_ALLOCATOR_BOTTOM + ADDRESSABILITY_PER_P4_ENTRY - BYTES_PER_ADDR; - - -/// the userspace stack allocators (one per userspace task) each get the 507th P4 entry of addressability. -/// actual value: 0o177777_773_000_000_000_0000, or 0xFFFF_FD80_0000_0000 -pub const USER_STACK_ALLOCATOR_BOTTOM: usize = 0xFFFF_0000_0000_0000 | (USER_STACK_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); -/// the highest actually usuable address in each userspace stack allocator -pub const USER_STACK_ALLOCATOR_TOP_ADDR: usize = USER_STACK_ALLOCATOR_BOTTOM + ADDRESSABILITY_PER_P4_ENTRY - BYTES_PER_ADDR; - +/// The page allocator doesn't allocate addresses above this address. +pub const TEMPORARY_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (TEMPORARY_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 47e533ef7f..0435777ccd 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -50,7 +50,7 @@ static DESIGNATED_PAGES_LOW_END: Once = Once::new(); /// This higher part of the address range covers from the beginning of the heap area to the end of the address space. /// /// TODO: once the heap is fully dynamic and not dependent on constant addresses, we can move this up to KERNEL_TEXT_START (511th entry of P4). -static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(KERNEL_HEAP_START)); +static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(TEMPORARY_RECURSIVE_MEMORY_START)); const MIN_PAGE: Page = Page::containing_address(VirtualAddress::zero()); const MAX_PAGE: Page = Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS)); From 8ebb5e7329bb913eeac23806ca8677e47b7c22e0 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Wed, 4 Jan 2023 13:02:11 +1100 Subject: [PATCH 06/18] Update docs Signed-off-by: Klim Tsoutsman --- kernel/kernel_config/src/memory.rs | 2 +- kernel/memory/src/paging/mod.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index 48f3cffc53..8f1ddade1e 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -86,5 +86,5 @@ pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug /// the kernel heap gets the whole 509th P4 entry. pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY; -/// The page allocator doesn't allocate addresses above this address. +/// The page allocator doesn't allocate pages above this address. pub const TEMPORARY_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (TEMPORARY_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 4684b3eb7f..d23403cff3 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -148,6 +148,7 @@ impl PageTable { ); tlb_flush_all(); + // This mapper will modify the other table using the temporary recursive p4 index set in the current page table. 
let mut mapper = Mapper::temp(*other_table.p4_table.start()); // Execute `f` in the new context, in which the new page table is considered "active" From 48bf42b17be6e1553bc1e8b3398d6802e93d68f3 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Fri, 6 Jan 2023 14:28:27 +1100 Subject: [PATCH 07/18] Update kernel/kernel_config/src/memory.rs Co-authored-by: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> --- kernel/kernel_config/src/memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index 8f1ddade1e..c9071729f9 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -45,7 +45,7 @@ pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2; /// Value: 509. The 509th entry is used for the kernel heap pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3; // Value: 508. The 508th entry is used as a temporary recursive entry when mapping a new page table. -pub const TEMPORARY_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 6; +pub const TEMPORARY_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; pub const MAX_PAGE_NUMBER: usize = MAX_VIRTUAL_ADDRESS / PAGE_SIZE; From 84312a59d53f1c94aebc1e049a3a8a3234c9ea50 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Fri, 6 Jan 2023 14:30:09 +1100 Subject: [PATCH 08/18] Fix `TEMP_P4` address Signed-off-by: Klim Tsoutsman --- kernel/memory/src/paging/table.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs index c33b4c1bf5..71863b5eb8 100644 --- a/kernel/memory/src/paging/table.rs +++ b/kernel/memory/src/paging/table.rs @@ -34,7 +34,7 @@ pub const P4: *mut Table = 0o177777_776_776_776_776_0000 as *mut _; /// NOTE: this must be kept in sync with the recursive index in `kernel_config/memory.rs`. /// /// All four table indexes need to be set to 0o772 so that `Table::next_table_address` works properly. -pub const TEMP_P4: *mut Table = 0o177777_772_772_772_772_0000 as *mut _; +pub const TEMP_P4: *mut Table = 0o177777_774_774_774_774_0000 as *mut _; #[derive(FromBytes)] pub struct Table { From 823b0a59d2a99e71a15a5aa8ed7219ab53a95e93 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Fri, 6 Jan 2023 14:32:55 +1100 Subject: [PATCH 09/18] Update `PageTable::with` docs Signed-off-by: Klim Tsoutsman --- kernel/memory/src/paging/mod.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index d23403cff3..d9f10af9f0 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -118,10 +118,13 @@ impl PageTable { }) } - /// Temporarily maps the given other `PageTable` to the recursive entry (510th entry) - /// so that the given closure `f` can set up new mappings on the new `other_table` without actually switching to it yet. - /// Accepts a closure `f` that is passed a `Mapper`, such that it can set up new mappings on the other table. - /// Consumes the given `temporary_page` and automatically unmaps it afterwards. + /// Temporarily maps the given other `PageTable` to the temporary recursive + /// index (508th entry) + /// + /// Accepts a closure `f` that is passed a mutable reference to the other + /// table's mapper, and an immutable reference to the current table's + /// mapper. 
+ /// /// # Note /// This does not perform any task switching or changing of the current page table register (e.g., cr3). pub fn with( From e07b3b47153f2bde9ff608c355099fd9ee0d7905 Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Fri, 6 Jan 2023 15:21:59 +1100 Subject: [PATCH 10/18] Update temporary recursive index naming Signed-off-by: Klim Tsoutsman --- kernel/kernel_config/src/memory.rs | 6 +++--- kernel/memory/src/paging/mapper.rs | 6 +++--- kernel/memory/src/paging/mod.rs | 12 ++++++------ kernel/memory/src/paging/table.rs | 6 +++--- kernel/page_allocator/src/lib.rs | 2 +- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index c9071729f9..7aa76f11be 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -44,8 +44,8 @@ pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1; pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2; /// Value: 509. The 509th entry is used for the kernel heap pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3; -// Value: 508. The 508th entry is used as a temporary recursive entry when mapping a new page table. -pub const TEMPORARY_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; +/// Value: 508. The 508th entry is used as a temporary recursive entry when mapping an inactive page table. +pub const INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; pub const MAX_PAGE_NUMBER: usize = MAX_VIRTUAL_ADDRESS / PAGE_SIZE; @@ -87,4 +87,4 @@ pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY; /// The page allocator doesn't allocate pages above this address. -pub const TEMPORARY_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (TEMPORARY_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); +pub const INACTIVE_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 1242300c0f..3d3ff33eb0 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -22,7 +22,7 @@ use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, use crate::paging::{ get_current_p4, PageRange, - table::{P4, TEMP_P4, Table, Level4}, + table::{P4, INACTIVE_P4, Table, Level4}, }; use pte_flags::PteFlagsArch; use spin::Once; @@ -70,9 +70,9 @@ impl Mapper { } /// Creates a new mapper that uses the temporary recursive P4 address. - pub(crate) fn temp(p4: Frame) -> Mapper { + pub(crate) fn inactive(p4: Frame) -> Mapper { Mapper { - p4: Unique::new(TEMP_P4).unwrap(), + p4: Unique::new(INACTIVE_P4).unwrap(), target_p4: p4, } } diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index d9f10af9f0..dd885d6998 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -34,7 +34,7 @@ use super::{Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress, use pte_flags::PteFlagsArch; use no_drop::NoDrop; use boot_info::BootInformation; -use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE, TEMPORARY_RECURSIVE_P4_INDEX}; +use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE, INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX}; /// A top-level root (P4) page table. 
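[Aside, not part of any patch in this series: the two-mapper closure shape that patches 02 and 09 document can be seen in one self-contained place. A minimal sketch, assuming the `memory` crate's internal items (`PageTable`, `Page`, `PteFlags`, `page_allocator`, `frame_allocator`) are in scope; the function name, `page`, and the error string are illustrative only, while the calls mirror those used in `memory::init` in patch 03.]

    fn copy_one_mapping(
        page_table: &mut PageTable,
        new_table: &mut PageTable,
        page: Page,
    ) -> Result<(), &'static str> {
        page_table.with(new_table, |new_mapper, current_mapper| {
            // Read through the currently active table to find the backing frame...
            let frame = current_mapper
                .translate_page(page)
                .ok_or("couldn't translate page")?;
            // ...then reproduce that mapping in the upcoming table, which is
            // reachable only via the temporary recursive entry set up by `with`.
            let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?;
            let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?;
            let mapped_pages = new_mapper.map_allocated_pages_to(
                allocated_page,
                allocated_frame,
                PteFlags::new(),
            )?;
            // A real caller must keep `mapped_pages` alive (as `memory::init` does
            // via `NoDrop`), since dropping it would unmap the page again.
            core::mem::forget(mapped_pages); // sketch only
            Ok(())
        })
    }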
@@ -104,7 +104,7 @@ impl PageTable { frame.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); - table[TEMPORARY_RECURSIVE_P4_INDEX].set_entry( + table[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( frame.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); @@ -145,22 +145,22 @@ impl PageTable { let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?; // Overwrite temporary recursive mapping - self.p4_mut()[TEMPORARY_RECURSIVE_P4_INDEX].set_entry( + self.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( other_table.p4_table.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); tlb_flush_all(); // This mapper will modify the other table using the temporary recursive p4 index set in the current page table. - let mut mapper = Mapper::temp(*other_table.p4_table.start()); + let mut mapper = Mapper::inactive(*other_table.p4_table.start()); // Execute `f` in the new context, in which the new page table is considered "active" let ret = f(&mut mapper, self); - // Restore temporary recursive mapping to original p4 table. This isn't strictly necessary, + // Restore inactive page table recursive mapping to original p4 table. This isn't strictly necessary, // but ensures the current table is returned to its original state. temporary_page.with_table_and_frame(|p4_table, frame| { - p4_table[TEMPORARY_RECURSIVE_P4_INDEX].set_entry( + p4_table[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( frame.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs index 71863b5eb8..82c93af6b3 100644 --- a/kernel/memory/src/paging/table.rs +++ b/kernel/memory/src/paging/table.rs @@ -29,12 +29,12 @@ pub const P4: *mut Table = 0o177777_776_776_776_776_0000 as *mut _; // ^p4 ^p3 ^p2 ^p1 ^offset // ^ 0o776 means that we're always looking at the 510th entry recursively -/// By default this is equivalent to [`P4`] unless the temporary recursive entry is modified. +/// By default this is equivalent to [`P4`] unless the inactive page table recursive entry is modified. /// -/// NOTE: this must be kept in sync with the recursive index in `kernel_config/memory.rs`. +/// NOTE: this must be kept in sync with the inactive page table recursive index in `kernel_config/memory.rs`. /// /// All four table indexes need to be set to 0o772 so that `Table::next_table_address` works properly. -pub const TEMP_P4: *mut Table = 0o177777_774_774_774_774_0000 as *mut _; +pub const INACTIVE_P4: *mut Table = 0o177777_774_774_774_774_0000 as *mut _; #[derive(FromBytes)] pub struct Table { diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 0435777ccd..9a99f7f7d0 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -50,7 +50,7 @@ static DESIGNATED_PAGES_LOW_END: Once = Once::new(); /// This higher part of the address range covers from the beginning of the heap area to the end of the address space. /// /// TODO: once the heap is fully dynamic and not dependent on constant addresses, we can move this up to KERNEL_TEXT_START (511th entry of P4). 
-static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(TEMPORARY_RECURSIVE_MEMORY_START)); +static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(INACTIVE_PAGE_TABLE_RECURSIVE_MEMORY_START)); const MIN_PAGE: Page = Page::containing_address(VirtualAddress::zero()); const MAX_PAGE: Page = Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS)); From 2c833190214d3aaee4b3862abfa7383f1b89102d Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Mon, 9 Jan 2023 21:17:08 +1100 Subject: [PATCH 11/18] Only set inactive page table recursive index in `PageTable::with` Signed-off-by: Klim Tsoutsman --- kernel/memory/src/paging/mod.rs | 48 ++++++++++++++++----------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index dd885d6998..f13289a8dd 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -104,10 +104,6 @@ impl PageTable { frame.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); - table[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( - frame.as_allocated_frame(), - PteFlagsArch::new().valid(true).writable(true), - ); })?; let (_temp_page, inited_new_p4_frame) = temporary_page.unmap_into_parts(current_page_table)?; @@ -139,38 +135,40 @@ impl PageTable { return Err("PageTable::with(): this PageTable ('self') must be the currently active page table."); } - // Temporarily take ownership of this page table's p4 allocated frame and + // Temporarily take ownership of the other page table's p4 allocated frame and // create a new temporary page that maps to that frame. - let this_p4 = core::mem::replace(&mut self.p4_table, AllocatedFrames::empty()); - let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?; - - // Overwrite temporary recursive mapping - self.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( - other_table.p4_table.as_allocated_frame(), - PteFlagsArch::new().valid(true).writable(true), - ); + let other_p4 = core::mem::replace(&mut other_table.p4_table, AllocatedFrames::empty()); + let other_p4_frame = *other_p4.start(); + let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, other_p4, self)?; + + // Overwrite inactive page table recursive mapping. + temporary_page.with_table_and_frame(|table, frame| { + self.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( + frame.as_allocated_frame(), + PteFlagsArch::new().valid(true).writable(true), + ); + table[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( + frame.as_allocated_frame(), + PteFlagsArch::new().valid(true).writable(true), + ); + })?; tlb_flush_all(); - // This mapper will modify the other table using the temporary recursive p4 index set in the current page table. - let mut mapper = Mapper::inactive(*other_table.p4_table.start()); + // This mapper will modify the other table using the inactive page table recursive p4 index set in the current page table. + let mut mapper = Mapper::inactive(other_p4_frame); // Execute `f` in the new context, in which the new page table is considered "active" let ret = f(&mut mapper, self); - // Restore inactive page table recursive mapping to original p4 table. This isn't strictly necessary, - // but ensures the current table is returned to its original state. 
- temporary_page.with_table_and_frame(|p4_table, frame| { - p4_table[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( - frame.as_allocated_frame(), - PteFlagsArch::new().valid(true).writable(true), - ); - })?; + // Clear inactive page table recursive mapping. + self.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); + other_table.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); tlb_flush_all(); - // Here, recover the current page table's p4 frame and restore it into this current page table, + // Here, recover the other page table's p4 frame and restore it into the other page table, // since we removed it earlier at the top of this function and gave it to the temporary page. let (_temp_page, p4_frame) = temporary_page.unmap_into_parts(self)?; - self.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?; + other_table.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?; ret } From bfed13f433ab0eae10daa7ed4c071a76738374aa Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Mon, 9 Jan 2023 03:40:16 -0800 Subject: [PATCH 12/18] docs --- kernel/memory/src/paging/mapper.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 3d3ff33eb0..5c4b2893c8 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -88,8 +88,8 @@ impl Mapper { /// Dumps all page table entries at all four page table levels for the given `VirtualAddress`, /// and also shows their `PteFlags`. /// - /// The page table details are written to the the given `writer`. - pub fn dump_pte(&self,virtual_address: VirtualAddress) { + /// The page table details are written to the log as an `info` message. + pub fn dump_pte(&self, virtual_address: VirtualAddress) { let page = Page::containing_address(virtual_address); let p4 = self.p4(); let p3 = p4.next_table(page.p4_index()); From 6f9adf5dff3f3210603ca19e0d1d6a23cb5aa9af Mon Sep 17 00:00:00 2001 From: Klim Tsoutsman Date: Mon, 9 Jan 2023 23:08:44 +1100 Subject: [PATCH 13/18] Change inactive terminology to upcoming Signed-off-by: Klim Tsoutsman --- kernel/kernel_config/src/memory.rs | 6 +++--- kernel/memory/src/paging/mapper.rs | 4 ++-- kernel/memory/src/paging/mod.rs | 14 +++++++------- kernel/memory/src/paging/table.rs | 8 ++++---- kernel/page_allocator/src/lib.rs | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs index 7aa76f11be..b08e44a73a 100644 --- a/kernel/kernel_config/src/memory.rs +++ b/kernel/kernel_config/src/memory.rs @@ -44,8 +44,8 @@ pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1; pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2; /// Value: 509. The 509th entry is used for the kernel heap pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3; -/// Value: 508. The 508th entry is used as a temporary recursive entry when mapping an inactive page table. -pub const INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; +/// Value: 508. The 508th entry is used as a temporary recursive entry when mapping an upcoming page table. 
+pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4; pub const MAX_PAGE_NUMBER: usize = MAX_VIRTUAL_ADDRESS / PAGE_SIZE; @@ -87,4 +87,4 @@ pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY; /// The page allocator doesn't allocate pages above this address. -pub const INACTIVE_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); +pub const UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT)); diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 5c4b2893c8..9bb75f04f9 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -22,7 +22,7 @@ use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, use crate::paging::{ get_current_p4, PageRange, - table::{P4, INACTIVE_P4, Table, Level4}, + table::{P4, UPCOMING_P4, Table, Level4}, }; use pte_flags::PteFlagsArch; use spin::Once; @@ -72,7 +72,7 @@ impl Mapper { /// Creates a new mapper that uses the temporary recursive P4 address. pub(crate) fn inactive(p4: Frame) -> Mapper { Mapper { - p4: Unique::new(INACTIVE_P4).unwrap(), + p4: Unique::new(UPCOMING_P4).unwrap(), target_p4: p4, } } diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 286524c367..1cb807ce28 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -34,7 +34,7 @@ use super::{Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress, use pte_flags::PteFlagsArch; use no_drop::NoDrop; use boot_info::BootInformation; -use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE, INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX}; +use kernel_config::memory::{RECURSIVE_P4_INDEX, PAGE_SIZE, UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX}; /// A top-level root (P4) page table. @@ -141,28 +141,28 @@ impl PageTable { let other_p4_frame = *other_p4.start(); let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, other_p4, self)?; - // Overwrite inactive page table recursive mapping. + // Overwrite upcoming page table recursive mapping. temporary_page.with_table_and_frame(|table, frame| { - self.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( + self.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( frame.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); - table[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( + table[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].set_entry( frame.as_allocated_frame(), PteFlagsArch::new().valid(true).writable(true), ); })?; tlb_flush_all(); - // This mapper will modify the other table using the inactive page table recursive p4 index set in the current page table. + // This mapper will modify the other table using the upcoming page table recursive p4 index set in the current page table. let mut mapper = Mapper::inactive(other_p4_frame); // Execute `f` in the new context, in which the new page table is considered "active" let ret = f(&mut mapper, self); // Clear inactive page table recursive mapping. 
- self.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); - other_table.p4_mut()[INACTIVE_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); + self.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); + other_table.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); tlb_flush_all(); // Here, recover the other page table's p4 frame and restore it into the other page table, diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs index 82c93af6b3..cf1e30c6fc 100644 --- a/kernel/memory/src/paging/table.rs +++ b/kernel/memory/src/paging/table.rs @@ -29,12 +29,12 @@ pub const P4: *mut Table = 0o177777_776_776_776_776_0000 as *mut _; // ^p4 ^p3 ^p2 ^p1 ^offset // ^ 0o776 means that we're always looking at the 510th entry recursively -/// By default this is equivalent to [`P4`] unless the inactive page table recursive entry is modified. +/// By default this is an invalid address unless the upcoming page table recursive entry is modified. /// -/// NOTE: this must be kept in sync with the inactive page table recursive index in `kernel_config/memory.rs`. +/// NOTE: this must be kept in sync with the upcoming page table recursive index in `kernel_config/memory.rs`. /// -/// All four table indexes need to be set to 0o772 so that `Table::next_table_address` works properly. -pub const INACTIVE_P4: *mut Table = 0o177777_774_774_774_774_0000 as *mut _; +/// All four table indexes need to be set to 0o774 so that `Table::next_table_address` works properly. +pub const UPCOMING_P4: *mut Table = 0o177777_774_774_774_774_0000 as *mut _; #[derive(FromBytes)] pub struct Table { diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 9a99f7f7d0..5e193712fb 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -50,7 +50,7 @@ static DESIGNATED_PAGES_LOW_END: Once = Once::new(); /// This higher part of the address range covers from the beginning of the heap area to the end of the address space. /// /// TODO: once the heap is fully dynamic and not dependent on constant addresses, we can move this up to KERNEL_TEXT_START (511th entry of P4). -static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(INACTIVE_PAGE_TABLE_RECURSIVE_MEMORY_START)); +static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START)); const MIN_PAGE: Page = Page::containing_address(VirtualAddress::zero()); const MAX_PAGE: Page = Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS)); From 2aca71466f109d2af2631196ef5b5d4bcbc8dc98 Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Mon, 9 Jan 2023 18:25:14 -0800 Subject: [PATCH 14/18] update docs --- kernel/memory/src/paging/mapper.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 9bb75f04f9..6b57372355 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -58,29 +58,41 @@ pub struct Mapper { } impl Mapper { + /// Creates (bootstraps) a `Mapper` based on the + /// currently-active P4 page table root. pub(crate) fn from_current() -> Mapper { Self::with_p4_frame(get_current_p4()) } + /// Creates a new `Mapper` that uses the recursive entry in the current P4 page table + /// to map the given `p4` frame. 
+ /// + /// The given `p4` frame is the root frame of that upcoming page table. pub(crate) fn with_p4_frame(p4: Frame) -> Mapper { Mapper { - p4: Unique::new(P4).unwrap(), // cannot panic because we know the P4 value is valid + p4: Unique::new(P4).unwrap(), // cannot panic; the P4 value is valid target_p4: p4, } } - /// Creates a new mapper that uses the temporary recursive P4 address. - pub(crate) fn inactive(p4: Frame) -> Mapper { + /// Creates a new mapper for an upcoming (soon-to-be-initialized) page table + /// that uses the `UPCOMING_P4` recursive entry in the current P4 table + /// to map that new page table. + /// + /// The given `p4` frame is the root frame of that upcoming page table. + pub(crate) fn upcoming(p4: Frame) -> Mapper { Mapper { p4: Unique::new(UPCOMING_P4).unwrap(), target_p4: p4, } } + /// Returns a reference to this `Mapper`'s root page table as a P4-level table. pub(crate) fn p4(&self) -> &Table { unsafe { self.p4.as_ref() } } + /// Returns a mutable reference to this `Mapper`'s root page table as a P4-level table. pub(crate) fn p4_mut(&mut self) -> &mut Table { unsafe { self.p4.as_mut() } } From 4632bba62f44d436ec759c4866db803e3636c0ea Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Mon, 9 Jan 2023 18:27:19 -0800 Subject: [PATCH 15/18] minor clarifications --- kernel/memory/src/paging/mod.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs index 1cb807ce28..0bb74e92d9 100644 --- a/kernel/memory/src/paging/mod.rs +++ b/kernel/memory/src/paging/mod.rs @@ -154,13 +154,14 @@ impl PageTable { })?; tlb_flush_all(); - // This mapper will modify the other table using the upcoming page table recursive p4 index set in the current page table. - let mut mapper = Mapper::inactive(other_p4_frame); + // This mapper will modify the `other_table` using the upcoming P4 recursive entry + // that is set for the currently active page table. + let mut mapper = Mapper::upcoming(other_p4_frame); // Execute `f` in the new context, in which the new page table is considered "active" let ret = f(&mut mapper, self); - // Clear inactive page table recursive mapping. + // Clear both page table's upcoming recursive mapping entries. self.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); other_table.p4_mut()[UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX].zero(); tlb_flush_all(); From bcfdabb4ddb7cfd99a6b7c8afc6674c3f06a504c Mon Sep 17 00:00:00 2001 From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com> Date: Mon, 9 Jan 2023 18:30:43 -0800 Subject: [PATCH 16/18] clarify docs --- kernel/page_allocator/src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs index 5e193712fb..342e78692e 100644 --- a/kernel/page_allocator/src/lib.rs +++ b/kernel/page_allocator/src/lib.rs @@ -47,9 +47,12 @@ static DESIGNATED_PAGES_LOW_END: Once = Once::new(); /// Defines the upper part of the address space that's designated, similar to `DESIGNATED_PAGES_LOW_END`. /// Any virtual addresses **greater than or equal to** this address is considered "designated". -/// This higher part of the address range covers from the beginning of the heap area to the end of the address space. +/// This higher part of the address range covers from: +/// the beginning of the recursive P4 entry used for modifying upcoming page tables +/// to the very end of the address space. 
From bcfdabb4ddb7cfd99a6b7c8afc6674c3f06a504c Mon Sep 17 00:00:00 2001
From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com>
Date: Mon, 9 Jan 2023 18:30:43 -0800
Subject: [PATCH 16/18] clarify docs

---
 kernel/page_allocator/src/lib.rs | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/kernel/page_allocator/src/lib.rs b/kernel/page_allocator/src/lib.rs
index 5e193712fb..342e78692e 100644
--- a/kernel/page_allocator/src/lib.rs
+++ b/kernel/page_allocator/src/lib.rs
@@ -47,9 +47,12 @@ static DESIGNATED_PAGES_LOW_END: Once<Page> = Once::new();

 /// Defines the upper part of the address space that's designated, similar to `DESIGNATED_PAGES_LOW_END`.
 /// Any virtual addresses **greater than or equal to** this address are considered "designated".
-/// This higher part of the address range covers from the beginning of the heap area to the end of the address space.
+/// This higher part of the address range covers from:
+/// the beginning of the region covered by the recursive P4 entry used for modifying upcoming page tables
+/// to the very end of the address space.
 ///
-/// TODO: once the heap is fully dynamic and not dependent on constant addresses, we can move this up to KERNEL_TEXT_START (511th entry of P4).
+/// TODO: once the heap is fully dynamic and not dependent on static addresses,
+/// we can exclude the heap from the designated region.
 static DESIGNATED_PAGES_HIGH_START: Page = Page::containing_address(VirtualAddress::new_canonical(UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START));

 const MIN_PAGE: Page = Page::containing_address(VirtualAddress::zero());
 const MAX_PAGE: Page = Page::containing_address(VirtualAddress::new_canonical(MAX_VIRTUAL_ADDRESS));
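For concreteness, the designated-region test this doc comment describes amounts to a single comparison. A standalone sketch (plain Rust; the constant's value is derived in the next patch, and the function name is ours):

    /// Start of the upper designated region: the base address of P4 entry 508.
    const DESIGNATED_HIGH_START: u64 = 0xFFFF_FE00_0000_0000;

    /// Returns true if `vaddr` lies in the upper designated region.
    fn is_designated_high(vaddr: u64) -> bool {
        vaddr >= DESIGNATED_HIGH_START
    }

    fn main() {
        assert!(is_designated_high(0xFFFF_FFFF_8000_0000));  // kernel text region (P4 entry 511)
        assert!(!is_designated_high(0xFFFF_FD80_0000_0000)); // P4 entry 507: general purpose
    }
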
From 20ae477e79bf2f55819555c66a3795878e690553 Mon Sep 17 00:00:00 2001
From: Kevin Boos <1139460+kevinaboos@users.noreply.github.com>
Date: Mon, 9 Jan 2023 18:34:48 -0800
Subject: [PATCH 17/18] clarify docs

---
 kernel/kernel_config/src/memory.rs | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/kernel_config/src/memory.rs b/kernel/kernel_config/src/memory.rs
index b08e44a73a..c990a9f8ec 100644
--- a/kernel/kernel_config/src/memory.rs
+++ b/kernel/kernel_config/src/memory.rs
@@ -5,12 +5,10 @@
 //! Current P4 (top-level page table) mappings:
 //! * 511: kernel text sections
-//! * 510: recursive mapping to top of P4
+//! * 510: recursive mapping for accessing the current P4 root page table frame.
 //! * 509: kernel heap
-//! * 508: kernel stacks
-//! * 507: userspace stacks
-//! * 506 down to 0: available for user processes
-
+//! * 508: recursive mapping for accessing the P4 root page table frame of an upcoming new page table.
+//! * 507 down to 0: available for general usage

 /// 64-bit architecture results in 8 bytes per address.
 pub const BYTES_PER_ADDR: usize = core::mem::size_of::<usize>();
@@ -40,11 +38,13 @@ pub const TEMPORARY_PAGE_VIRT_ADDR: usize = MAX_VIRTUAL_ADDRESS;
 pub const ENTRIES_PER_PAGE_TABLE: usize = PAGE_SIZE / BYTES_PER_ADDR;
 /// Value: 511. The 511th entry is used for kernel text sections
 pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1;
-/// Value: 510. The 510th entry is used for the recursive P4 mapping.
+/// Value: 510. The 510th entry is used to recursively map the current P4 root page table frame
+/// such that it can be accessed and modified just like any other level of page table.
 pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2;
 /// Value: 509. The 509th entry is used for the kernel heap
 pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3;
-/// Value: 508. The 508th entry is used as a temporary recursive entry when mapping an upcoming page table.
+/// Value: 508. The 508th entry is used to temporarily recursively map the P4 root page table frame
+/// of an upcoming (new) page table such that it can be accessed and modified.
 pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4;


@@ -86,5 +86,5 @@ pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug
 /// the kernel heap gets the whole 509th P4 entry.
 pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY;

-/// The page allocator doesn't allocate pages above this address.
+/// The system (page allocator) must not use addresses at or above this address.
 pub const UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT));
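As a sanity check, this constant can be computed by hand. Assuming P4_INDEX_SHIFT (27) plus PAGE_SHIFT (12) gives the 39-bit span of one P4 entry on x86_64, entry 508's region begins at 0xFFFF_FE00_0000_0000, as this small standalone snippet (plain Rust, names ours) confirms:

    fn main() {
        const UPCOMING_P4_INDEX: u64 = 512 - 4; // 508
        // Each P4 entry spans 2^39 bytes (512 GiB); OR in the canonical
        // high bits to get the region's starting virtual address.
        let start = 0xFFFF_0000_0000_0000_u64 | (UPCOMING_P4_INDEX << 39);
        assert_eq!(start, 0xFFFF_FE00_0000_0000);
    }

This is also the lower bound of the page allocator's upper designated region from the previous patch.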
From 5bcba25eaaf81f5aedbcea887b576a539172bd20 Mon Sep 17 00:00:00 2001
From: Kevin Boos
Date: Tue, 10 Jan 2023 11:25:21 +0800
Subject: [PATCH 18/18] Clarify docs

---
 kernel/memory/src/paging/mod.rs | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs
index 0bb74e92d9..e08335b361 100644
--- a/kernel/memory/src/paging/mod.rs
+++ b/kernel/memory/src/paging/mod.rs
@@ -225,7 +225,7 @@ pub fn init(
     let boot_info_start_vaddr = boot_info.start().ok_or("boot_info start virtual address was invalid")?;
     let boot_info_start_paddr = page_table.translate(boot_info_start_vaddr).ok_or("Couldn't get boot_info start physical address")?;
     let boot_info_size = boot_info.len();
-    debug!("multiboot vaddr: {:#X}, multiboot paddr: {:#X}, size: {:#X}\n", boot_info_start_vaddr, boot_info_start_paddr, boot_info_size);
+    debug!("multiboot vaddr: {:#X}, multiboot paddr: {:#X}, size: {:#X}", boot_info_start_vaddr, boot_info_start_paddr, boot_info_size);

     let new_p4_frame = frame_allocator::allocate_frames(1).ok_or("couldn't allocate frame for new page table")?;
     let mut new_table = PageTable::new_table(&mut page_table, new_p4_frame, None)?;
@@ -238,7 +238,7 @@ pub fn init(
     let mut higher_half_mapped_pages: [Option<NoDrop<MappedPages>>; 32] = Default::default();
     let mut identity_mapped_pages: [Option<NoDrop<MappedPages>>; 32] = Default::default();

-    // Stack frames are not guaranteed to be contiguous.
+    // Stack frames are not guaranteed to be contiguous in physical memory.
     let stack_size = boot_info.stack_size()?;
     let stack_page_range = PageRange::from_virt_addr(
         // `PAGE_SIZE` accounts for the guard page, which does not have a corresponding frame.
         stack_start_virt + PAGE_SIZE,
         stack_size - PAGE_SIZE,
     );
     debug!("Initial stack start {stack_start_virt:#X}, size: {stack_size:#X} bytes, {stack_page_range:X?}");

-    // Boot info frames are not guaranteed to be contiguous.
+    // Boot info frames are not guaranteed to be contiguous in physical memory.
     let boot_info_page_range = PageRange::from_virt_addr(boot_info_start_vaddr, boot_info_size);
     debug!("Boot info start: {boot_info_start_vaddr:#X}, size: {boot_info_size:#X}, {boot_info_page_range:#X?}");

@@ -321,13 +321,16 @@ pub fn init(
     data_mapped_pages = Some(NoDrop::new(new_mapper.map_allocated_pages_to(data_pages, data_frames, data_flags)?));
     index += 1;

-    // Handle the stack (a separate data section), which consists of one guard page followed by the real stack pages.
-    // It does not need to be identity mapped because each AP core will have its own stack.
+    // Handle the stack (a separate data section), which consists of one guard page (unmapped)
+    // followed by the real (mapped) stack pages.
+    // The stack does not need to be identity mapped, because each secondary CPU will get its own stack.
     let stack_guard_page = page_allocator::allocate_pages_at(stack_start_virt, 1)?;
     let mut stack_mapped_pages: Option<MappedPages> = None;
     for page in stack_page_range.into_iter() {
+        // The stack is not guaranteed to be contiguous in physical memory,
+        // so we use the `current_mapper` to translate each page into its backing physical frame,
+        // and then reproduce the same mapping in the `new_mapper`.
         let frame = current_mapper.translate_page(page).ok_or("couldn't translate stack page")?;
-
         let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?;
         let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?;
         let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, data_flags)?;
@@ -343,7 +346,7 @@ pub fn init(
     ));

     // Map the VGA display memory as writable.
-    // We do an identity mapping for the VGA display too, because the AP cores may access it while booting.
+    // We do an identity mapping for the VGA display too, because secondary CPUs may access it while booting.
     let (vga_phys_addr, vga_size_in_bytes, vga_flags) = get_vga_mem_addr()?;
     let vga_virt_addr_identity = VirtualAddress::new_canonical(vga_phys_addr.value());
     let vga_display_pages = page_allocator::allocate_pages_by_bytes_at(vga_virt_addr_identity + KERNEL_OFFSET, vga_size_in_bytes)?;
@@ -355,9 +358,13 @@ pub fn init(
     higher_half_mapped_pages[index] = Some(NoDrop::new(new_mapper.map_allocated_pages_to(vga_display_pages, vga_display_frames, vga_flags)?));
     index += 1;

+    // Map the bootloader info, a separate region of read-only memory, so that we can access it later.
+    // This does not need to be identity mapped.
     for page in boot_info_page_range.into_iter() {
+        // The boot info is not guaranteed to be contiguous in physical memory,
+        // so we use the `current_mapper` to translate each page into its backing physical frame,
+        // and then reproduce the same mapping in the `new_mapper`.
         let frame = current_mapper.translate_page(page).ok_or("couldn't translate stack page")?;
-
         let allocated_page = page_allocator::allocate_pages_at(page.start_address(), 1)?;
         let allocated_frame = frame_allocator::allocate_frames_at(frame.start_address(), 1)?;
         let mapped_pages = new_mapper.map_allocated_pages_to(allocated_page, allocated_frame, PteFlags::new())?;