Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add access to current page table in PageTable::with #786

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 11 additions & 25 deletions kernel/kernel_config/src/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,10 @@

//! Current P4 (top-level page table) mappings:
//! * 511: kernel text sections
//! * 510: recursive mapping to top of P4
//! * 510: recursive mapping for accessing the current P4 root page table frame.
//! * 509: kernel heap
//! * 508: kernel stacks
//! * 507: userspace stacks
//! * 506 down to 0: available for user processes

//! * 508: recursive mapping for accessing the P4 root page table frame of an upcoming new page table.
//! * 507 down to 0: available for general usage

/// 64-bit architecture results in 8 bytes per address.
pub const BYTES_PER_ADDR: usize = core::mem::size_of::<usize>();
Expand All @@ -36,18 +34,18 @@ pub const MAX_VIRTUAL_ADDRESS: usize = usize::MAX;

pub const TEMPORARY_PAGE_VIRT_ADDR: usize = MAX_VIRTUAL_ADDRESS;

/// Value: 512.
/// Value: 512.
pub const ENTRIES_PER_PAGE_TABLE: usize = PAGE_SIZE / BYTES_PER_ADDR;
/// Value: 511. The 511th entry is used for kernel text sections
pub const KERNEL_TEXT_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 1;
/// Value: 510. The 510th entry is used for the recursive P4 mapping.
/// Value: 510. The 510th entry is used to recursively map the current P4 root page table frame
/// such that it can be accessed and modified just like any other level of page table.
pub const RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 2;
/// Value: 509. The 509th entry is used for the kernel heap
pub const KERNEL_HEAP_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 3;
/// Value: 508. The 508th entry is used for all kernel stacks
pub const KERNEL_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4;
/// Value: 507. The 507th entry is used for all userspace stacks
pub const USER_STACK_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 5;
/// Value: 508. The 508th entry is used to temporarily recursively map the P4 root page table frame
/// of an upcoming (new) page table such that it can be accessed and modified.
pub const UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX: usize = ENTRIES_PER_PAGE_TABLE - 4;


pub const MAX_PAGE_NUMBER: usize = MAX_VIRTUAL_ADDRESS / PAGE_SIZE;
Expand Down Expand Up @@ -88,17 +86,5 @@ pub const KERNEL_HEAP_INITIAL_SIZE: usize = 256 * 1024 * 1024; // 256 MiB, debug
/// the kernel heap gets the whole 509th P4 entry.
pub const KERNEL_HEAP_MAX_SIZE: usize = ADDRESSABILITY_PER_P4_ENTRY;


/// the kernel stack allocator gets the 508th P4 entry of addressability.
/// actual value: 0o177777_774_000_000_000_0000, or 0xFFFF_FE00_0000_0000
pub const KERNEL_STACK_ALLOCATOR_BOTTOM: usize = 0xFFFF_0000_0000_0000 | (KERNEL_STACK_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT));
/// the highest actually usuable address in the kernel stack allocator
pub const KERNEL_STACK_ALLOCATOR_TOP_ADDR: usize = KERNEL_STACK_ALLOCATOR_BOTTOM + ADDRESSABILITY_PER_P4_ENTRY - BYTES_PER_ADDR;


/// the userspace stack allocators (one per userspace task) each get the 507th P4 entry of addressability.
/// actual value: 0o177777_773_000_000_000_0000, or 0xFFFF_FD80_0000_0000
pub const USER_STACK_ALLOCATOR_BOTTOM: usize = 0xFFFF_0000_0000_0000 | (USER_STACK_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT));
/// the highest actually usuable address in each userspace stack allocator
pub const USER_STACK_ALLOCATOR_TOP_ADDR: usize = USER_STACK_ALLOCATOR_BOTTOM + ADDRESSABILITY_PER_P4_ENTRY - BYTES_PER_ADDR;

/// The system (page allocator) must not use addresses at or above this address.
pub const UPCOMING_PAGE_TABLE_RECURSIVE_MEMORY_START: usize = 0xFFFF_0000_0000_0000 | (UPCOMING_PAGE_TABLE_RECURSIVE_P4_INDEX << (P4_INDEX_SHIFT + PAGE_SHIFT));
34 changes: 26 additions & 8 deletions kernel/memory/src/paging/mapper.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
use core::{
borrow::{Borrow, BorrowMut},
cmp::Ordering,
fmt::{self, Write},
hash::{Hash, Hasher},
marker::PhantomData,
mem,
Expand All @@ -23,7 +22,7 @@ use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page,
use crate::paging::{
get_current_p4,
PageRange,
table::{P4, Table, Level4},
table::{P4, UPCOMING_P4, Table, Level4},
};
use pte_flags::PteFlagsArch;
use spin::Once;
Expand Down Expand Up @@ -59,37 +58,56 @@ pub struct Mapper {
}

impl Mapper {
/// Bootstraps a `Mapper` from the page table that is currently active,
/// i.e., the one whose P4 root frame the CPU is using right now.
pub(crate) fn from_current() -> Mapper {
    let current_p4_frame = get_current_p4();
    Self::with_p4_frame(current_p4_frame)
}

/// Creates a new `Mapper` that accesses its page tables through the
/// recursive entry (`P4`) of the currently-active P4 page table.
///
/// The given `p4` frame is the root frame targeted by this mapper.
/// NOTE(review): the previous doc said this is the root of an "upcoming"
/// page table, which appears copy-pasted from `upcoming()`; the only
/// visible caller (`from_current`) passes the *current* P4 frame — confirm.
pub(crate) fn with_p4_frame(p4: Frame) -> Mapper {
    // `P4` is a fixed, well-known recursive-mapping address, so
    // `Unique::new` cannot return `None` here.
    let current_table = Unique::new(P4).unwrap();
    Mapper {
        p4: current_table,
        target_p4: p4,
    }
}

/// Creates a `Mapper` for an upcoming (soon-to-be-initialized) page table,
/// reached through the `UPCOMING_P4` recursive entry installed in the
/// current P4 table.
///
/// The given `p4` frame is the root frame of that upcoming page table.
pub(crate) fn upcoming(p4: Frame) -> Mapper {
    // `UPCOMING_P4` is a fixed, well-known recursive-mapping address,
    // so `Unique::new` cannot return `None` here.
    let upcoming_table = Unique::new(UPCOMING_P4).unwrap();
    Mapper {
        p4: upcoming_table,
        target_p4: p4,
    }
}

/// Returns a reference to this `Mapper`'s root page table as a P4-level table.
///
/// The reference points through the recursive-mapping address this `Mapper`
/// was constructed with (`P4` or `UPCOMING_P4`).
pub(crate) fn p4(&self) -> &Table<Level4> {
// SAFETY: `self.p4` was built from a known-valid, non-null recursive-mapping
// address at construction time. NOTE(review): this assumes the corresponding
// recursive entry stays installed for this Mapper's lifetime — confirm.
unsafe { self.p4.as_ref() }
}

/// Returns a mutable reference to this `Mapper`'s root page table as a
/// P4-level table.
///
/// The reference points through the recursive-mapping address this `Mapper`
/// was constructed with (`P4` or `UPCOMING_P4`).
pub(crate) fn p4_mut(&mut self) -> &mut Table<Level4> {
// SAFETY: `self.p4` was built from a known-valid, non-null recursive-mapping
// address at construction time, and `&mut self` guarantees exclusive access.
// NOTE(review): assumes the recursive entry stays installed while this
// Mapper is alive — confirm.
unsafe { self.p4.as_mut() }
}

/// Dumps all page table entries at all four page table levels for the given `VirtualAddress`,
/// and also shows their `PteFlags`.
///
/// The page table details are written to the the given `writer`.
pub fn dump_pte<W: Write>(&self, writer: &mut W, virtual_address: VirtualAddress) -> fmt::Result {
/// The page table details are written to the log as an `info` message.
pub fn dump_pte(&self, virtual_address: VirtualAddress) {
let page = Page::containing_address(virtual_address);
let p4 = self.p4();
let p3 = p4.next_table(page.p4_index());
let p2 = p3.and_then(|p3| p3.next_table(page.p3_index()));
let p1 = p2.and_then(|p2| p2.next_table(page.p2_index()));
write!(
writer,
log::info!(
"VirtualAddress: {:#X}:
P4 entry: {:#X} ({:?})
P3 entry: {:#X} ({:?})
Expand All @@ -104,7 +122,7 @@ impl Mapper {
p2.map(|p2| &p2[page.p2_index()]).map(|p2_entry| p2_entry.flags()),
p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.value()).unwrap_or(0x0),
p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.flags()),
)
);
}

/// Translates a `VirtualAddress` to a `PhysicalAddress` by walking the page tables.
Expand Down
Loading