Skip to content

Commit

Permalink
refactor: move riscv specific code to arch mod
Browse files Browse the repository at this point in the history
In the future we want to port this to other architectures besides riscv; to make this easier, it's helpful to have architecture-specific code cleanly separated.
  • Loading branch information
JonasKruckenberg committed Nov 30, 2023
1 parent 4f38488 commit b6c3a8d
Show file tree
Hide file tree
Showing 20 changed files with 735 additions and 616 deletions.
103 changes: 103 additions & 0 deletions crates/kernel/src/arch/backtrace.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
use core::arch::asm;
use core::ops;
use gimli::{Register, RiscV};

// The LLVM source (https://llvm.org/doxygen/RISCVFrameLowering_8cpp_source.html)
// specifies that only ra (x1) and the saved registers (x8-x9, x18-x27) are used
// for frame unwinding info, plus sp (x2) for the CFA, so we only need to save those.
// If this causes issues down the line it should be trivial to change this to capture the full context.
/// A snapshot of the RISC-V registers needed for frame-based stack unwinding.
#[derive(Debug, Clone)]
pub struct Context {
    /// Captured return address register, `ra` (x1).
    pub ra: usize,
    /// Captured stack pointer register, `sp` (x2) — used as the CFA.
    pub sp: usize,
    /// Captured saved registers `s0`-`s11` (x8-x9, x18-x27), in that order.
    pub s: [usize; 12],
}

impl Context {
    /// Captures the current values of `ra`, `sp`, and the saved registers
    /// `s0`-`s11` into a new [`Context`].
    ///
    /// Only the registers that carry unwind information are captured (see
    /// the comment on [`Context`]).
    // Load bearing inline don't remove — presumably inlining is required so
    // the registers are read in the *caller's* frame rather than in a frame
    // set up for `capture` itself (TODO confirm).
    // TODO figure out why this is and remove
    #[inline(always)]
    pub fn capture() -> Self {
        let (ra, sp, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11);
        // SAFETY: `mv` only copies register values into the output operands;
        // it does not access memory.
        unsafe {
            asm!(
                "mv {}, ra",
                "mv {}, sp",
                "mv {}, s0",
                "mv {}, s1",
                "mv {}, s2",
                "mv {}, s3",
                "mv {}, s4",
                "mv {}, s5",
                "mv {}, s6",
                "mv {}, s7",
                "mv {}, s8",
                "mv {}, s9",
                "mv {}, s10",
                "mv {}, s11",
                out(reg) ra,
                out(reg) sp,
                out(reg) s0,
                out(reg) s1,
                out(reg) s2,
                out(reg) s3,
                out(reg) s4,
                out(reg) s5,
                out(reg) s6,
                out(reg) s7,
                out(reg) s8,
                out(reg) s9,
                out(reg) s10,
                out(reg) s11,
            )
        }

        Self {
            ra,
            sp,
            s: [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11],
        }
    }

    /// Returns the captured return address (`ra`).
    pub fn return_address(&self) -> usize {
        self.ra
    }

    /// Overwrites the captured return address.
    pub fn set_return_address(&mut self, ra: usize) {
        self.ra = ra;
    }

    /// Returns the captured stack pointer (`sp`).
    pub fn stack_pointer(&self) -> usize {
        self.sp
    }

    /// Overwrites the captured stack pointer.
    pub fn set_stack_pointer(&mut self, sp: usize) {
        self.sp = sp;
    }
}

impl ops::Index<Register> for Context {
    type Output = usize;

    /// Looks up a captured register value by its DWARF register number.
    ///
    /// Supported registers are `ra`, `sp`, and the saved registers
    /// `s0`-`s11`; any other register number panics.
    fn index(&self, reg: Register) -> &Self::Output {
        if reg == RiscV::RA {
            return &self.ra;
        }
        if reg == RiscV::SP {
            return &self.sp;
        }
        // s0/s1 are x8-x9, s2-s11 are x18-x27; map both runs onto `self.s`.
        let Register(num) = reg;
        let slot = match num {
            8..=9 => num as usize - 8,
            18..=27 => num as usize - 16,
            _ => panic!("unsupported register {reg:?}"),
        };
        &self.s[slot]
    }
}

impl ops::IndexMut<Register> for Context {
    /// Mutable register lookup; the register-number mapping mirrors the
    /// `ops::Index` implementation.
    fn index_mut(&mut self, reg: Register) -> &mut Self::Output {
        if reg == RiscV::RA {
            &mut self.ra
        } else if reg == RiscV::SP {
            &mut self.sp
        } else {
            // s0/s1 are x8-x9, s2-s11 are x18-x27; both runs map onto `self.s`.
            let Register(num) = reg;
            let slot = match num {
                8..=9 => num as usize - 8,
                18..=27 => num as usize - 16,
                _ => panic!("unsupported register {reg:?}"),
            };
            &mut self.s[slot]
        }
    }
}
File renamed without changes.
19 changes: 19 additions & 0 deletions crates/kernel/src/arch/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
use core::arch::asm;

/// Register-context capture used for backtraces.
pub mod backtrace;
pub mod interrupt;
/// Page-table management (Sv39 mapper, entries, flush tokens).
pub mod paging;
mod start;
pub mod trap;

/// Size of a single page frame in bytes (4 KiB base pages).
pub const PAGE_SIZE: usize = 4096;

/// Kernel stack size, expressed in pages (i.e. `STACK_SIZE_PAGES * PAGE_SIZE` bytes).
// NOTE(review): the consumer is not visible in this file — presumably the boot
// code in `start` reserves this much stack; confirm against that module.
pub const STACK_SIZE_PAGES: usize = 25;

/// Parks the current hart forever.
///
/// Spins on `wfi` (wait-for-interrupt) so the hart sleeps between wakeups;
/// the loop never exits, so this function never returns.
pub fn halt() -> ! {
    loop {
        // SAFETY: `wfi` only pauses the hart until an interrupt is pending;
        // it does not access memory visible to Rust.
        unsafe { asm!("wfi") };
    }
}
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use super::PhysicalAddress;
use crate::paging::PhysicalAddress;
use bitflags::bitflags;
use core::fmt;
use core::fmt::Formatter;
Expand Down Expand Up @@ -31,11 +31,11 @@ impl Entry {
}

pub fn set_address(&mut self, adress: PhysicalAddress) {
self.0 |= adress.0 >> 2;
self.0 |= adress.as_raw() >> 2;
}

pub fn address(&self) -> PhysicalAddress {
PhysicalAddress((self.0 & !0x3ff) << 2)
unsafe { PhysicalAddress::new((self.0 & !0x3ff) << 2) }
}
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
use super::VirtualAddress;
use crate::paging::VirtualAddress;
use core::mem;

/// A token representing a pending TLB flush for a single virtual address.
///
/// NOTE(review): judging by the methods below, the mapping change only takes
/// effect once `flush` is called (or is deliberately skipped via the unsafe
/// `ignore`) — confirm against call sites.
pub struct Flush {
    // The virtual address whose translation must be invalidated.
    virt: VirtualAddress,
}

impl Flush {
pub(super) fn new(virt: VirtualAddress) -> Self {
pub(crate) fn new(virt: VirtualAddress) -> Self {
Self { virt }
}

pub fn flush(self) {
// TODO check if this is necessary & make SBI call instead
unsafe {
riscv::asm::sfence_vma(0, self.virt.0);
riscv::asm::sfence_vma(0, self.virt.as_raw());
}
}
pub unsafe fn ignore(self) {
Expand Down
143 changes: 143 additions & 0 deletions crates/kernel/src/arch/paging/mapper.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,143 @@
use super::entry::PageFlags;
use super::flush::Flush;
use super::table::Table;
use crate::arch::paging::MAX_LEVEL;
use crate::arch::PAGE_SIZE;
use crate::paging::frame_alloc::FrameAllocator;
use crate::paging::{PhysicalAddress, VirtualAddress};
use crate::Error;
use riscv::register::satp;
use riscv::register::satp::Mode;

/// Owns a page-table hierarchy plus the frame allocator used to grow it.
pub struct Mapper {
    // Physical address of the root page table (level `MAX_LEVEL`).
    root_table: PhysicalAddress,
    // Allocator used to obtain frames for intermediate page tables.
    allocator: FrameAllocator,
}

impl Mapper {
pub fn new(mut allocator: FrameAllocator) -> crate::Result<Self> {
let root_table = allocator.allocate_frame()?;

Ok(Self {
root_table,
allocator,
})
}

pub fn from_active(allocator: FrameAllocator) -> Self {
let root_table = unsafe { PhysicalAddress::new(satp::read().ppn() << 12) };

Self {
root_table,
allocator,
}
}

pub fn activate(&self) -> crate::Result<()> {
unsafe {
// we have to access these addresses as the table is not mapped
// so after activating the page table, we can't access it anymore
// TODO: this is a bit of a hack, we should probably map the table
let start = self.root_table().lowest_mapped_address().unwrap();
let end = self.root_table().highest_mapped_address().unwrap();

let ppn = self.root_table().address().as_raw() >> 12;
satp::set(Mode::Sv39, 0, ppn);

// the most brutal approach to this, probably not necessary
// this will take a hammer to the page tables and synchronize *everything*
sbicall::rfence::sfence_vma(0, -1isize as usize, start.as_raw(), end.as_raw())?;
}

Ok(())
}

pub fn allocator(&self) -> &FrameAllocator {
&self.allocator
}

pub fn root_table(&self) -> Table {
Table::from_address(self.root_table, MAX_LEVEL)
}

pub fn map_identity(
&mut self,
phys: PhysicalAddress,
flags: PageFlags,
) -> crate::Result<Flush> {
let virt = unsafe { VirtualAddress::new(phys.as_raw()) };
self.map(virt, phys, flags, 0)
}

pub fn map(
&mut self,
virt: VirtualAddress,
phys: PhysicalAddress,
flags: PageFlags,
level: usize,
) -> crate::Result<Flush> {
assert_eq!(
phys.as_raw() % PAGE_SIZE,
0,
"can only map to aligned physical addresses {:#x?}",
phys.as_raw()
);

// Make sure that Read, Write, or Execute have been provided
// otherwise, we'll leak memory and always create a page fault.
assert!(flags.intersects(PageFlags::READ | PageFlags::WRITE | PageFlags::EXECUTE));

let mut table = self.root_table();

for i in (level..=MAX_LEVEL).rev() {
let entry = &mut table[virt];

if i == level {
entry.set_flags(flags | PageFlags::VALID);
entry.set_address(phys);
return Ok(Flush::new(virt));
} else {
if !entry.flags().contains(PageFlags::VALID) {
let frame = self.allocator.allocate_frame()?;
entry.set_flags(PageFlags::VALID);
entry.set_address(frame);
}

table = Table::from_address(entry.address(), i - 1);
}
}

Err(Error::VirtualAddressTooLarge(virt))
}

pub fn virt_to_phys(&self, virt: VirtualAddress) -> crate::Result<PhysicalAddress> {
let mut table = self.root_table();

for i in (0..=MAX_LEVEL).rev() {
let entry = &table[virt];

if entry
.flags()
.intersects(PageFlags::EXECUTE | PageFlags::READ)
{
let addr = entry.address();
let off_mask = (1 << 12) - 1;
let pgoff = virt.as_raw() & off_mask;

unsafe {
return Ok(PhysicalAddress::new(addr.as_raw() & !off_mask | pgoff));
}
} else {
// PTE is pointer to next level page table
assert!(
entry.flags().contains(PageFlags::VALID),
"invalid page table entry {entry:?} for virt {:#x?}",
virt.as_raw()
);
table = Table::from_address(entry.address(), i - 1);
}
}

Err(Error::VirtualAddressNotMapped(virt))
}
}
Loading

0 comments on commit b6c3a8d

Please sign in to comment.