Skip to content

Commit

Permalink
feat(newlib): remove sbrk
Browse files Browse the repository at this point in the history
Signed-off-by: Martin Kröning <martin.kroening@eonerc.rwth-aachen.de>
  • Loading branch information
mkroening committed Aug 16, 2024
1 parent 6b84d0a commit 80c5182
Show file tree
Hide file tree
Showing 6 changed files with 4 additions and 123 deletions.
6 changes: 0 additions & 6 deletions src/arch/aarch64/mm/virtualmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -145,9 +145,3 @@ pub fn print_information() {
let free_list = KERNEL_FREE_LIST.lock();
info!("Virtual memory free list:\n{free_list}");
}

#[cfg(feature = "newlib")]
#[inline]
/// Exclusive upper bound of the kernel's virtual memory area.
/// Only present for newlib builds; presumably this also marks where the
/// task heap begins (mirrors the documented x86_64 variant) — TODO confirm.
pub const fn kernel_heap_end() -> VirtAddr {
KERNEL_VIRTUAL_MEMORY_END
}
7 changes: 0 additions & 7 deletions src/arch/riscv64/mm/virtualmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> {
))
}

#[cfg(not(feature = "newlib"))]
pub fn allocate_aligned(size: usize, align: usize) -> Result<VirtAddr, AllocError> {
assert!(size > 0);
assert!(align > 0);
Expand Down Expand Up @@ -152,9 +151,3 @@ pub fn print_information() {
let free_list = KERNEL_FREE_LIST.lock();
info!("Virtual memory free list:\n{free_list}");
}

#[cfg(feature = "newlib")]
#[inline]
/// Exclusive upper bound of the kernel's virtual memory area on riscv64.
/// Only present for newlib builds; presumably this also marks where the
/// task heap begins (mirrors the documented x86_64 variant) — TODO confirm.
pub const fn kernel_heap_end() -> VirtAddr {
KERNEL_VIRTUAL_MEMORY_END
}
12 changes: 2 additions & 10 deletions src/arch/x86_64/mm/virtualmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> {
))
}

#[cfg(not(feature = "newlib"))]
pub fn allocate_aligned(size: usize, align: usize) -> Result<VirtAddr, AllocError> {
assert!(size > 0);
assert!(align > 0);
Expand Down Expand Up @@ -147,21 +146,14 @@ pub fn print_information() {

/// End of the virtual memory address space reserved for kernel memory.
/// This also marks the start of the virtual memory address space reserved for the task heap.
/// In case of pure rust applications, we don't have a task heap.
#[cfg(all(not(feature = "common-os"), not(feature = "newlib")))]
#[cfg(not(feature = "common-os"))]
#[inline]
/// Kernel heap end for regular (non-common-os) builds:
/// 0x8000_0000_0000 = 2^47, i.e. the top of the lower half of the
/// 48-bit canonical x86-64 address space.
pub const fn kernel_heap_end() -> VirtAddr {
VirtAddr(0x8000_0000_0000u64)
}

#[cfg(all(feature = "common-os", not(feature = "newlib")))]
#[cfg(feature = "common-os")]
#[inline]
/// Kernel heap end when HermitOS acts as a common OS:
/// 0x100_0000_0000 = 1 TiB, leaving the range above it for user tasks.
pub const fn kernel_heap_end() -> VirtAddr {
VirtAddr(0x100_0000_0000u64)
}

#[cfg(all(not(feature = "common-os"), feature = "newlib"))]
#[inline]
/// Kernel heap end when a C runtime (newlib) is present:
/// 0x1_0000_0000 = 4 GiB; per the doc comment above, the task heap for the
/// C application starts at this address.
pub const fn kernel_heap_end() -> VirtAddr {
VirtAddr(0x1_0000_0000u64)
}
68 changes: 2 additions & 66 deletions src/mm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,6 @@ use core::ops::Range;

use align_address::Align;
use hermit_sync::Lazy;
#[cfg(feature = "newlib")]
use hermit_sync::OnceCell;

use self::allocator::LockedAllocator;
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
Expand All @@ -16,8 +14,6 @@ use crate::arch::mm::paging::HugePageSize;
use crate::arch::mm::paging::PageTableEntryFlagsExt;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize, PageTableEntryFlags};
use crate::arch::mm::physicalmem::total_memory_size;
#[cfg(feature = "newlib")]
use crate::arch::mm::virtualmem::kernel_heap_end;
#[cfg(feature = "pci")]
use crate::arch::mm::PhysAddr;
use crate::arch::mm::VirtAddr;
Expand All @@ -38,10 +34,6 @@ static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
}
});

#[cfg(feature = "newlib")]
/// User heap address range.
// Populated exactly once at the end of `init` via `HEAP_ADDR_RANGE.set(...)`;
// read through `task_heap_start`/`task_heap_end`, which unwrap the cell.
static HEAP_ADDR_RANGE: OnceCell<Range<VirtAddr>> = OnceCell::new();

pub(crate) fn kernel_start_address() -> VirtAddr {
KERNEL_ADDR_RANGE.start
}
Expand All @@ -50,16 +42,6 @@ pub(crate) fn kernel_end_address() -> VirtAddr {
KERNEL_ADDR_RANGE.end
}

#[cfg(feature = "newlib")]
/// Start address of the user (task) heap.
/// Panics (via `unwrap`) if called before `init` has set `HEAP_ADDR_RANGE`.
pub(crate) fn task_heap_start() -> VirtAddr {
HEAP_ADDR_RANGE.get().unwrap().start
}

#[cfg(feature = "newlib")]
/// End address of the user (task) heap.
/// Panics (via `unwrap`) if called before `init` has set `HEAP_ADDR_RANGE`.
pub(crate) fn task_heap_end() -> VirtAddr {
HEAP_ADDR_RANGE.get().unwrap().end
}

#[cfg(target_os = "none")]
pub(crate) fn init() {
use crate::arch::mm::paging;
Expand Down Expand Up @@ -111,48 +93,7 @@ pub(crate) fn init() {

let heap_start_addr;

#[cfg(all(feature = "newlib", not(feature = "common-os")))]
{
// we reserve 10% of the memory for stack allocations
let stack_reserve: usize = (available_memory * 10) / 100;

info!("An application with a C-based runtime is running on top of Hermit!");
let kernel_heap_size = 10 * LargePageSize::SIZE as usize;

unsafe {
let start = {
let physical_address = arch::mm::physicalmem::allocate(kernel_heap_size).unwrap();
let virtual_address = arch::mm::virtualmem::allocate(kernel_heap_size).unwrap();

let count = kernel_heap_size / BasePageSize::SIZE as usize;
let mut flags = PageTableEntryFlags::empty();
flags.normal().writable().execute_disable();
arch::mm::paging::map::<BasePageSize>(
virtual_address,
physical_address,
count,
flags,
);

virtual_address
};
ALLOCATOR.init(start.as_mut_ptr(), kernel_heap_size);

info!("Kernel heap starts at {:#x}", start);
}

info!("Kernel heap size: {} MB", kernel_heap_size >> 20);
let user_heap_size =
(available_memory - kernel_heap_size - stack_reserve - LargePageSize::SIZE as usize)
.align_down(LargePageSize::SIZE as usize);
info!("User-space heap size: {} MB", user_heap_size >> 20);

map_addr = kernel_heap_end();
map_size = user_heap_size;
heap_start_addr = map_addr;
}

#[cfg(all(not(feature = "newlib"), feature = "common-os"))]
#[cfg(feature = "common-os")]
{
info!("Using HermitOS as common OS!");

Expand Down Expand Up @@ -197,13 +138,11 @@ pub(crate) fn init() {
}
}

#[cfg(all(not(feature = "newlib"), not(feature = "common-os")))]
#[cfg(not(feature = "common-os"))]
{
// we reserve 10% of the memory for stack allocations
let stack_reserve: usize = (available_memory * 10) / 100;

info!("A pure Rust application is running on top of Hermit!");

// At first, we map only a small part into the heap.
// Afterwards, we already use the heap and map the rest into
// the virtual address space.
Expand Down Expand Up @@ -297,7 +236,6 @@ pub(crate) fn init() {

let heap_end_addr = map_addr;

#[cfg(not(feature = "newlib"))]
unsafe {
ALLOCATOR.init(
heap_start_addr.as_mut_ptr(),
Expand All @@ -307,8 +245,6 @@ pub(crate) fn init() {

let heap_addr_range = heap_start_addr..heap_end_addr;
info!("Heap is located at {heap_addr_range:#x?} ({map_size} Bytes unmapped)");
#[cfg(feature = "newlib")]
HEAP_ADDR_RANGE.set(heap_addr_range).unwrap();
}

pub(crate) fn print_information() {
Expand Down
2 changes: 0 additions & 2 deletions src/syscalls/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,6 @@ pub(crate) fn init() {
SYS.init();

init_entropy();
#[cfg(feature = "newlib")]
sbrk_init();
}

/// Interface to allocate memory from system heap
Expand Down
32 changes: 0 additions & 32 deletions src/syscalls/tasks.rs
Original file line number Diff line number Diff line change
@@ -1,15 +1,11 @@
use alloc::collections::BTreeMap;
#[cfg(feature = "newlib")]
use core::sync::atomic::{AtomicUsize, Ordering};

use hermit_sync::InterruptTicketMutex;

use crate::arch::core_local::*;
use crate::arch::processor::{get_frequency, get_timestamp};
use crate::config::USER_STACK_SIZE;
use crate::errno::*;
#[cfg(feature = "newlib")]
use crate::mm::{task_heap_end, task_heap_start};
use crate::scheduler::task::{Priority, TaskHandle, TaskId};
use crate::scheduler::PerCoreSchedulerExt;
use crate::time::timespec;
Expand Down Expand Up @@ -69,34 +65,6 @@ pub extern "C" fn sys_abort() -> ! {
exit(-1)
}

#[cfg(feature = "newlib")]
// Current program break of the task heap as a raw address.
// Starts at 0 and only becomes valid once `sbrk_init` stores `task_heap_start`.
static SBRK_COUNTER: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "newlib")]
/// Initializes the sbrk program break to the start of the task heap.
/// Must run before any `sys_sbrk` call; invoked from `syscalls::init`.
pub fn sbrk_init() {
SBRK_COUNTER.store(task_heap_start().as_usize(), Ordering::SeqCst);
}

#[cfg(feature = "newlib")]
#[hermit_macro::system]
#[no_mangle]
/// `sbrk` system call for C runtimes: atomically moves the program break by
/// `incr` bytes and returns the break value from before the adjustment.
///
/// Panics (via `assert!`) if the new break would fall outside the task-heap
/// range reported by `task_heap_start()`/`task_heap_end()`.
pub extern "C" fn sys_sbrk(incr: isize) -> usize {
// Get the boundaries of the task heap and verify that they are suitable for sbrk.
let task_heap_start = task_heap_start();
let task_heap_end = task_heap_end();
let old_end;

if incr >= 0 {
// Grow: bump the break, then check the new break still fits below the heap end.
// NOTE(review): the counter is already advanced when the assert fires, so a
// failed check leaves SBRK_COUNTER past the heap end before panicking.
old_end = SBRK_COUNTER.fetch_add(incr as usize, Ordering::SeqCst);
assert!(task_heap_end.as_usize() >= old_end + incr as usize);
} else {
// Shrink: `old_end` is the break before the subtraction, so the new break is
// `old_end - incr.unsigned_abs()`; check it stays strictly above the heap start.
// NOTE(review): the subtraction in the assert underflows (debug panic) if the
// break is lowered past address 0 — presumably unreachable after sbrk_init.
old_end = SBRK_COUNTER.fetch_sub(incr.unsigned_abs(), Ordering::SeqCst);
assert!(task_heap_start.as_usize() < old_end - incr.unsigned_abs());
}

// sbrk convention: return the previous program break.
old_end
}

pub(super) fn usleep(usecs: u64) {
if usecs >= 10_000 {
// Enough time to set a wakeup timer and block the current task.
Expand Down

0 comments on commit 80c5182

Please sign in to comment.