make mycelium compile with breaking changes
maybe `new` should infer the lock type idk...
hawkw committed Jul 28, 2024
1 parent 53e4b70 commit 2074ae9
Showing 11 changed files with 127 additions and 114 deletions.
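
The whole commit follows one pattern: call sites that previously took a guard from `spin::Mutex::lock()` or `try_lock()` now pass the critical section as a closure to `with_lock`/`try_with_lock` on the new `blocking::Mutex`, and statics pick their raw lock explicitly via `with_raw_mutex`. As a rough sketch of the call-site change only (the `ClosureMutex` wrapper below is a stand-in built on `std::sync::Mutex`, not maitake-sync's implementation; just the `with_lock`/`try_with_lock` shapes are taken from the diff):

```rust
use std::sync::Mutex;

/// Stand-in for a closure-based blocking mutex, mirroring the
/// `with_lock`/`try_with_lock` calls used throughout this commit.
struct ClosureMutex<T>(Mutex<T>);

impl<T> ClosureMutex<T> {
    fn new(value: T) -> Self {
        Self(Mutex::new(value))
    }

    /// Runs `f` with the lock held, releasing the lock when `f` returns.
    fn with_lock<U>(&self, f: impl FnOnce(&mut T) -> U) -> U {
        let mut guard = self.0.lock().unwrap();
        f(&mut *guard)
    }

    /// Like `with_lock`, but returns `None` instead of blocking if the
    /// lock is already held.
    fn try_with_lock<U>(&self, f: impl FnOnce(&mut T) -> U) -> Option<U> {
        let mut guard = self.0.try_lock().ok()?;
        Some(f(&mut *guard))
    }
}

fn main() {
    let list = ClosureMutex::new(vec![1, 2, 3]);

    // Before: `let mut list = list.lock(); list.push(4);`
    // After: the critical section is a closure.
    list.with_lock(|list| list.push(4));

    // `try_with_lock` yields `Some(ret)` on success and `None` on
    // contention, so the old `match list.try_lock() { ... }` blocks
    // collapse into `.unwrap_or_else(...)` chains.
    let len = list.try_with_lock(|list| list.len());
    assert_eq!(len, Some(4));
}
```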
114 changes: 60 additions & 54 deletions alloc/src/buddy.rs
@@ -14,7 +14,7 @@ use mycelium_util::intrusive::{list, Linked, List};
use mycelium_util::math::Logarithm;
use mycelium_util::sync::{
atomic::{AtomicUsize, Ordering::*},
spin,
blocking::Mutex,
};

#[derive(Debug)]
@@ -41,7 +41,7 @@ pub struct Alloc<const FREE_LISTS: usize> {
/// Array of free lists by "order". The order of a block is the number
/// of times the minimum page size must be doubled to reach that block's
/// size.
free_lists: [spin::Mutex<List<Free>>; FREE_LISTS],
free_lists: [Mutex<List<Free>>; FREE_LISTS],
}

type Result<T> = core::result::Result<T, AllocErr>;
@@ -65,7 +65,7 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
//
// see https://github.com/rust-lang/rust-clippy/issues/7665
#[allow(clippy::declare_interior_mutable_const)]
const ONE_FREE_LIST: spin::Mutex<List<Free>> = spin::Mutex::new(List::new());
const ONE_FREE_LIST: Mutex<List<Free>> = Mutex::new(List::new());

// ensure we don't split memory into regions too small to fit the free
// block header in them.
@@ -190,16 +190,12 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
let _span =
tracing::debug_span!("free_list", order, size = self.size_for_order(order),)
.entered();
match list.try_lock() {
Some(list) => {
for entry in list.iter() {
tracing::debug!("entry={entry:?}");
}
}
None => {
tracing::debug!("<THIS IS THE ONE WHERE THE PANIC HAPPENED LOL>");
list.try_with_lock(|list| {
for entry in list.iter() {
tracing::debug!("entry={entry:?}");
}
}
})
.unwrap_or_else(|| tracing::debug!("<THIS IS THE ONE WHERE THE PANIC HAPPENED LOL>"))
}
}

@@ -283,29 +279,35 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
tracing::trace!(curr_order = idx + order);

// Is there an available block on this free list?
let mut free_list = free_list.lock();
if let Some(mut block) = free_list.pop_back() {
let block = unsafe { block.as_mut() };
tracing::trace!(?block, ?free_list, "found");

// Unless this is the free list on which we'd expect to find a
// block of the requested size (the first free list we checked),
// the block is larger than the requested allocation. In that
// case, we'll need to split it down and push the remainder onto
// the appropriate free lists.
if idx > 0 {
let curr_order = idx + order;
tracing::trace!(?curr_order, ?order, "split down");
self.split_down(block, curr_order, order);
}
let allocated = free_list.with_lock(|free_list| {
if let Some(mut block) = free_list.pop_back() {
let block = unsafe { block.as_mut() };
tracing::trace!(?block, ?free_list, "found");

// Unless this is the free list on which we'd expect to find a
// block of the requested size (the first free list we checked),
// the block is larger than the requested allocation. In that
// case, we'll need to split it down and push the remainder onto
// the appropriate free lists.
if idx > 0 {
let curr_order = idx + order;
tracing::trace!(?curr_order, ?order, "split down");
self.split_down(block, curr_order, order);
}

// Change the block's magic to indicate that it is allocated, so
// that we can avoid checking the free list if we try to merge
// it before the first word is written to.
block.make_busy();
tracing::trace!(?block, "made busy");
self.allocated_size.fetch_add(block.size(), Release);
return Some(block.into());
// Change the block's magic to indicate that it is allocated, so
// that we can avoid checking the free list if we try to merge
// it before the first word is written to.
block.make_busy();
tracing::trace!(?block, "made busy");
self.allocated_size.fetch_add(block.size(), Release);
Some(block.into())
} else {
None
}
});
if let Some(block) = allocated {
return Some(block);
}
}
None
@@ -334,25 +336,29 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
// Starting at the minimum order on which the freed range will fit
for (idx, free_list) in self.free_lists.as_ref()[min_order..].iter().enumerate() {
let curr_order = idx + min_order;
let mut free_list = free_list.lock();

// Is there a free buddy block at this order?
if let Some(mut buddy) = unsafe { self.take_buddy(block, curr_order, &mut free_list) } {
// Okay, merge the blocks, and try the next order!
if buddy < block {
mem::swap(&mut block, &mut buddy);
}
unsafe {
block.as_mut().merge(buddy.as_mut());
let done = free_list.with_lock(|free_list| {
// Is there a free buddy block at this order?
if let Some(mut buddy) = unsafe { self.take_buddy(block, curr_order, free_list) } {
// Okay, merge the blocks, and try the next order!
if buddy < block {
mem::swap(&mut block, &mut buddy);
}
unsafe {
block.as_mut().merge(buddy.as_mut());
}
tracing::trace!(?buddy, ?block, "merged with buddy");
// Keep merging!
false
} else {
// Okay, we can't keep merging, so push the block on the current
// free list.
free_list.push_front(block);
tracing::trace!("deallocated block");
self.allocated_size.fetch_sub(size, Release);
true
}
tracing::trace!(?buddy, ?block, "merged with buddy");
// Keep merging!
} else {
// Okay, we can't keep merging, so push the block on the current
// free list.
free_list.push_front(block);
tracing::trace!("deallocated block");
self.allocated_size.fetch_sub(size, Release);
});
if done {
return Ok(());
}
}
Expand All @@ -369,7 +375,7 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
if order > free_lists.len() {
todo!("(eliza): choppity chop chop down the block!");
}
free_lists[order].lock().push_front(block);
free_lists[order].with_lock(|list| list.push_front(block));
let mut sz = self.heap_size.load(Acquire);
while let Err(actual) =
// TODO(eliza): if this overflows that's bad news lol...
@@ -473,7 +479,7 @@ impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
.split_back(size, self.offset())
.expect("block too small to split!");
tracing::trace!(?block, ?new_block);
free_lists[order].lock().push_front(new_block);
free_lists[order].with_lock(|list| list.push_front(new_block));
}
}
}
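
One wrinkle the closure API forces, visible in `alloc` and `deallocate` above: a `return` inside the closure only exits the closure, so the old early-returns become a value handed out of `with_lock` (the `allocated` and `done` bindings) that the caller branches on. A minimal stand-in for that shape, again sketched over `std::sync::Mutex` rather than the real API:

```rust
use std::sync::Mutex;

/// Stand-in `with_lock` over std's `Mutex` (see the sketch above).
fn with_lock<T, U>(lock: &Mutex<T>, f: impl FnOnce(&mut T) -> U) -> U {
    f(&mut *lock.lock().unwrap())
}

/// Mirrors the `allocated`/`done` bindings above: the closure reports an
/// outcome, and the caller does the early-return outside the lock.
fn pop_if_even(lock: &Mutex<Vec<u32>>) -> Option<u32> {
    let popped = with_lock(lock, |items| {
        if items.last().copied().is_some_and(|n| n % 2 == 0) {
            items.pop()
        } else {
            None
        }
    });
    if let Some(n) = popped {
        return Some(n); // a `return` inside the closure couldn't do this
    }
    None
}

fn main() {
    let lock = Mutex::new(vec![1, 3, 4]);
    assert_eq!(pop_if_even(&lock), Some(4));
    assert_eq!(pop_if_even(&lock), None); // 3 is now last, so no pop
}
```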
7 changes: 4 additions & 3 deletions hal-x86_64/src/interrupt.rs
@@ -127,7 +127,7 @@ pub struct Registers {
_pad2: [u16; 3],
}

static IDT: spin::Mutex<idt::Idt> = spin::Mutex::new(idt::Idt::new());
static IDT: spin::Mutex<idt::Idt> =
spin::Mutex::with_raw_mutex(idt::Idt::new(), spin::Spinlock::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();

impl Controller {
@@ -207,7 +208,7 @@ impl Controller {

InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
io: spin::Mutex::with_raw_mutex(io, spin::Spinlock::new()),
}
}
model => {
@@ -225,7 +226,7 @@ impl Controller {
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
InterruptModel::Pic(spin::Mutex::with_raw_mutex(pics, spin::Spinlock::new()))
}
};
tracing::trace!(interrupt_model = ?model);
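
Each of these `hal-x86_64` call sites changes the same way: where `spin::Mutex::new(value)` once chose its spinlock implicitly, the constructor now takes the raw lock as an explicit second argument, which is the ergonomic cost the commit message's "maybe `new` should infer the lock type" is grumbling about. One plausible reason for passing the raw lock by value (an illustrative guess, not taken from maitake-sync's source): these statics need `const` construction, and a `const fn` cannot call a trait method such as `Default::default()` on stable Rust, so the caller has to supply the raw lock itself:

```rust
#![allow(dead_code)]

use core::cell::UnsafeCell;

/// Illustrative mutex generic over its raw lock type `L`.
struct Mutex<T, L> {
    raw: L,
    data: UnsafeCell<T>,
}

// SAFETY: illustrative only; in a real mutex, the raw lock provides the
// synchronization that justifies this impl.
unsafe impl<T, L> Sync for Mutex<T, L> {}

impl<T, L> Mutex<T, L> {
    /// `L::default()` isn't callable here (trait methods aren't const on
    /// stable Rust), so the raw lock comes in by value.
    const fn with_raw_mutex(data: T, raw: L) -> Self {
        Self {
            raw,
            data: UnsafeCell::new(data),
        }
    }
}

/// Stand-in raw lock with a `const` inherent constructor.
struct Spinlock;

impl Spinlock {
    const fn new() -> Self {
        Spinlock
    }
}

// The shape of the statics in this commit: the raw lock is built
// explicitly at each call site.
static EXAMPLE: Mutex<u8, Spinlock> = Mutex::with_raw_mutex(0, Spinlock::new());

fn main() {
    let _ = &EXAMPLE;
}
```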
2 changes: 1 addition & 1 deletion hal-x86_64/src/serial.rs
@@ -110,7 +110,7 @@ impl Port {
})?;

Ok(Self {
inner: spin::Mutex::new(registers),
inner: spin::Mutex::with_raw_mutex(registers, spin::Spinlock::new()),
})
}

4 changes: 2 additions & 2 deletions hal-x86_64/src/time/pit.rs
@@ -9,7 +9,7 @@ use core::{
use mycelium_util::{
bits::{bitfield, enum_from_bits},
fmt,
sync::spin::Mutex,
sync::spin::{Mutex, Spinlock},
};

/// Intel 8253/8254 Programmable Interval Timer (PIT).
@@ -210,7 +210,7 @@ enum_from_bits! {
/// publicly and is represented as a singleton. It's stored in a [`Mutex`] in
/// order to ensure that multiple CPU cores don't try to write conflicting
/// configurations to the PIT's configuration ports.
pub static PIT: Mutex<Pit> = Mutex::new(Pit::new());
pub static PIT: Mutex<Pit> = Mutex::with_raw_mutex(Pit::new(), Spinlock::new());

/// Are we currently sleeping on an interrupt?
static SLEEPING: AtomicBool = AtomicBool::new(false);
15 changes: 9 additions & 6 deletions hal-x86_64/src/vga.rs
@@ -5,12 +5,15 @@ use mycelium_util::{
};
use volatile::Volatile;
static BUFFER: Lazy<spin::Mutex<Buffer>> = Lazy::new(|| {
spin::Mutex::new(Buffer {
col: 0,
row: 0,
color: ColorSpec::new(Color::LightGray, Color::Black),
buf: Volatile::new(unsafe { &mut *(0xb8000u64 as *mut Buf) }),
})
spin::Mutex::with_raw_mutex(
Buffer {
col: 0,
row: 0,
color: ColorSpec::new(Color::LightGray, Color::Black),
buf: Volatile::new(unsafe { &mut *(0xb8000u64 as *mut Buf) }),
},
spin::Spinlock::new(),
)
});

pub fn writer() -> Writer {
3 changes: 2 additions & 1 deletion maitake-sync/src/spin.rs
@@ -83,7 +83,8 @@ pub struct RwSpinlock {

impl Spinlock {
loom_const_fn! {
pub(crate) fn new() -> Self {
/// Returns a new `Spinlock`, in the unlocked state.
pub fn new() -> Self {
Self { locked: AtomicBool::new(false) }
}
}
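
(Making `Spinlock::new` public here is what lets the `with_raw_mutex(value, Spinlock::new())` call sites above construct the raw lock themselves; this assumes, as the `hal-x86_64` and `pit.rs` imports suggest, that the `spin` modules used there re-export this type.)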
36 changes: 13 additions & 23 deletions maitake/src/time/timer.rs
@@ -8,13 +8,11 @@
//! [future]: core::future::Future
use super::clock::{self, Clock, Instant, Ticks};
use crate::{
loom::sync::{
atomic::{AtomicUsize, Ordering::*},
spin::{Mutex, MutexGuard},
},
loom::sync::atomic::{AtomicUsize, Ordering::*},
util::expect_display,
};
use core::time::Duration;
use maitake_sync::blocking::Mutex;
use mycelium_util::fmt;

#[cfg(test)]
@@ -393,12 +391,7 @@ impl Timer {
// instead, if the timer wheel is busy (e.g. the timer ISR was called on
// another core, or if a `Sleep` future is currently canceling itself),
// we just add to a counter of pending ticks, and bail.
if let Some(mut core) = self.core.try_lock() {
Some(self.advance_locked(&mut core))
} else {
trace!("could not lock timer wheel");
None
}
self.core.try_with_lock(|core| self.advance_locked(core))
}

/// Advance the timer to the current time, ensuring any [`Sleep`] futures that
Expand Down Expand Up @@ -426,7 +419,7 @@ impl Timer {
/// ensure that pending ticks are drained frequently.
#[inline]
pub fn turn(&self) -> Turn {
self.advance_locked(&mut self.core.lock())
self.core.with_lock(|core| self.advance_locked(core))
}

pub(in crate::time) fn ticks_to_dur(&self, ticks: Ticks) -> Duration {
@@ -470,15 +463,12 @@ impl Timer {
}
}

fn core(&self) -> MutexGuard<'_, wheel::Core> {
self.core.lock()
}

#[cfg(all(test, not(loom)))]
fn reset(&self) {
let mut core = self.core();
*core = wheel::Core::new();
self.pending_ticks.store(0, Release);
self.core.with_lock(|core| {
*core = wheel::Core::new();
self.pending_ticks.store(0, Release);
});
}
}

@@ -489,12 +479,12 @@ impl fmt::Debug for Timer {
pending_ticks,
core,
} = self;
f.debug_struct("Timer")
.field("clock", &clock)
let mut s = f.debug_struct("Timer");
s.field("clock", &clock)
.field("tick_duration", &clock.tick_duration())
.field("pending_ticks", &pending_ticks.load(Acquire))
.field("core", &fmt::opt(&core.try_lock()).or_else("<locked>"))
.finish()
.field("pending_ticks", &pending_ticks.load(Acquire));
core.try_with_lock(|core| s.field("core", &core).finish())
.unwrap_or_else(|| s.field("core", &"<locked>").finish())
}
}

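
The `Debug` impl rewrite above keeps the useful property of the old `try_lock()` version: formatting never blocks on the wheel lock, so a `Timer` can still be logged from a context that already holds it. A stand-in sketch of the same shape, using std's `try_lock` in place of `try_with_lock`:

```rust
use std::fmt;
use std::sync::Mutex;

struct Timer {
    pending_ticks: u64,
    core: Mutex<u64>, // stand-in for the timer wheel
}

impl fmt::Debug for Timer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut s = f.debug_struct("Timer");
        s.field("pending_ticks", &self.pending_ticks);
        // Never block here: if the wheel is locked (say, we're logging
        // from inside a critical section), print a placeholder instead.
        match self.core.try_lock() {
            Ok(core) => s.field("core", &*core).finish(),
            Err(_) => s.field("core", &"<locked>").finish(),
        }
    }
}

fn main() {
    let timer = Timer { pending_ticks: 3, core: Mutex::new(42) };
    println!("{timer:?}"); // Timer { pending_ticks: 3, core: 42 }

    let _guard = timer.core.lock().unwrap();
    println!("{timer:?}"); // Timer { pending_ticks: 3, core: "<locked>" }
}
```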
38 changes: 22 additions & 16 deletions maitake/src/time/timer/sleep.rs
@@ -113,22 +113,26 @@ impl Future for Sleep<'_> {
let ptr =
unsafe { ptr::NonNull::from(Pin::into_inner_unchecked(this.entry.as_mut())) };
// Acquire the wheel lock to insert the sleep.
let mut core = this.timer.core();

// While we are holding the wheel lock, go ahead and advance the
// timer, too. This way, the timer wheel gets advanced more
// frequently than just when a scheduler tick completes or a
// timer IRQ fires, helping to increase timer accuracy.
this.timer.advance_locked(&mut core);

match test_dbg!(core.register_sleep(ptr)) {
Poll::Ready(()) => {
*this.state = State::Completed;
return Poll::Ready(());
}
Poll::Pending => {
*this.state = State::Registered;
let done = this.timer.core.with_lock(|core| {
// While we are holding the wheel lock, go ahead and advance the
// timer, too. This way, the timer wheel gets advanced more
// frequently than just when a scheduler tick completes or a
// timer IRQ fires, helping to increase timer accuracy.
this.timer.advance_locked(core);

match test_dbg!(core.register_sleep(ptr)) {
Poll::Ready(()) => {
*this.state = State::Completed;
true
}
Poll::Pending => {
*this.state = State::Registered;
false
}
}
});
if done {
return Poll::Ready(());
}
}
State::Registered => {}
@@ -154,7 +158,9 @@ impl PinnedDrop for Sleep<'_> {
// yet, or it has already completed, we don't need to lock the timer to
// remove it.
if test_dbg!(this.entry.linked.load(Acquire)) {
this.timer.core().cancel_sleep(this.entry);
this.timer
.core
.with_lock(|core| core.cancel_sleep(this.entry));
}
}
}