Skip to content

Commit

Permalink
We seem to be passing the tests now
Browse files Browse the repository at this point in the history
  • Loading branch information
skogseth committed Sep 5, 2024
1 parent 2e96189 commit 554d2d0
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 21 deletions.
1 change: 1 addition & 0 deletions src/core.rs
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ impl<'hzrd, T> ReadHandle<'hzrd, T> {
ptr = new_ptr;
}
}
std::sync::atomic::fence(SeqCst);

// SAFETY: This pointer is now held valid by the hazard pointer
let value = unsafe { &*ptr };
Expand Down
8 changes: 3 additions & 5 deletions src/domains.rs
Original file line number Diff line number Diff line change
Expand Up @@ -202,9 +202,6 @@ cell_1.reclaim();
// There is no need to call `HzrdCell::reclaim` on cell_2 as they both share the `GlobalDomain`.
```
# Thread local garbage
There is some more complexity to the garbage collection in `GlobalDomain`: Each thread holds its own local garbage, as well as access to some garbage shared between all threads. If a thread closes down with local garbage still remaining (it will attempt one last cleanup before closing), then that garbage will be moved to the shared garbage. Whenever a thread performs garbage collection it will first try to clean up the local garbage it holds, followed by an attempt to clean up the shared garbage. However, since the shared garbage is protected by a [`Mutex`](`std::sync::Mutex`) it will only *attempt* to do so; if the shared garbage is locked by another thread this step will be skipped.
*/
#[derive(Clone, Copy)] // zero-sized marker type, so copying is free
pub struct GlobalDomain;
Expand Down Expand Up @@ -339,7 +336,7 @@ unsafe impl Domain for SharedDomain {
// as others may be looking at them
match self.hzrd_ptrs.iter().find_map(|node| node.try_acquire()) {
Some(hzrd_ptr) => hzrd_ptr,
None => self.hzrd_ptrs.push(HzrdPtr::new()),
None => self.hzrd_ptrs.push_get(HzrdPtr::new()),
}
}

Expand All @@ -356,6 +353,7 @@ unsafe impl Domain for SharedDomain {
return 0;
}

std::sync::atomic::fence(std::sync::atomic::Ordering::SeqCst);
let hzrd_ptrs = HzrdPtrs::load(self.hzrd_ptrs.iter());
let remaining: SharedStack<RetiredPtr> = retired_ptrs
.into_iter()
Expand All @@ -364,7 +362,7 @@ unsafe impl Domain for SharedDomain {

let new_size = remaining.iter().count();
self.retired_ptrs.push_stack(remaining);
assert!(prev_size > new_size);
assert!(prev_size >= new_size);
prev_size - new_size
}
}
Expand Down
43 changes: 27 additions & 16 deletions src/stack.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,9 @@ impl<T> SharedStack<T> {
}
}

/// Push a new value onto the stack and return a reference to the value
pub fn push(&self, val: T) -> &T {
fn __push(&self, val: T) -> *mut Node<T> {
let node = Box::into_raw(Box::new(Node::new(val)));

// taken from Jon Gjengset
std::sync::atomic::fence(SeqCst);

let mut old_top = self.top.load(Acquire);
Expand All @@ -49,11 +47,22 @@ impl<T> SharedStack<T> {
}
}

node
}

/// Push a new value onto the stack.
pub fn push(&self, val: T) {
    // The raw node pointer returned by `__push` is only needed by the
    // `*_get` variants; here it is simply discarded.
    self.__push(val);
}

/// Push a new value onto the stack and return a shared reference to it.
pub fn push_get(&self, val: T) -> &T {
    // SAFETY: `__push` boxed this node onto the heap and linked it into the
    // stack, so the pointer is valid and the value sits at a stable address.
    // NOTE(review): outstanding references presumably remain valid because
    // nodes are only detached via the `unsafe fn take` — confirm.
    unsafe { &(*self.__push(val)).val }
}

/// Push a new value onto the stack and return a mutable reference to the value
pub fn push_mut(&mut self, val: T) -> &mut T {
pub fn push_get_mut(&mut self, val: T) -> &mut T {
let node = Box::into_raw(Box::new(Node::new(val)));

let old_top = self.top.load(Acquire);
Expand All @@ -69,11 +78,12 @@ impl<T> SharedStack<T> {
/// Move every value out of `stack` and push it onto `self`.
///
/// Fix: the diff extraction had left the pre-image line (`self.push(val);`)
/// in the body alongside the post-image line, which would push each value
/// twice; only the post-commit line is kept.
pub fn push_stack(&self, stack: Self) {
    // TODO: This can be done much more efficiently (e.g. splicing the node
    // chain in directly rather than re-pushing element by element).
    for val in stack {
        // The returned node pointer is not needed here.
        let _ = self.__push(val);
    }
}

pub unsafe fn take(&self) -> Self {
std::sync::atomic::fence(SeqCst);
let top = self.top.swap(std::ptr::null_mut(), Acquire);
Self {
top: AtomicPtr::new(top),
Expand All @@ -82,6 +92,7 @@ impl<T> SharedStack<T> {

/// Create an iterator over the stack
pub fn iter(&self) -> Iter<'_, T> {
std::sync::atomic::fence(SeqCst);
Iter {
next: AtomicPtr::new(self.top.load(SeqCst)),
_marker: PhantomData,
Expand Down Expand Up @@ -113,7 +124,7 @@ impl<T> FromIterator<T> for SharedStack<T> {
/// Build a stack from an iterator, pushing items in iteration order.
///
/// Fix: the diff extraction had left the pre-image line
/// (`stack.push_mut(item);`) alongside the post-image line, which would
/// insert every item twice; only the post-commit call is kept.
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
    let mut stack = SharedStack::new();
    for item in iter {
        // We hold the stack exclusively, so the `&mut self` push variant is
        // used; its returned reference is intentionally discarded.
        stack.push_get_mut(item);
    }
    stack
}
Expand All @@ -122,7 +133,7 @@ impl<T> FromIterator<T> for SharedStack<T> {
impl<T> Extend<T> for SharedStack<T> {
    /// Push each item yielded by `iter` onto the stack.
    ///
    /// Fix: the diff extraction had left the pre-image line
    /// (`self.push_mut(item);`) alongside the post-image line, which would
    /// insert every item twice; only the post-commit call is kept.
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        for item in iter {
            // `&mut self` push variant; its returned reference is discarded.
            self.push_get_mut(item);
        }
    }
}
Expand Down Expand Up @@ -202,9 +213,9 @@ mod tests {

/// Test helper: build a stack pre-populated with 0, 1, 2.
///
/// Fixes: (1) the diff extraction had left both the pre-image `push` calls
/// and the post-image `push_get` calls in the body, which would insert each
/// value twice; (2) the post-image used `push_get` and silently discarded
/// the returned reference — plain `push` states the intent directly.
fn stack() -> SharedStack<i32> {
    let stack = SharedStack::new();
    stack.push(0);
    stack.push(1);
    stack.push(2);
    stack
}

Expand All @@ -225,13 +236,13 @@ mod tests {

std::thread::scope(|s| {
s.spawn(|| {
stack.push(1);
stack.push(2);
stack.push_get(1);
stack.push_get(2);
});

s.spawn(|| {
stack.push(3);
stack.push(4);
stack.push_get(3);
stack.push_get(4);
});
});

Expand All @@ -245,13 +256,13 @@ mod tests {
std::thread::scope(|s| {
s.spawn(|| {
for _ in 0..100 {
stack.push(vec![String::from("hello"), String::from("worlds")]);
stack.push_get(vec![String::from("hello"), String::from("worlds")]);
}
});

s.spawn(|| {
for _ in 0..100 {
stack.push(vec![String::from("hazard"), String::from("pointer")]);
stack.push_get(vec![String::from("hazard"), String::from("pointer")]);
}
});
});
Expand Down

0 comments on commit 554d2d0

Please sign in to comment.