Remote stub (#604)
* Alter is_empty

* Use a stub in remoteallocator

Rather than allocating from a sizeclass, use a stub.  This change adds a
branch on enqueue.
mjp41 authored Mar 24, 2023
1 parent ccca98a commit 798f2fa
Showing 4 changed files with 50 additions and 53 deletions.
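The pattern at the heart of the change: the message queue used to be primed with a real allocation so it was never empty; now the queue object owns a fake first entry (a stub), so priming costs nothing, at the price of one branch on enqueue. Below is a minimal single-threaded sketch of the resulting shape — plain std::atomic and raw pointers stand in for snmalloc's CapPtr machinery, and all names (StubQueue, has_messages, ...) are illustrative, not snmalloc's API.

#include <atomic>
#include <cassert>

struct Node
{
  std::atomic<Node*> next{nullptr};
};

struct StubQueue
{
  Node stub{};                        // embedded fake first entry
  std::atomic<Node*> back{nullptr};
  std::atomic<Node*> front{nullptr};

  void init()
  {
    // No allocation needed: point the queue at its own stub.
    stub.next.store(nullptr, std::memory_order_relaxed);
    front.store(&stub, std::memory_order_relaxed);
    back.store(nullptr, std::memory_order_relaxed);
  }

  // Multiple producers may call this concurrently.
  void enqueue(Node* first, Node* last)
  {
    last->next.store(nullptr, std::memory_order_relaxed);
    Node* prev = back.exchange(last, std::memory_order_acq_rel);
    if (prev != nullptr)              // common case: append after prev
    {
      prev->next.store(first, std::memory_order_release);
      return;
    }
    front.store(first, std::memory_order_release); // first ever enqueue
  }

  // The emptiness test no longer compares front and back; it asks whether
  // anything hangs off the current front.
  bool has_messages()
  {
    return front.load()->next.load(std::memory_order_acquire) != nullptr;
  }
};

int main()
{
  StubQueue q;
  q.init();
  assert(!q.has_messages());  // only the stub: nothing to handle

  Node a, b;
  q.enqueue(&a, &a);
  assert(!q.has_messages());  // the last element stays queued, like the stub did
  q.enqueue(&b, &b);
  assert(q.has_messages());   // now a is handleable (the walk is bounded by back)
}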
5 changes: 5 additions & 0 deletions src/snmalloc/ds_core/ptrwrap.h
@@ -470,6 +470,11 @@ namespace snmalloc
       : unsafe_capptr(n)
     {}
 
+    /**
+     * default to nullptr
+     */
+    constexpr SNMALLOC_FAST_PATH AtomicCapPtr() : AtomicCapPtr(nullptr) {}
+
     /**
      * Interconversion with CapPtr
      */
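This defaulted constructor matters because RemoteAllocator (later in this diff) gains an atomic front and an embedded stub while remaining constexpr RemoteAllocator() = default, so every member must be constexpr default-constructible. A reduced sketch of the idiom with illustrative names, not snmalloc's types (C++20 for constinit):

#include <atomic>

// Stand-in for AtomicCapPtr: a wrapper whose default constructor is
// constexpr so enclosing objects can be constant-initialised.
template<typename T>
struct AtomicPtr
{
  std::atomic<T*> unsafe_capptr{nullptr};
  constexpr AtomicPtr() = default;
};

struct Owner
{
  AtomicPtr<int> front;          // fine inside a constexpr constructor
  constexpr Owner() = default;
};

// Constant-initialised at load time; no runtime static constructor runs.
constinit Owner global_owner;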
48 changes: 15 additions & 33 deletions src/snmalloc/mem/corealloc.h
@@ -170,41 +170,11 @@ namespace snmalloc
 
     /**
      * The message queue has non-trivial initialisation as it needs to
-     * be non-empty, so we prime it with a single allocation.
+     * be non-empty, so we prime it with a fake allocation.
      */
     void init_message_queue()
     {
-      // Manufacture an allocation to prime the queue
-      // Using an actual allocation removes a conditional from a critical path.
-      auto dummy = capptr::Alloc<void>(small_alloc_one(MIN_ALLOC_SIZE))
-                     .template as_static<freelist::Object::T<>>();
-      if (dummy == nullptr)
-      {
-        error("Critical error: Out-of-memory during initialisation.");
-      }
-      message_queue().init(dummy);
-    }
-
-    /**
-     * There are a few internal corner cases where we need to allocate
-     * a small object. These are not on the fast path,
-     * - Allocating stub in the message queue
-     * Note this is not performance critical as very infrequently called.
-     */
-    capptr::Alloc<void> small_alloc_one(size_t size)
-    {
-      SNMALLOC_ASSERT(attached_cache != nullptr);
-      auto domesticate =
-        [this](freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
-          return capptr_domesticate<Config>(backend_state_ptr(), p);
-        };
-      // Use attached cache, and fill it if it is empty.
-      return attached_cache->template alloc<NoZero, Config>(
-        domesticate,
-        size,
-        [&](smallsizeclass_t sizeclass, freelist::Iter<>* fl) {
-          return small_alloc<NoZero>(sizeclass, *fl);
-        });
+      message_queue().init();
     }
 
     static SNMALLOC_FAST_PATH void alloc_new_list(
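The visible effect of this hunk: queue setup no longer performs a real MIN_ALLOC_SIZE allocation, so the out-of-memory error path (and the small_alloc_one helper that existed only to serve it) disappears. A toy sketch of why stub-based priming is infallible — illustrative names, not the real types:

// With the stub inside the object, priming is plain pointer assignment;
// there is no allocation and hence no error path to report.
struct Node { Node* next = nullptr; };

struct Queue
{
  Node stub{};          // storage comes with the queue itself
  Node* front = nullptr;
  Node* back = nullptr;

  void init() noexcept  // cannot fail: nothing to allocate
  {
    stub.next = nullptr;
    front = &stub;
    back = nullptr;
  }
};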
@@ -480,7 +450,19 @@
      */
     SNMALLOC_FAST_PATH bool has_messages()
     {
-      return !(message_queue().is_empty());
+      auto domesticate = [local_state = backend_state_ptr()](
+                           freelist::QueuePtr p) SNMALLOC_FAST_PATH_LAMBDA {
+        if constexpr (Config::Options.QueueHeadsAreTame)
+        {
+          return freelist::HeadPtr::unsafe_from(p.unsafe_ptr());
+        }
+        else
+        {
+          return capptr_domesticate<Config>(local_state, p);
+        }
+      };
+
+      return !(message_queue().can_dequeue(key_global, domesticate));
     }
 
     /**
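has_messages now needs a domesticator because can_dequeue dereferences the queue head, which may be a "wild" pointer written by another thread. A reduced model of the if constexpr dispatch — the config flag, bounds check, and all names here are assumptions for illustration, not snmalloc's API:

#include <cstdint>

struct TameConfig
{
  static constexpr bool queue_heads_are_tame = true;
};

// When the configuration statically promises queue heads are tame, the
// validation compiles away; otherwise the wild pointer is checked against
// trusted bounds before it may be dereferenced.
template<typename Config>
struct Domesticator
{
  std::uintptr_t arena_base;
  std::uintptr_t arena_size;

  template<typename T>
  T* operator()(T* wild) const
  {
    if constexpr (Config::queue_heads_are_tame)
    {
      return wild; // trusted by construction; no check emitted
    }
    else
    {
      // Unsigned wrap-around turns base/limit into a single range check.
      auto a = reinterpret_cast<std::uintptr_t>(wild);
      return (a - arena_base < arena_size) ? wild : nullptr;
    }
  }
};

With the tame option set, the lambda in the hunk reduces to a re-branding of the pointer, which is presumably why the fast path stays free of an extra check.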
4 changes: 3 additions & 1 deletion src/snmalloc/mem/freelist.h
@@ -162,9 +162,11 @@ namespace snmalloc
 
     SNMALLOC_NO_UNIQUE_ADDRESS
     std::conditional_t<mitigations(freelist_backward_edge), Prev, Empty>
-      prev;
+      prev{};
 
   public:
+    constexpr T() : next_object(){};
+
     template<
       SNMALLOC_CONCEPT(capptr::IsBound) BView = typename BQueue::
         template with_wildness<capptr::dimension::Wildness::Tame>,
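These two lines exist so the in-object stub in remoteallocator.h (next file) can be constant-initialised: for that, no member may be left uninitialised, hence the {} on the conditionally-typed prev member and the new constexpr T(). A reduced sketch with assumed member shapes:

#include <type_traits>

// With the backward-edge mitigation off, prev collapses to an empty type
// that [[no_unique_address]] lets occupy no storage; the {} initialiser
// keeps the constexpr constructor usable either way.
struct Empty
{};

struct Prev
{
  void* prev = nullptr;
};

template<bool BackwardEdge>
struct Node
{
  Node* next_object;

  [[no_unique_address]]
  std::conditional_t<BackwardEdge, Prev, Empty> prev{};

  constexpr Node() : next_object(nullptr) {}
};

constinit Node<false> stub{}; // constant-initialised embedded stub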
46 changes: 27 additions & 19 deletions src/snmalloc/mem/remoteallocator.h
@@ -51,36 +51,43 @@ namespace snmalloc
     alignas(CACHELINE_SIZE) freelist::AtomicQueuePtr back{nullptr};
     // Store the two ends on different cache lines as access by different
     // threads.
-    alignas(CACHELINE_SIZE) freelist::QueuePtr front{nullptr};
+    alignas(CACHELINE_SIZE) freelist::AtomicQueuePtr front{nullptr};
+    // Fake first entry
+    freelist::Object::T<capptr::bounds::AllocWild> stub{};
 
     constexpr RemoteAllocator() = default;
 
     void invariant()
     {
-      SNMALLOC_ASSERT(back != nullptr);
+      SNMALLOC_ASSERT(
+        (back != nullptr) ||
+        (address_cast(front.load()) == address_cast(&stub)));
     }
 
-    void init(freelist::HeadPtr stub)
+    void init()
     {
-      freelist::Object::atomic_store_null(stub, key_global);
-      front = capptr_rewild(stub);
-      back.store(front, std::memory_order_relaxed);
+      freelist::HeadPtr stub_ptr = freelist::HeadPtr::unsafe_from(&stub);
+      freelist::Object::atomic_store_null(stub_ptr, key_global);
+      front.store(freelist::QueuePtr::unsafe_from(&stub));
+      back.store(nullptr, std::memory_order_relaxed);
       invariant();
     }
 
     freelist::QueuePtr destroy()
     {
-      freelist::QueuePtr fnt = front;
+      freelist::QueuePtr fnt = front.load();
       back.store(nullptr, std::memory_order_relaxed);
-      front = nullptr;
+      if (address_cast(front.load()) == address_cast(&stub))
+        return nullptr;
       return fnt;
     }
 
-    inline bool is_empty()
+    template<typename Domesticator_head>
+    inline bool
+    can_dequeue(const FreeListKey& key, Domesticator_head domesticate_head)
     {
-      freelist::QueuePtr bk = back.load(std::memory_order_relaxed);
-
-      return bk == front;
+      return domesticate_head(front.load())
+               ->atomic_read_next(key, domesticate_head) == nullptr;
     }
 
     /**
@@ -107,12 +114,13 @@
       freelist::QueuePtr prev =
         back.exchange(capptr_rewild(last), std::memory_order_acq_rel);
 
-      freelist::Object::atomic_store_next(domesticate_head(prev), first, key);
-    }
+      if (SNMALLOC_LIKELY(prev != nullptr))
+      {
+        freelist::Object::atomic_store_next(domesticate_head(prev), first, key);
+        return;
+      }
 
-    freelist::QueuePtr peek()
-    {
-      return front;
+      front.store(capptr_rewild(first));
     }
 
     /**
@@ -134,11 +142,11 @@
       Cb cb)
     {
       invariant();
-      SNMALLOC_ASSERT(front != nullptr);
+      SNMALLOC_ASSERT(front.load() != nullptr);
 
       // Use back to bound, so we don't handle new entries.
       auto b = back.load(std::memory_order_relaxed);
-      freelist::HeadPtr curr = domesticate_head(front);
+      freelist::HeadPtr curr = domesticate_head(front.load());
 
       while (address_cast(curr) != address_cast(b))
       {
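For completeness, the consumer side these front.load() changes feed into: the walk is bounded by back, so the most recently enqueued element — whose incoming link a racing producer may not have published yet — is never handled in the current pass. A simplified sketch with illustrative names, not the real CapPtr-typed code:

#include <atomic>

struct Node
{
  std::atomic<Node*> next{nullptr};
};

// Handle every element strictly before b; the element at b stays queued
// and becomes the starting point of the next pass.
template<typename Handler>
void handle_all(std::atomic<Node*>& front, std::atomic<Node*>& back, Handler handle)
{
  Node* b = back.load(std::memory_order_relaxed);
  Node* curr = front.load(std::memory_order_relaxed);

  while (curr != b)
  {
    Node* next = curr->next.load(std::memory_order_acquire);
    if (next == nullptr)
      break;       // producer swung back but has not linked yet; retry later
    handle(curr);  // read next first: handling may recycle curr's memory
    curr = next;
  }

  front.store(curr, std::memory_order_relaxed);
}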