Towards heap walk #569

Merged: 16 commits, Dec 20, 2022
10 changes: 10 additions & 0 deletions src/snmalloc/aal/address.h
@@ -290,4 +290,14 @@ namespace snmalloc
return static_cast<size_t>(a - pointer_align_down<alignment>(a));
}

/**
* Convert an address_t to a pointer. The returned pointer should never be
* followed. On CHERI, following this pointer will result in a capability
* violation.
*/
template<typename T>
SNMALLOC_FAST_PATH_INLINE T* useless_ptr_from_addr(address_t p)
{
return reinterpret_cast<T*>(static_cast<uintptr_t>(p));
}
} // namespace snmalloc
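For context, here is a minimal standalone sketch of how a helper like useless_ptr_from_addr is meant to be used (assuming address_t is uintptr_t, as on non-CHERI platforms, and using a hypothetical Node type): the resulting pointer is only stored or compared, never dereferenced.

#include <cstdint>

using address_t = uintptr_t; // stand-in for snmalloc's address_t

template<typename T>
T* useless_ptr_from_addr(address_t p)
{
  // The result carries an address only; it must never be followed.
  return reinterpret_cast<T*>(static_cast<uintptr_t>(p));
}

struct Node; // deliberately incomplete, so the pointer cannot be dereferenced

int main()
{
  address_t a = 0x1000;
  // Stash the address in pointer-typed storage, e.g. to seed an encoded
  // free-list head; comparison and storage are fine, dereference is not.
  Node* n = useless_ptr_from_addr<Node>(a);
  return n == nullptr ? 1 : 0;
}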
12 changes: 6 additions & 6 deletions src/snmalloc/ds_core/seqset.h
@@ -83,6 +83,12 @@ namespace snmalloc
#endif
}

public:
/**
* Empty queue
*/
constexpr SeqSet() = default;

/**
* Check for empty
*/
@@ -95,12 +101,6 @@ namespace snmalloc
return head.next == &head;
}

public:
/**
* Empty queue
*/
constexpr SeqSet() = default;

/**
* Remove an element from the queue
*
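As an aside, the is_empty check above (head.next == &head) relies on SeqSet being a circular intrusive list with a sentinel head node. A minimal sketch of that idea follows; it is an illustration only, not snmalloc's actual SeqSet.

#include <cassert>

// Sentinel-based intrusive ring: the set is empty exactly when the
// sentinel's next pointer refers back to the sentinel itself.
struct Node
{
  Node* next;
  Node* prev;
};

struct Ring
{
  Node head;

  Ring() : head{&head, &head} {}

  bool is_empty() const { return head.next == &head; }

  void insert(Node* n)
  {
    n->next = head.next;
    n->prev = &head;
    head.next->prev = n;
    head.next = n;
  }

  void remove(Node* n)
  {
    n->prev->next = n->next;
    n->next->prev = n->prev;
  }
};

int main()
{
  Ring r;
  assert(r.is_empty());
  Node n{};
  r.insert(&n);
  assert(!r.is_empty());
  r.remove(&n);
  assert(r.is_empty());
}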
75 changes: 61 additions & 14 deletions src/snmalloc/mem/corealloc.h
@@ -61,6 +61,12 @@ namespace snmalloc
uint16_t length = 0;
} alloc_classes[NUM_SMALL_SIZECLASSES]{};

/**
* The set of all slabs and large allocations
* from this allocator that are full or almost full.
*/
SeqSet<BackendSlabMetadata> laden{};

/**
* Local entropy source and current version of keys for
* this thread
@@ -420,6 +426,9 @@
UNUSED(size);
#endif

// Remove from set of fully used slabs.
meta->node.remove();

Config::Backend::dealloc_chunk(
get_backend_local_state(), *meta, p, size);

@@ -436,6 +445,9 @@
// Wake slab up.
meta->set_not_sleeping(sizeclass);

// Remove from set of fully used slabs.
meta->node.remove();

alloc_classes[sizeclass].available.insert(meta);
alloc_classes[sizeclass].length++;

@@ -744,6 +756,10 @@
alloc_classes[sizeclass].length++;
sl.insert(meta);
}
else
{
laden.insert(meta);
}

auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
return ticker.check_tick(r);
@@ -794,7 +810,8 @@
}

// Set meta slab to empty.
meta->initialise(sizeclass);
meta->initialise(
sizeclass, address_cast(slab), entropy.get_free_list_key());

// Build a free list for the slab
alloc_new_list(slab, meta, rsize, slab_size, entropy);
@@ -811,6 +828,10 @@
alloc_classes[sizeclass].length++;
alloc_classes[sizeclass].available.insert(meta);
}
else
{
laden.insert(meta);
}

auto r = finish_alloc<zero_mem, Config>(p, sizeclass);
return ticker.check_tick(r);
@@ -864,6 +885,14 @@
dealloc_local_slabs<true>(sizeclass);
}

laden.iterate([this, domesticate](
BackendSlabMetadata* meta) SNMALLOC_FAST_PATH_LAMBDA {
if (!meta->is_large())
{
meta->free_queue.validate(entropy.get_free_list_key(), domesticate);
}
});

return posted;
}

@@ -883,7 +912,7 @@
c->remote_allocator = public_state();

// Set up remote cache.
c->remote_dealloc_cache.init();
c->remote_dealloc_cache.init(entropy.get_free_list_key());
}

/**
@@ -892,28 +921,46 @@
*/
bool debug_is_empty_impl(bool* result)
{
auto test = [&result](auto& queue, smallsizeclass_t size_class) {
queue.iterate([&result, size_class](auto slab_metadata) {
auto& key = entropy.get_free_list_key();

auto error = [&result, &key](auto slab_metadata) {
auto slab_interior = slab_metadata->get_slab_interior(key);
const PagemapEntry& entry =
Config::Backend::get_metaentry(slab_interior);
SNMALLOC_ASSERT(slab_metadata == entry.get_slab_metadata());
auto size_class = entry.get_sizeclass();
auto slab_size = sizeclass_full_to_slab_size(size_class);
auto slab_start = bits::align_down(slab_interior, slab_size);

if (result != nullptr)
*result = false;
else
report_fatal_error(
"debug_is_empty: found non-empty allocator: size={} on "
"slab_start {}",
sizeclass_full_to_size(size_class),
slab_start);
};

auto test = [&error](auto& queue) {
queue.iterate([&error](auto slab_metadata) {
if (slab_metadata->needed() != 0)
{
if (result != nullptr)
*result = false;
else
report_fatal_error(
"debug_is_empty: found non-empty allocator: size={} ({})",
sizeclass_to_size(size_class),
size_class);
error(slab_metadata);
}
});
};

bool sent_something = flush(true);

smallsizeclass_t size_class = 0;
for (auto& alloc_class : alloc_classes)
{
test(alloc_class.available, size_class);
size_class++;
test(alloc_class.available);
}

if (!laden.is_empty())
{
error(laden.peek());
}

// Place the static stub message on the queue.
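The error path in debug_is_empty_impl above recovers the slab start from an interior address via bits::align_down. A small sketch of that arithmetic (assuming, as in snmalloc, that slab sizes are powers of two; the concrete addresses and slab size here are made up):

#include <cstdint>
#include <cstdio>

// Clear the low bits so the address falls on an alignment boundary.
// Only valid when alignment is a power of two.
static uintptr_t align_down(uintptr_t a, uintptr_t alignment)
{
  return a & ~(alignment - 1);
}

int main()
{
  uintptr_t slab_interior = 0x12345940;     // some address inside the slab
  uintptr_t slab_size = uintptr_t(1) << 14; // 16 KiB slab, illustration only
  uintptr_t slab_start = align_down(slab_interior, slab_size);
  printf("slab_start = %#lx\n", (unsigned long)slab_start); // 0x12344000
  return 0;
}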
34 changes: 17 additions & 17 deletions src/snmalloc/mem/freelist.h
@@ -115,7 +115,6 @@ namespace snmalloc
class T
{
template<
bool,
bool,
SNMALLOC_CONCEPT(capptr::IsBound),
SNMALLOC_CONCEPT(capptr::IsBound)>
@@ -220,7 +219,6 @@
return reinterpret_cast<Object::T<BQueue>*>(ptr);
}

private:
/**
* Involutive encryption with raw pointers
*/
@@ -247,7 +245,6 @@
}
}

public:
/**
* Encode next. We perform two convenient little bits of type-level
* sleight of hand here:
@@ -506,7 +503,6 @@
*/
template<
bool RANDOM,
bool INIT = true,
SNMALLOC_CONCEPT(capptr::IsBound) BView = capptr::bounds::Alloc,
SNMALLOC_CONCEPT(capptr::IsBound) BQueue = capptr::bounds::AllocWild>
class Builder
@@ -532,7 +528,7 @@
// This enables branch free enqueuing.
std::array<void**, LENGTH> end{nullptr};

Object::BQueuePtr<BQueue>* cast_end(uint32_t ix)
[[nodiscard]] Object::BQueuePtr<BQueue>* cast_end(uint32_t ix) const
{
return reinterpret_cast<Object::BQueuePtr<BQueue>*>(end[ix]);
}
@@ -542,7 +538,7 @@
end[ix] = reinterpret_cast<void**>(p);
}

Object::BHeadPtr<BView, BQueue> cast_head(uint32_t ix)
[[nodiscard]] Object::BHeadPtr<BView, BQueue> cast_head(uint32_t ix) const
{
return Object::BHeadPtr<BView, BQueue>::unsafe_from(
static_cast<Object::T<BQueue>*>(head[ix]));
@@ -551,13 +547,7 @@
std::array<uint16_t, RANDOM ? 2 : 0> length{};

public:
constexpr Builder()
{
if (INIT)
{
init();
}
}
constexpr Builder() = default;

/**
* Checks if the builder contains any elements.
@@ -629,8 +619,8 @@
* and is thus subject to encoding if the next_object pointers
* encoded.
*/
Object::BHeadPtr<BView, BQueue>
read_head(uint32_t index, const FreeListKey& key)
[[nodiscard]] Object::BHeadPtr<BView, BQueue>
read_head(uint32_t index, const FreeListKey& key) const
{
return Object::decode_next(
address_cast(&head[index]), cast_head(index), key);
@@ -688,7 +678,7 @@
/**
* Set the builder to a not building state.
*/
constexpr void init()
constexpr void init(address_t slab, const FreeListKey& key)
{
for (size_t i = 0; i < LENGTH; i++)
{
@@ -697,6 +687,16 @@
{
length[i] = 0;
}

// Head is not live when a builder is initialised.
// We use this slot to store a pointer into the slab for the
// allocations. This then establishes the invariant that head is
// always a (possibly encoded) pointer into the slab, and thus
// the Freelist builder always knows which block it is referring to.
head[i] = Object::code_next(
address_cast(&head[i]),
useless_ptr_from_addr<Object::T<BQueue>>(slab),
key);
}
}

@@ -718,7 +718,7 @@
// empty, but you are not allowed to call this in the empty case.
auto last = Object::BHeadPtr<BView, BQueue>::unsafe_from(
Object::from_next_ptr(cast_end(0)));
init();
init(address_cast(head[0]), key);
return {first, last};
}

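The init change above seeds the otherwise dead head slot with an encoded pointer into the slab, using the builder's involutive next-pointer encoding. The following sketch shows the general XOR-style idea only; the Key type and code_next here are hypothetical and are not snmalloc's exact scheme.

#include <cassert>
#include <cstdint>

using address_t = uintptr_t;

// Hypothetical key type for this sketch (snmalloc's FreeListKey differs).
struct Key
{
  address_t key_next;
};

// Involutive encoding: XOR the pointer with the slot's own address and a
// secret, so applying the same transform twice returns the original value.
static void* code_next(address_t slot, void* next, const Key& key)
{
  return reinterpret_cast<void*>(
    reinterpret_cast<address_t>(next) ^ slot ^ key.key_next);
}

int main()
{
  Key key{static_cast<address_t>(0x9e3779b97f4a7c15ULL)};
  int object = 0;
  address_t slot = 0x2000;

  void* encoded = code_next(slot, &object, key);
  // Decoding is the same operation with the same slot address and key.
  void* decoded = code_next(slot, encoded, key);
  assert(decoded == &object);
  return 0;
}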
6 changes: 5 additions & 1 deletion src/snmalloc/mem/localalloc.h
@@ -199,7 +199,11 @@ namespace snmalloc

// Initialise meta data for a successful large allocation.
if (meta != nullptr)
meta->initialise_large();
{
meta->initialise_large(
address_cast(chunk), local_cache.entropy.get_free_list_key());
core_alloc->laden.insert(meta);
}

if (zero_mem == YesZero && chunk.unsafe_ptr() != nullptr)
{
16 changes: 12 additions & 4 deletions src/snmalloc/mem/metadata.h
@@ -440,12 +440,13 @@ namespace snmalloc
/**
* Initialise FrontendSlabMetadata for a slab.
*/
void initialise(smallsizeclass_t sizeclass)
void initialise(
smallsizeclass_t sizeclass, address_t slab, const FreeListKey& key)
{
static_assert(
std::is_base_of<FrontendSlabMetadata_Trait, BackendType>::value,
"Template should be a subclass of FrontendSlabMetadata");
free_queue.init();
free_queue.init(slab, key);
// Set up meta data as if the entire slab has been turned into a free
// list. This means we don't have to check for special cases where we have
// returned all the elements, but this is a slab that is still being bump
@@ -461,10 +462,10 @@
*
* Set needed so immediately moves to slow path.
*/
void initialise_large()
void initialise_large(address_t slab, const FreeListKey& key)
{
// We will push to this just to make the fast path clean.
free_queue.init();
free_queue.init(slab, key);

// Flag to detect that it is a large alloc on the slow path
large_ = true;
@@ -579,6 +580,13 @@

return {p, !sleeping};
}

// Returns a pointer to somewhere in the slab. May not be the
// start of the slab.
[[nodiscard]] address_t get_slab_interior(const FreeListKey& key) const
{
return address_cast(free_queue.read_head(0, key));
}
};

/**
8 changes: 5 additions & 3 deletions src/snmalloc/mem/remotecache.h
@@ -17,7 +17,7 @@ namespace snmalloc
*/
struct RemoteDeallocCache
{
std::array<freelist::Builder<false, false>, REMOTE_SLOTS> list;
std::array<freelist::Builder<false>, REMOTE_SLOTS> list;

/**
* The total amount of memory we are waiting for before we will dispatch
@@ -165,14 +165,16 @@
* Must be called before anything else to ensure actually initialised
* not just zero init.
*/
void init()
void init(const FreeListKey& key)
{
#ifndef NDEBUG
initialised = true;
#endif
for (auto& l : list)
{
l.init();
// We do not need to initialise with a particular slab, so pass
// a null address.
l.init(0, key);
}
capacity = REMOTE_CACHE;
}