Skip to content

Commit

Permalink
Make slabmeta track a slab interior pointer
Browse files Browse the repository at this point in the history
Use the head of the free list builder to track an interior pointer to
the slab. This is unused unless the list contains something.
Hence, we can use this to represent an interior pointer to the slab and
report more accurate leaks.
  • Loading branch information
mjp41 committed Oct 31, 2022
1 parent bddb353 commit e0fd327
Show file tree
Hide file tree
Showing 5 changed files with 63 additions and 41 deletions.
47 changes: 29 additions & 18 deletions src/snmalloc/mem/corealloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -810,7 +810,8 @@ namespace snmalloc
}

// Set meta slab to empty.
meta->initialise(sizeclass);
meta->initialise(
sizeclass, address_cast(slab), entropy.get_free_list_key());

// Build a free list for the slab
alloc_new_list(slab, meta, rsize, slab_size, entropy);
Expand Down Expand Up @@ -912,7 +913,7 @@ namespace snmalloc
c->remote_allocator = public_state();

// Set up remote cache.
c->remote_dealloc_cache.init();
c->remote_dealloc_cache.init(entropy.get_free_list_key());
}

/**
Expand All @@ -921,36 +922,46 @@ namespace snmalloc
*/
bool debug_is_empty_impl(bool* result)
{
auto test = [&result](auto& queue, smallsizeclass_t size_class) {
queue.iterate([&result, size_class](auto slab_metadata) {
auto& key = entropy.get_free_list_key();

auto error = [&result, &key](auto slab_metadata) {

auto slab_interior = slab_metadata->get_slab_interior(key);
const PagemapEntry& entry =
Config::Backend::get_metaentry(slab_interior);
auto size_class = entry.get_sizeclass();
auto slab_size = sizeclass_full_to_slab_size(size_class);
auto slab_start = bits::align_down(slab_interior, slab_size);

if (result != nullptr)
*result = false;
else
report_fatal_error(
"debug_is_empty: found non-empty allocator: size={} on "
"slab_start {}",
sizeclass_full_to_size(size_class),
slab_start);
};

auto test = [&error](auto& queue) {
queue.iterate([&error](auto slab_metadata) {
if (slab_metadata->needed() != 0)
{
if (result != nullptr)
*result = false;
else
report_fatal_error(
"debug_is_empty: found non-empty allocator: size={} ({})",
sizeclass_to_size(size_class),
size_class);
error(slab_metadata);
}
});
};

bool sent_something = flush(true);

smallsizeclass_t size_class = 0;
for (auto& alloc_class : alloc_classes)
{
test(alloc_class.available, size_class);
size_class++;
test(alloc_class.available);
}

if (!laden.is_empty())
{
if (result != nullptr)
*result = false;
else
report_fatal_error("debug_is_empty: found non-empty allocator");
error(laden.peek());
}

// Place the static stub message on the queue.
Expand Down
30 changes: 15 additions & 15 deletions src/snmalloc/mem/freelist.h
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,6 @@ namespace snmalloc
class T
{
template<
bool,
bool,
SNMALLOC_CONCEPT(capptr::IsBound),
SNMALLOC_CONCEPT(capptr::IsBound)>
Expand Down Expand Up @@ -220,7 +219,6 @@ namespace snmalloc
return reinterpret_cast<Object::T<BQueue>*>(ptr);
}

private:
/**
* Involutive encryption with raw pointers
*/
Expand All @@ -247,7 +245,6 @@ namespace snmalloc
}
}

public:
/**
* Encode next. We perform two convenient little bits of type-level
* sleight of hand here:
Expand Down Expand Up @@ -506,7 +503,6 @@ namespace snmalloc
*/
template<
bool RANDOM,
bool INIT = true,
SNMALLOC_CONCEPT(capptr::IsBound) BView = capptr::bounds::Alloc,
SNMALLOC_CONCEPT(capptr::IsBound) BQueue = capptr::bounds::AllocWild>
class Builder
Expand Down Expand Up @@ -542,7 +538,7 @@ namespace snmalloc
end[ix] = reinterpret_cast<void**>(p);
}

Object::BHeadPtr<BView, BQueue> cast_head(uint32_t ix)
Object::BHeadPtr<BView, BQueue> cast_head(uint32_t ix) const
{
return Object::BHeadPtr<BView, BQueue>::unsafe_from(
static_cast<Object::T<BQueue>*>(head[ix]));
Expand All @@ -551,13 +547,7 @@ namespace snmalloc
std::array<uint16_t, RANDOM ? 2 : 0> length{};

public:
constexpr Builder()
{
if (INIT)
{
init();
}
}
constexpr Builder() {}

/**
* Checks if the builder contains any elements.
Expand Down Expand Up @@ -630,7 +620,7 @@ namespace snmalloc
* encoded.
*/
Object::BHeadPtr<BView, BQueue>
read_head(uint32_t index, const FreeListKey& key)
read_head(uint32_t index, const FreeListKey& key) const
{
return Object::decode_next(
address_cast(&head[index]), cast_head(index), key);
Expand Down Expand Up @@ -688,7 +678,7 @@ namespace snmalloc
/**
* Set the builder to a not building state.
*/
constexpr void init()
constexpr void init(address_t slab, const FreeListKey& key)
{
for (size_t i = 0; i < LENGTH; i++)
{
Expand All @@ -697,6 +687,16 @@ namespace snmalloc
{
length[i] = 0;
}

// Head is not live when a builder is initialised.
// We use this slot to store a pointer into the slab for the
// allocations. This then establishes the invariant that head is
// always (a possibly encoded) pointer into the slab, and thus
// the Freelist builder always knows which block it is referring to.
head[i] = Object::code_next(
address_cast(&head[i]),
reinterpret_cast<Object::T<BQueue>*>(slab),
key);
}
}

Expand All @@ -718,7 +718,7 @@ namespace snmalloc
// empty, but you are not allowed to call this in the empty case.
auto last = Object::BHeadPtr<BView, BQueue>::unsafe_from(
Object::from_next_ptr(cast_end(0)));
init();
init(address_cast(head[0]), key);
return {first, last};
}

Expand Down
3 changes: 2 additions & 1 deletion src/snmalloc/mem/localalloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,8 @@ namespace snmalloc
// Initialise meta data for a successful large allocation.
if (meta != nullptr)
{
meta->initialise_large();
meta->initialise_large(
address_cast(chunk), local_cache.entropy.get_free_list_key());
core_alloc->laden.insert(meta);
}

Expand Down
16 changes: 12 additions & 4 deletions src/snmalloc/mem/metadata.h
Original file line number Diff line number Diff line change
Expand Up @@ -440,12 +440,13 @@ namespace snmalloc
/**
* Initialise FrontendSlabMetadata for a slab.
*/
void initialise(smallsizeclass_t sizeclass)
void initialise(
smallsizeclass_t sizeclass, address_t slab, const FreeListKey& key)
{
static_assert(
std::is_base_of<FrontendSlabMetadata_Trait, BackendType>::value,
"Template should be a subclass of FrontendSlabMetadata");
free_queue.init();
free_queue.init(slab, key);
// Set up meta data as if the entire slab has been turned into a free
// list. This means we don't have to check for special cases where we have
// returned all the elements, but this is a slab that is still being bump
Expand All @@ -461,10 +462,10 @@ namespace snmalloc
*
* Set needed so immediately moves to slow path.
*/
void initialise_large()
void initialise_large(address_t slab, const FreeListKey& key)
{
// We will push to this just to make the fast path clean.
free_queue.init();
free_queue.init(slab, key);

// Flag to detect that it is a large alloc on the slow path
large_ = true;
Expand Down Expand Up @@ -579,6 +580,13 @@ namespace snmalloc

return {p, !sleeping};
}

/**
 * Returns an address that lies inside the slab tracked by this
 * metadata. Note this is only an interior pointer: it is not
 * guaranteed to be the start of the slab.
 *
 * The value is recovered by decoding the (unused-while-not-building)
 * head slot of the free-list builder, which stores an encoded pointer
 * into the slab.
 */
address_t get_slab_interior(const FreeListKey& key) const
{
  auto interior = free_queue.read_head(0, key);
  return address_cast(interior);
}
};

/**
Expand Down
8 changes: 5 additions & 3 deletions src/snmalloc/mem/remotecache.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ namespace snmalloc
*/
struct RemoteDeallocCache
{
std::array<freelist::Builder<false, false>, REMOTE_SLOTS> list;
std::array<freelist::Builder<false>, REMOTE_SLOTS> list;

/**
* The total amount of memory we are waiting for before we will dispatch
Expand Down Expand Up @@ -165,14 +165,16 @@ namespace snmalloc
* Must be called before anything else to ensure actually initialised
* not just zero init.
*/
void init()
void init(const FreeListKey& key)
{
#ifndef NDEBUG
initialised = true;
#endif
for (auto& l : list)
{
l.init();
// We do not need to initialise with a particular slab, so pass
// an address of 0.
l.init(0, key);
}
capacity = REMOTE_CACHE;
}
Expand Down

0 comments on commit e0fd327

Please sign in to comment.