
Make GlobalPlaceholder zero init
The GlobalPlaceholder allocator is now a zero-initialised block of memory.
This removes various issues around when things are initialised. It is made read-only
so that writes to it can be detected on some platforms.
mjp41 committed Mar 25, 2020
1 parent 0c40c84 commit 65bb8c1
Showing 4 changed files with 57 additions and 25 deletions.
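
Before the per-file diffs, a minimal self-contained sketch of the pattern this commit adopts may help. The names below (ToyAlloc, placeholder, get_placeholder) are illustrative stand-ins rather than snmalloc's real API: a const, zero-initialised buffer of the right size acts as the allocator until first use, so it needs no dynamic initialiser, and on platforms that place const data in read-only pages a stray write to it faults.

#include <cstddef>
#include <cstdio>

// Illustrative allocator: the only property the placeholder trick relies on
// is that an all-zero object always takes the slow path.
struct ToyAlloc
{
  void* free_list; // nullptr (i.e. zero) must mean "empty", forcing the slow path

  void* alloc(std::size_t size)
  {
    if (free_list != nullptr)
      return fast_alloc(size); // never reached on the zero placeholder
    return slow_alloc(size);   // slow path can notice it is the placeholder
  }

  void* fast_alloc(std::size_t) { return free_list; }

  void* slow_alloc(std::size_t size)
  {
    std::printf("slow path: set up a real allocator for %zu bytes\n", size);
    return nullptr;
  }
};

// Zero-initialised, read-only block of the right size standing in for a real
// ToyAlloc. Being const and zero-filled it is constant-initialised and
// typically placed in .rodata, so a stray write to it traps on many platforms.
const char placeholder[sizeof(ToyAlloc)] = {0};

ToyAlloc* get_placeholder()
{
  return const_cast<ToyAlloc*>(reinterpret_cast<const ToyAlloc*>(&placeholder));
}

int main()
{
  ToyAlloc* a = get_placeholder();
  a->alloc(32); // free_list reads as zero, so this falls through to the slow path
  return 0;
}

Calling alloc on the placeholder falls through to the slow path because every field reads as zero, which is exactly the property the commit relies on.
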
20 changes: 20 additions & 0 deletions src/ds/address.h
@@ -24,6 +24,15 @@ namespace snmalloc
return reinterpret_cast<T*>(reinterpret_cast<char*>(base) + diff);
}

/**
* Perform pointer arithmetic and return the adjusted pointer.
*/
template<typename T>
inline T* pointer_offset_signed(T* base, ptrdiff_t diff)
{
return reinterpret_cast<T*>(reinterpret_cast<char*>(base) + diff);
}

/**
* Cast from a pointer type to an address.
*/
@@ -115,4 +124,15 @@ namespace snmalloc
return static_cast<size_t>(
static_cast<char*>(cursor) - static_cast<char*>(base));
}

/**
* Compute the difference in pointers in units of char. This can be used
* across allocations.
*/
inline ptrdiff_t pointer_diff_signed(void* base, void* cursor)
{
return static_cast<ptrdiff_t>(
static_cast<char*>(cursor) - static_cast<char*>(base));
}

} // namespace snmalloc
4 changes: 2 additions & 2 deletions src/ds/cdllist.h
@@ -30,7 +30,7 @@ namespace snmalloc
{
// TODO: CHERI will need a real pointer too
// next = c;
to_next = pointer_diff(c, this);
to_next = pointer_diff_signed(this, c);
}

public:
@@ -71,7 +71,7 @@ namespace snmalloc
{
// TODO: CHERI will require a real pointer
// return next;
return pointer_offset(this, to_next);
return pointer_offset_signed(this, to_next);
}

SNMALLOC_FAST_PATH CDLLNode* get_prev()
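
The cdllist.h change stores the link as a signed byte offset from this rather than as a raw pointer, using the two helpers added to address.h above. A rough sketch of that idea with a simplified node type (the real CDLLNode, and its CHERI handling, is more involved):

#include <cstddef>

// Simplified offset-linked node: instead of holding a raw next pointer it
// holds the signed distance in bytes from this node to the next one.
struct OffsetNode
{
  std::ptrdiff_t to_next = 0;

  void set_next(OffsetNode* c)
  {
    // Equivalent of pointer_diff_signed(this, c): distance from this node to c.
    to_next = reinterpret_cast<char*>(c) - reinterpret_cast<char*>(this);
  }

  OffsetNode* get_next()
  {
    // Equivalent of pointer_offset_signed(this, to_next): walk that distance back.
    return reinterpret_cast<OffsetNode*>(reinterpret_cast<char*>(this) + to_next);
  }
};

int main()
{
  OffsetNode a, b;
  a.set_next(&b);
  return a.get_next() == &b ? 0 : 1;
}

A useful consequence of the offset form is that a zero-initialised node already links to itself, the empty state of a circular list, which fits the commit's rule that zero must signify empty for anything the placeholder allocator might touch.
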
31 changes: 17 additions & 14 deletions src/mem/alloc.h
@@ -165,8 +165,6 @@ namespace snmalloc
else
return calloc(1, size);
#else
stats().alloc_request(size);

// Perform the - 1 on size, so that zero wraps around and ends up on
// slow path.
if (likely((size - 1) <= (sizeclass_to_size(NUM_SMALL_CLASSES - 1) - 1)))
@@ -1010,17 +1008,18 @@ namespace snmalloc

SNMALLOC_ASSUME(size <= SLAB_SIZE);
sizeclass_t sizeclass = size_to_sizeclass(size);
return small_alloc_inner<zero_mem, allow_reserve>(sizeclass);
return small_alloc_inner<zero_mem, allow_reserve>(sizeclass, size);
}

template<ZeroMem zero_mem, AllowReserve allow_reserve>
SNMALLOC_FAST_PATH void* small_alloc_inner(sizeclass_t sizeclass)
SNMALLOC_FAST_PATH void* small_alloc_inner(sizeclass_t sizeclass, size_t size)
{
SNMALLOC_ASSUME(sizeclass < NUM_SMALL_CLASSES);
auto& fl = small_fast_free_lists[sizeclass];
void* head = fl.value;
if (likely(head != nullptr))
{
stats().alloc_request(size);
stats().sizeclass_alloc(sizeclass);
// Read the next slot from the memory that's about to be allocated.
fl.value = Metaslab::follow_next(head);
@@ -1034,28 +1033,28 @@
}

if (likely(!has_messages()))
return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass);
return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass, size);

return small_alloc_mq_slow<zero_mem, allow_reserve>(sizeclass);
return small_alloc_mq_slow<zero_mem, allow_reserve>(sizeclass, size);
}

/**
* Slow path for handling message queue, before dealing with small
* allocation request.
*/
template<ZeroMem zero_mem, AllowReserve allow_reserve>
SNMALLOC_SLOW_PATH void* small_alloc_mq_slow(sizeclass_t sizeclass)
SNMALLOC_SLOW_PATH void* small_alloc_mq_slow(sizeclass_t sizeclass, size_t size)
{
handle_message_queue_inner();

return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass);
return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass, size);
}

/**
* Attempt to find a new free list to allocate from
*/
template<ZeroMem zero_mem, AllowReserve allow_reserve>
SNMALLOC_SLOW_PATH void* small_alloc_next_free_list(sizeclass_t sizeclass)
SNMALLOC_SLOW_PATH void* small_alloc_next_free_list(sizeclass_t sizeclass, size_t size)
{
size_t rsize = sizeclass_to_size(sizeclass);
auto& sl = small_classes[sizeclass];
@@ -1064,6 +1063,7 @@

if (likely(!sl.is_empty()))
{
stats().alloc_request(size);
stats().sizeclass_alloc(sizeclass);

SlabLink* link = sl.get_next();
@@ -1072,7 +1072,7 @@
return slab->alloc<zero_mem>(
sl, ffl, rsize, large_allocator.memory_provider);
}
return small_alloc_rare<zero_mem, allow_reserve>(sizeclass);
return small_alloc_rare<zero_mem, allow_reserve>(sizeclass, size);
}

/**
@@ -1081,26 +1081,27 @@
* new free list.
*/
template<ZeroMem zero_mem, AllowReserve allow_reserve>
SNMALLOC_SLOW_PATH void* small_alloc_rare(sizeclass_t sizeclass)
SNMALLOC_SLOW_PATH void* small_alloc_rare(sizeclass_t sizeclass, size_t size)
{
if (likely(!NeedsInitialisation(this)))
{
stats().alloc_request(size);
stats().sizeclass_alloc(sizeclass);
return small_alloc_new_free_list<zero_mem, allow_reserve>(sizeclass);
}
return small_alloc_first_alloc<zero_mem, allow_reserve>(sizeclass);
return small_alloc_first_alloc<zero_mem, allow_reserve>(sizeclass, size);
}

/**
* Called on first allocation to set up the thread local allocator,
* then directs the allocation request to the newly created allocator.
*/
template<ZeroMem zero_mem, AllowReserve allow_reserve>
SNMALLOC_SLOW_PATH void* small_alloc_first_alloc(sizeclass_t sizeclass)
SNMALLOC_SLOW_PATH void* small_alloc_first_alloc(sizeclass_t sizeclass, size_t size)
{
auto replacement = InitThreadAllocator();
return reinterpret_cast<Allocator*>(replacement)
->template small_alloc_inner<zero_mem, allow_reserve>(sizeclass);
->template small_alloc_inner<zero_mem, allow_reserve>(sizeclass, size);
}

/**
@@ -1297,6 +1298,7 @@ namespace snmalloc
sc->insert(slab);
}

stats().alloc_request(size);
stats().sizeclass_alloc(sizeclass);
return p;
}
@@ -1364,6 +1366,7 @@

chunkmap().set_large_size(p, size);

stats().alloc_request(size);
stats().large_alloc(large_class);
return p;
}
27 changes: 18 additions & 9 deletions src/mem/threadalloc.h
@@ -68,10 +68,19 @@ namespace snmalloc
* slabs to allocate from, it will discover that it is the placeholder and
* replace itself with the thread-local allocator, allocating one if
* required. This avoids a branch on the fast path.
*
* The fake allocator is a zero initialised area of memory of the correct
* size. All data structures used potentially before initialisation must be
* okay with zero init to move to the slow path, that is, zero must signify
* empty.
*/
inline GlobalVirtual dummy_memory_provider;
inline Alloc GlobalPlaceHolder(
dummy_memory_provider, SNMALLOC_DEFAULT_CHUNKMAP(), nullptr, true);
inline const char GlobalPlaceHolder[sizeof(Alloc)] = {0};

inline Alloc* get_GlobalPlaceHolder()
{
auto a = reinterpret_cast<const Alloc*>(&GlobalPlaceHolder);
return const_cast<Alloc*>(a);
}

/**
* Common aspects of thread local allocator. Subclasses handle how releasing
Expand All @@ -85,10 +94,10 @@ namespace snmalloc
static inline void inner_release()
{
auto& per_thread = get_reference();
if (per_thread != &GlobalPlaceHolder)
if (per_thread != get_GlobalPlaceHolder())
{
current_alloc_pool()->release(per_thread);
per_thread = &GlobalPlaceHolder;
per_thread = get_GlobalPlaceHolder();
}
}

@@ -123,7 +132,7 @@
*/
static inline Alloc*& get_reference()
{
static thread_local Alloc* alloc = &GlobalPlaceHolder;
static thread_local Alloc* alloc = get_GlobalPlaceHolder();
return alloc;
}

@@ -236,15 +245,15 @@ namespace snmalloc
SNMALLOC_SLOW_PATH inline void* init_thread_allocator()
{
auto*& local_alloc = ThreadAlloc::get_reference();
if (local_alloc != &GlobalPlaceHolder)
if (local_alloc != get_GlobalPlaceHolder())
{
// If someone reuses a noncachable call, then we can end up here.
// The allocator has already been initialised. Could either error
// to say stop doing this, or just give them the initialised version.
return local_alloc;
}
local_alloc = current_alloc_pool()->acquire();
SNMALLOC_ASSERT(local_alloc != &GlobalPlaceHolder);
SNMALLOC_ASSERT(local_alloc != get_GlobalPlaceHolder());
ThreadAlloc::register_cleanup();
return local_alloc;
}
@@ -257,7 +266,7 @@
*/
SNMALLOC_FAST_PATH bool needs_initialisation(void* existing)
{
return existing == &GlobalPlaceHolder;
return existing == get_GlobalPlaceHolder();
}
#endif
} // namespace snmalloc
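
Putting the threadalloc.h pieces together, the per-thread bootstrap now works roughly as in the sketch below. The names are stand-ins echoing the earlier sketch; the real code acquires allocators from current_alloc_pool() and registers a thread-exit cleanup handler.

#include <cstddef>

struct ToyAlloc
{
  void* free_list = nullptr; // the zero-init state is a valid "empty" allocator
};

// Shared, read-only, zero-initialised placeholder every thread starts with.
const char placeholder[sizeof(ToyAlloc)] = {0};

ToyAlloc* get_placeholder()
{
  return const_cast<ToyAlloc*>(reinterpret_cast<const ToyAlloc*>(&placeholder));
}

// The thread-local slot is initialised to the placeholder, so no allocation
// work happens at thread start-up.
ToyAlloc*& get_reference()
{
  static thread_local ToyAlloc* alloc = get_placeholder();
  return alloc;
}

bool needs_initialisation(void* existing)
{
  return existing == get_placeholder();
}

// Called from the slow path the first time a thread really needs to allocate.
ToyAlloc* init_thread_allocator()
{
  ToyAlloc*& local = get_reference();
  if (!needs_initialisation(local))
    return local;          // already set up (e.g. a repeated slow-path call)
  local = new ToyAlloc{};  // stand-in for acquiring from the allocator pool
  return local;
}

int main()
{
  void* before = get_reference();
  ToyAlloc* real = init_thread_allocator();
  return (needs_initialisation(before) && !needs_initialisation(real)) ? 0 : 1;
}
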
