From 65bb8c18b074656e2a138a7418720498461881e8 Mon Sep 17 00:00:00 2001
From: Matthew Parkinson
Date: Wed, 25 Mar 2020 11:38:13 +0000
Subject: [PATCH] Make GlobalPlaceholder zero init

The GlobalPlaceholder allocator is now a zero-initialised block of
memory. This removes various issues around initialisation order. It is
made read-only so that we detect writes to it on some platforms.
---
 src/ds/address.h      | 20 ++++++++++++++++++++
 src/ds/cdllist.h      |  4 ++--
 src/mem/alloc.h       | 31 +++++++++++++++++--------------
 src/mem/threadalloc.h | 27 ++++++++++++++++++---------
 4 files changed, 57 insertions(+), 25 deletions(-)

diff --git a/src/ds/address.h b/src/ds/address.h
index 49c9b9878..22c294107 100644
--- a/src/ds/address.h
+++ b/src/ds/address.h
@@ -24,6 +24,15 @@ namespace snmalloc
     return reinterpret_cast<T*>(reinterpret_cast<char*>(base) + diff);
   }
 
+  /**
+   * Perform pointer arithmetic and return the adjusted pointer.
+   */
+  template<typename T>
+  inline T* pointer_offset_signed(T* base, ptrdiff_t diff)
+  {
+    return reinterpret_cast<T*>(reinterpret_cast<char*>(base) + diff);
+  }
+
   /**
    * Cast from a pointer type to an address.
    */
@@ -115,4 +124,15 @@ namespace snmalloc
     return static_cast<size_t>(
       static_cast<char*>(cursor) - static_cast<char*>(base));
   }
+
+  /**
+   * Compute the difference in pointers in units of char. This can be used
+   * across allocations.
+   */
+  inline ptrdiff_t pointer_diff_signed(void* base, void* cursor)
+  {
+    return static_cast<ptrdiff_t>(
+      static_cast<char*>(cursor) - static_cast<char*>(base));
+  }
+
 } // namespace snmalloc
diff --git a/src/ds/cdllist.h b/src/ds/cdllist.h
index 603676c3c..c23fd1fed 100644
--- a/src/ds/cdllist.h
+++ b/src/ds/cdllist.h
@@ -30,7 +30,7 @@ namespace snmalloc
     {
       // TODO: CHERI will need a real pointer too
       // next = c;
-      to_next = pointer_diff(c, this);
+      to_next = pointer_diff_signed(this, c);
     }
 
   public:
@@ -71,7 +71,7 @@ namespace snmalloc
     {
       // TODO: CHERI will require a real pointer
       // return next;
-      return pointer_offset(this, to_next);
+      return pointer_offset_signed(this, to_next);
     }
 
     SNMALLOC_FAST_PATH CDLLNode* get_prev()
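
Note: the cdllist.h change works because the two new address.h helpers
invert one another, letting a node store a signed offset instead of a raw
pointer; the next node may sit at a lower address, and a negative distance
does not survive the unsigned pointer_diff/pointer_offset pair portably.
A minimal standalone sketch of that round-trip invariant (the helper
bodies are copied from the hunk above; the arena buffer and offsets are
illustrative only):

    #include <cassert>
    #include <cstddef>

    // Copies of the two helpers added in src/ds/address.h above.
    template<typename T>
    inline T* pointer_offset_signed(T* base, ptrdiff_t diff)
    {
      return reinterpret_cast<T*>(reinterpret_cast<char*>(base) + diff);
    }

    inline ptrdiff_t pointer_diff_signed(void* base, void* cursor)
    {
      return static_cast<ptrdiff_t>(
        static_cast<char*>(cursor) - static_cast<char*>(base));
    }

    int main()
    {
      char arena[64];
      void* node = &arena[40];
      void* next = &arena[8];

      // The "next" node lives at a lower address, so the stored
      // difference must be signed.
      ptrdiff_t to_next = pointer_diff_signed(node, next);
      assert(to_next == -32);

      // Round trip: offsetting by the stored difference recovers the
      // original pointer. CDLLNode::set_next/get_next rely on exactly
      // this when they keep to_next instead of a raw pointer.
      assert(pointer_offset_signed(node, to_next) == next);
    }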
diff --git a/src/mem/alloc.h b/src/mem/alloc.h
index ba79021ca..60bd82eed 100644
--- a/src/mem/alloc.h
+++ b/src/mem/alloc.h
@@ -165,8 +165,6 @@ namespace snmalloc
       else
         return calloc(1, size);
 #else
-      stats().alloc_request(size);
-
       // Perform the - 1 on size, so that zero wraps around and ends up on
       // slow path.
       if (likely((size - 1) <= (sizeclass_to_size(NUM_SMALL_CLASSES - 1) - 1)))
@@ -1010,17 +1008,18 @@ namespace snmalloc
       SNMALLOC_ASSUME(size <= SLAB_SIZE);
       sizeclass_t sizeclass = size_to_sizeclass(size);
-      return small_alloc_inner<zero_mem, allow_reserve>(sizeclass);
+      return small_alloc_inner<zero_mem, allow_reserve>(sizeclass, size);
     }
 
     template<ZeroMem zero_mem, AllowReserve allow_reserve>
-    SNMALLOC_FAST_PATH void* small_alloc_inner(sizeclass_t sizeclass)
+    SNMALLOC_FAST_PATH void* small_alloc_inner(sizeclass_t sizeclass, size_t size)
     {
       SNMALLOC_ASSUME(sizeclass < NUM_SMALL_CLASSES);
       auto& fl = small_fast_free_lists[sizeclass];
       void* head = fl.value;
       if (likely(head != nullptr))
       {
+        stats().alloc_request(size);
         stats().sizeclass_alloc(sizeclass);
         // Read the next slot from the memory that's about to be allocated.
         fl.value = Metaslab::follow_next(head);
@@ -1034,9 +1033,9 @@ namespace snmalloc
       }
 
       if (likely(!has_messages()))
-        return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass);
+        return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass, size);
 
-      return small_alloc_mq_slow<zero_mem, allow_reserve>(sizeclass);
+      return small_alloc_mq_slow<zero_mem, allow_reserve>(sizeclass, size);
     }
 
     /**
      * Slow path for handling message queue, before dealing with small
      * allocation request.
      */
     template<ZeroMem zero_mem, AllowReserve allow_reserve>
-    SNMALLOC_SLOW_PATH void* small_alloc_mq_slow(sizeclass_t sizeclass)
+    SNMALLOC_SLOW_PATH void* small_alloc_mq_slow(sizeclass_t sizeclass, size_t size)
     {
       handle_message_queue_inner();
-      return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass);
+      return small_alloc_next_free_list<zero_mem, allow_reserve>(sizeclass, size);
     }
 
     /**
      * Attempt to find a new free list to allocate from
      */
     template<ZeroMem zero_mem, AllowReserve allow_reserve>
-    SNMALLOC_SLOW_PATH void* small_alloc_next_free_list(sizeclass_t sizeclass)
+    SNMALLOC_SLOW_PATH void* small_alloc_next_free_list(sizeclass_t sizeclass, size_t size)
     {
       size_t rsize = sizeclass_to_size(sizeclass);
       auto& sl = small_classes[sizeclass];
@@ -1064,6 +1063,7 @@ namespace snmalloc
 
       if (likely(!sl.is_empty()))
       {
+        stats().alloc_request(size);
         stats().sizeclass_alloc(sizeclass);
 
         SlabLink* link = sl.get_next();
@@ -1072,7 +1072,7 @@ namespace snmalloc
         return slab->alloc<zero_mem, allow_reserve>(
           sl, ffl, rsize, large_allocator.memory_provider);
       }
-      return small_alloc_rare<zero_mem, allow_reserve>(sizeclass);
+      return small_alloc_rare<zero_mem, allow_reserve>(sizeclass, size);
     }
 
     /**
@@ -1081,14 +1081,15 @@ namespace snmalloc
      * new free list.
      */
     template<ZeroMem zero_mem, AllowReserve allow_reserve>
-    SNMALLOC_SLOW_PATH void* small_alloc_rare(sizeclass_t sizeclass)
+    SNMALLOC_SLOW_PATH void* small_alloc_rare(sizeclass_t sizeclass, size_t size)
     {
       if (likely(!NeedsInitialisation(this)))
       {
+        stats().alloc_request(size);
         stats().sizeclass_alloc(sizeclass);
         return small_alloc_new_free_list<zero_mem, allow_reserve>(sizeclass);
       }
-      return small_alloc_first_alloc<zero_mem, allow_reserve>(sizeclass);
+      return small_alloc_first_alloc<zero_mem, allow_reserve>(sizeclass, size);
     }
 
     /**
@@ -1096,11 +1097,11 @@ namespace snmalloc
      * then directs the allocation request to the newly created allocator.
      */
     template<ZeroMem zero_mem, AllowReserve allow_reserve>
-    SNMALLOC_SLOW_PATH void* small_alloc_first_alloc(sizeclass_t sizeclass)
+    SNMALLOC_SLOW_PATH void* small_alloc_first_alloc(sizeclass_t sizeclass, size_t size)
     {
       auto replacement = InitThreadAllocator();
       return reinterpret_cast<Alloc*>(replacement)
-        ->template small_alloc_inner<zero_mem, allow_reserve>(sizeclass);
+        ->template small_alloc_inner<zero_mem, allow_reserve>(sizeclass, size);
     }
 
     /**
@@ -1297,6 +1298,7 @@ namespace snmalloc
         sc->insert(slab);
       }
 
+      stats().alloc_request(size);
       stats().sizeclass_alloc(sizeclass);
       return p;
     }
@@ -1364,6 +1366,7 @@ namespace snmalloc
 
       chunkmap().set_large_size(p, size);
 
+      stats().alloc_request(size);
       stats().large_alloc(large_class);
       return p;
     }
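
Note: with stats().alloc_request(size) removed from the generic entry
point (first hunk), each terminal path now records the request itself:
the free-list hit in small_alloc_inner, the refill paths, and the
medium/large paths in the last two hunks. That is why size is threaded
through every small_alloc_* signature. The context kept in the first
hunk also documents a trick worth spelling out: subtracting 1 lets a
single unsigned comparison route both size == 0 and oversized requests
to the slow path. A standalone sketch (SMALL_MAX is an illustrative
stand-in for sizeclass_to_size(NUM_SMALL_CLASSES - 1), not snmalloc's
real bound):

    #include <cstddef>
    #include <cstdio>

    // Stand-in for sizeclass_to_size(NUM_SMALL_CLASSES - 1).
    constexpr size_t SMALL_MAX = 1 << 16;

    // One unsigned comparison serves two checks: for size == 0 the
    // subtraction wraps to SIZE_MAX, which fails the test, so zero-byte
    // requests fall through to the slow path along with oversized ones.
    inline bool is_small_request(size_t size)
    {
      return (size - 1) <= (SMALL_MAX - 1);
    }

    int main()
    {
      printf("%d\n", is_small_request(0));             // 0: wrapped, slow path
      printf("%d\n", is_small_request(1));             // 1: fast path
      printf("%d\n", is_small_request(SMALL_MAX));     // 1: largest small size
      printf("%d\n", is_small_request(SMALL_MAX + 1)); // 0: slow path
    }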
diff --git a/src/mem/threadalloc.h b/src/mem/threadalloc.h
index 9c53f4cee..ff198ebac 100644
--- a/src/mem/threadalloc.h
+++ b/src/mem/threadalloc.h
@@ -68,10 +68,19 @@ namespace snmalloc
    * slabs to allocate from, it will discover that it is the placeholder and
    * replace itself with the thread-local allocator, allocating one if
    * required. This avoids a branch on the fast path.
+   *
+   * The fake allocator is a zero initialised area of memory of the correct
+   * size. All data structures used potentially before initialisation must be
+   * okay with zero init to move to the slow path, that is, zero must signify
+   * empty.
    */
-  inline GlobalVirtual dummy_memory_provider;
-  inline Alloc GlobalPlaceHolder(
-    dummy_memory_provider, SNMALLOC_DEFAULT_CHUNKMAP(), nullptr, true);
+  inline const char GlobalPlaceHolder[sizeof(Alloc)] = {0};
+
+  inline Alloc* get_GlobalPlaceHolder()
+  {
+    auto a = reinterpret_cast<const Alloc*>(&GlobalPlaceHolder);
+    return const_cast<Alloc*>(a);
+  }
 
   /**
    * Common aspects of thread local allocator. Subclasses handle how releasing
@@ -85,10 +94,10 @@ namespace snmalloc
     static inline void inner_release()
     {
       auto& per_thread = get_reference();
-      if (per_thread != &GlobalPlaceHolder)
+      if (per_thread != get_GlobalPlaceHolder())
       {
         current_alloc_pool()->release(per_thread);
-        per_thread = &GlobalPlaceHolder;
+        per_thread = get_GlobalPlaceHolder();
       }
     }
 
@@ -123,7 +132,7 @@ namespace snmalloc
      */
     static inline Alloc*& get_reference()
     {
-      static thread_local Alloc* alloc = &GlobalPlaceHolder;
+      static thread_local Alloc* alloc = get_GlobalPlaceHolder();
       return alloc;
     }
 
@@ -236,7 +245,7 @@ namespace snmalloc
   SNMALLOC_SLOW_PATH inline void* init_thread_allocator()
   {
     auto*& local_alloc = ThreadAlloc::get_reference();
-    if (local_alloc != &GlobalPlaceHolder)
+    if (local_alloc != get_GlobalPlaceHolder())
     {
       // If someone reuses a noncachable call, then we can end up here.
      // The allocator has already been initialised. Could either error,
      // or return the allocator.
       return local_alloc;
     }
     local_alloc = current_alloc_pool()->acquire();
-    SNMALLOC_ASSERT(local_alloc != &GlobalPlaceHolder);
+    SNMALLOC_ASSERT(local_alloc != get_GlobalPlaceHolder());
     ThreadAlloc::register_cleanup();
     return local_alloc;
   }
 
@@ -257,7 +266,7 @@ namespace snmalloc
    */
   SNMALLOC_FAST_PATH bool needs_initialisation(void* existing)
   {
-    return existing == &GlobalPlaceHolder;
+    return existing == get_GlobalPlaceHolder();
   }
 #endif
 } // namespace snmalloc
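
Note: the threadalloc.h change replaces a constructed global with
constant zero-initialised storage, so no constructor has to run before
the first allocation, and on platforms that map such data read-only a
stray write to the placeholder faults instead of silently corrupting
state. The contract stated in the new comment is that zero must read as
"empty" for everything touched before initialisation. A compilable
sketch of the pattern under hypothetical names (FakeAlloc, placeholder,
acquire_real_allocator and friends are stand-ins, not snmalloc's API):

    #include <cassert>

    // Illustrative stand-in for Alloc; one zeroable field suffices here.
    struct FakeAlloc
    {
      void* free_list; // nullptr (zero) must mean "empty": forces slow path
    };

    // Zero-initialised constant storage: usable before any constructor
    // runs, and typically placed in a read-only mapping.
    alignas(FakeAlloc) const char placeholder[sizeof(FakeAlloc)] = {0};

    FakeAlloc* get_placeholder()
    {
      auto a = reinterpret_cast<const FakeAlloc*>(&placeholder);
      return const_cast<FakeAlloc*>(a);
    }

    FakeAlloc* acquire_real_allocator()
    {
      static FakeAlloc real{nullptr};
      return &real;
    }

    FakeAlloc*& get_reference()
    {
      // Mirrors ThreadAlloc::get_reference(): every thread starts out
      // pointing at the shared placeholder.
      static thread_local FakeAlloc* alloc = get_placeholder();
      return alloc;
    }

    void* alloc_slow(FakeAlloc*& a)
    {
      // Mirrors needs_initialisation()/init_thread_allocator(): swap the
      // placeholder for a real allocator exactly once.
      if (a == get_placeholder())
        a = acquire_real_allocator();
      return a; // stand-in for "allocate from the real allocator"
    }

    void* alloc_fast()
    {
      FakeAlloc*& a = get_reference();
      // The placeholder's zeroed free_list looks empty, so the first
      // call on each thread falls through to the slow path; the fast
      // path never needs an explicit "am I initialised?" branch.
      if (a->free_list != nullptr)
        return a->free_list;
      return alloc_slow(a);
    }

    int main()
    {
      assert(alloc_fast() != nullptr);
      assert(get_reference() != get_placeholder());
    }

The design point mirrored here is the one the commit message names: the
zeroed state itself steers the first allocation onto the slow path,
where the placeholder is replaced by a real allocator.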