Skip to content

Commit

Permalink
allocator (#1908)
Browse files Browse the repository at this point in the history
Signed-off-by: turuslan <turuslan.devbox@gmail.com>
  • Loading branch information
turuslan authored Feb 19, 2024
1 parent 95e9d94 commit be1a92a
Show file tree
Hide file tree
Showing 21 changed files with 202 additions and 336 deletions.
1 change: 1 addition & 0 deletions .clang-format
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#

BasedOnStyle: Google
BreakStringLiterals: false
NamespaceIndentation: All
BreakBeforeBinaryOperators: NonAssignment
AlignOperands: AlignAfterOperator
Expand Down
4 changes: 2 additions & 2 deletions core/consensus/grandpa/impl/environment_impl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -420,10 +420,10 @@ namespace kagome::consensus::grandpa {
SL_ERROR(
logger_,
"BUG: VotingRoundImpl::doFinalize, block {}, set {} != {}, round {}",
grandpa_justification.block_info.number,
id,
voters.id,
grandpa_justification.round_number,
grandpa_justification.block_info.number);
grandpa_justification.round_number);
return VotingRoundError::JUSTIFICATION_FOR_BLOCK_IN_PAST;
}
verified_justification_queue_->addVerified(id, grandpa_justification);
Expand Down
3 changes: 2 additions & 1 deletion core/crypto/chacha.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

#include <openssl/evp.h>
#include <array>
#include <bit>
#include <cstdint>
#include <memory>
#include <span>
Expand Down Expand Up @@ -59,7 +60,7 @@ namespace kagome::crypto {
}

uint32_t next(uint32_t n) {
auto zone = (n << __builtin_clz(n)) - 1;
auto zone = (n << std::countl_zero(n)) - 1;
while (true) {
if (index_ >= block_.size()) {
block_ = block();
Expand Down
12 changes: 3 additions & 9 deletions core/host_api/impl/memory_extension.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ namespace kagome::host_api {
BOOST_ASSERT_MSG(memory_provider_ != nullptr, "memory provider is nullptr");
SL_DEBUG(logger_,
"Memory extension {} initialized with memory provider {}",
fmt::ptr(this), fmt::ptr(memory_provider_));
fmt::ptr(this),
fmt::ptr(memory_provider_));
}

runtime::WasmPointer MemoryExtension::ext_allocator_malloc_version_1(
Expand All @@ -31,14 +32,7 @@ namespace kagome::host_api {
}

void MemoryExtension::ext_allocator_free_version_1(runtime::WasmPointer ptr) {
auto opt_size = memory_provider_->getCurrentMemory()->get().deallocate(ptr);
memory_provider_->getCurrentMemory()->get().deallocate(ptr);
SL_TRACE_FUNC_CALL(logger_, ptr);
if (not opt_size) {
logger_->warn(
"Ptr {} does not point to any memory chunk in wasm memory. Nothing "
"deallocated",
ptr);
return;
}
}
} // namespace kagome::host_api
7 changes: 4 additions & 3 deletions core/primitives/math.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@

#pragma once

#include <common/blob.hpp>
#include <bit>
#include <limits>
#include <type_traits>

namespace kagome::math {
Expand Down Expand Up @@ -59,8 +60,8 @@ namespace kagome::math {
if (isPowerOf2(k)) {
return k;
}
const auto p = k == 0ull ? 0ull : 64ull - __builtin_clzll(k);
return (1ull << p);
auto n = std::numeric_limits<decltype(k)>::digits - std::countl_zero(k);
return decltype(k){1} << n;
}

} // namespace kagome::math
2 changes: 1 addition & 1 deletion core/runtime/binaryen/memory_impl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ namespace kagome::runtime::binaryen {
return allocator_->allocate(size);
}

std::optional<WasmSize> MemoryImpl::deallocate(WasmPointer ptr) {
void MemoryImpl::deallocate(WasmPointer ptr) {
return allocator_->deallocate(ptr);
}

Expand Down
7 changes: 2 additions & 5 deletions core/runtime/binaryen/memory_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,18 +47,15 @@ namespace kagome::runtime::binaryen {
~MemoryImpl() override = default;

WasmPointer allocate(WasmSize size) override;
std::optional<WasmSize> deallocate(WasmPointer ptr) override;
void deallocate(WasmPointer ptr) override;

void resize(WasmSize new_size) override {
/**
* We use this condition to avoid
* deallocated_ pointers fixup
*/
if (new_size >= size()) {
if (auto mod = new_size % kMemoryPageSize) {
new_size += kMemoryPageSize - mod;
}
memory_->resize(new_size);
memory_->resize(sizeToPages(new_size) * kMemoryPageSize);
}
}

Expand Down
2 changes: 1 addition & 1 deletion core/runtime/binaryen/runtime_external_interface.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ namespace kagome::runtime::binaryen {

wasm::Fatal() << "callImport: unknown import: " << import->module.str << "."
<< import->name.str;
return wasm::Literal();
throw wasm::TrapException{};
}

void RuntimeExternalInterface::checkArguments(std::string_view extern_name,
Expand Down
187 changes: 78 additions & 109 deletions core/runtime/common/memory_allocator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,151 +6,120 @@

#include "runtime/common/memory_allocator.hpp"

#include "log/formatters/ref_and_ptr.hpp"
#include "log/trace_macros.hpp"
#include <boost/endian/conversion.hpp>

#include "runtime/memory.hpp"

namespace kagome::runtime {
// https://github.com/paritytech/polkadot-sdk/blob/polkadot-v1.7.0/substrate/client/allocator/src/lib.rs#L39
constexpr auto kMaxPages = (uint64_t{4} << 30) / kMemoryPageSize;

static_assert(roundUpAlign(kDefaultHeapBase) == kDefaultHeapBase,
"Heap base must be aligned");

static_assert(kDefaultHeapBase < kInitialMemorySize,
"Heap base must be in memory");

/// Load a little-endian uint64 from wasm linear memory at `ptr`.
/// `memory.view(...).value()` asserts that the 8-byte span is in bounds.
inline uint64_t read_u64(const Memory &memory, WasmPointer ptr) {
  auto span = memory.view(ptr, sizeof(uint64_t)).value();
  return boost::endian::load_little_u64(span.data());
}

/// Store `v` as a little-endian uint64 into wasm linear memory at `ptr`.
/// NOTE(review): `memory` is taken by const-ref yet the pointed-to bytes are
/// mutated through `view(...).data()` — presumably `Memory::view` is const but
/// hands out writable storage; confirm against the Memory interface.
inline void write_u64(const Memory &memory, WasmPointer ptr, uint64_t v) {
  auto span = memory.view(ptr, sizeof(uint64_t)).value();
  boost::endian::store_little_u64(span.data(), v);
}

MemoryAllocator::MemoryAllocator(Memory &memory, const MemoryConfig &config)
: memory_{
[&](size_t size) { memory.resize(size); },
[&] { return memory.size(); },
[&](WasmPointer ptr, uint32_t v) {
memcpy(memory.view(ptr, sizeof(v)).value().data(), &v, sizeof(v));
},
[&](WasmPointer ptr) {
uint32_t v;
memcpy(&v, memory.view(ptr, sizeof(v)).value().data(), sizeof(v));
return v;
},
},
: memory_{memory},
offset_{roundUpAlign(config.heap_base)},
max_memory_pages_num_{config.limits.max_memory_pages_num.value_or(
std::numeric_limits<uint32_t>::max())},
logger_{log::createLogger("Allocator", "runtime")} {
// Heap base (and offset in according) must be non-zero to prohibit
// allocating memory at 0 in the future, as returning 0 from allocate method
// means that wasm memory was exhausted
BOOST_ASSERT(offset_ > 0);
max_memory_pages_num_{
config.limits.max_memory_pages_num.value_or(kMaxPages)} {
BOOST_ASSERT(max_memory_pages_num_ > 0);
BOOST_ASSERT(memory_.getSize);
BOOST_ASSERT(memory_.resize);
}

WasmPointer MemoryAllocator::allocate(const uint32_t size) {
if (size == 0) {
return 0;
WasmPointer MemoryAllocator::allocate(WasmSize size) {
if (size > kMaxAllocate) {
throw std::runtime_error{"RequestedAllocationTooLarge"};
}

const size_t chunk_size =
math::nextHighPowerOf2(roundUpAlign(size) + AllocationHeaderSz);

const auto ptr = offset_;
const auto new_offset = ptr + chunk_size; // align

// Round up allocating chunk of memory
if (new_offset <= memory_.getSize()) {
offset_ = new_offset;
AllocationHeader{
.chunk_sz = (uint32_t)chunk_size,
.allocation_sz = roundUpAlign(size),
size = std::max(size, kMinAllocate);
size = math::nextHighPowerOf2(size);
uint32_t order = std::countr_zero(size) - std::countr_zero(kMinAllocate);
uint32_t head_ptr;
if (auto &list = free_lists_.at(order)) {
head_ptr = *list;
if (*list + sizeof(Header) + size > memory_.size()) {
throw std::runtime_error{"Header pointer out of memory bounds"};
}
.serialize(ptr, memory_);
SL_TRACE_FUNC_CALL(logger_, ptr, static_cast<const void *>(this), size);
return ptr + AllocationHeaderSz;
}

auto &preallocates = available_[chunk_size];
if (!preallocates.empty()) {
const auto ptr = preallocates.back();
preallocates.pop_back();

AllocationHeader{
.chunk_sz = (uint32_t)chunk_size,
.allocation_sz = roundUpAlign(size),
list = readFree(*list);
} else {
head_ptr = offset_;
auto next_offset = uint64_t{offset_} + sizeof(Header) + size;
if (next_offset > memory_.size()) {
auto pages = sizeToPages(next_offset);
if (pages > max_memory_pages_num_) {
throw std::runtime_error{
"Memory resize failed, because maximum number of pages is reached."};
}
pages = std::max(pages, 2 * sizeToPages(memory_.size()));
pages = std::min<uint64_t>(pages, max_memory_pages_num_);
memory_.resize(pages * kMemoryPageSize);
}
.serialize(ptr, memory_);
return ptr + AllocationHeaderSz;
offset_ = next_offset;
}

return growAlloc(chunk_size, size);
write_u64(memory_, head_ptr, kOccupied | order);
return head_ptr + sizeof(Header);
}

std::optional<WasmSize> MemoryAllocator::deallocate(WasmPointer ptr) {
AllocationHeader header{
.chunk_sz = 0,
.allocation_sz = 0,
};
header.deserialize(ptr - AllocationHeaderSz, memory_);
BOOST_ASSERT(math::isPowerOf2(header.chunk_sz));

available_[header.chunk_sz].push_back(ptr - AllocationHeaderSz);
BOOST_ASSERT(!available_.empty());
return header.allocation_sz;
void MemoryAllocator::deallocate(WasmPointer ptr) {
if (ptr < sizeof(Header)) {
throw std::runtime_error{"Invalid pointer for deallocation"};
}
auto head_ptr = ptr - sizeof(Header);
auto order = readOccupied(head_ptr);
auto &list = free_lists_.at(order);
auto prev = list.value_or(kNil);
list = head_ptr;
write_u64(memory_, head_ptr, prev);
}

WasmPointer MemoryAllocator::growAlloc(size_t chunk_sz,
WasmSize allocation_sz) {
// check that we do not exceed max memory size
auto new_pages_num =
(chunk_sz + offset_ + kMemoryPageSize - 1) / kMemoryPageSize;
if (new_pages_num > max_memory_pages_num_) {
logger_->error(
"Memory size exceeded when growing it on {} bytes, offset was 0x{:x}",
chunk_sz,
offset_);
return 0;
uint32_t MemoryAllocator::readOccupied(WasmPointer head_ptr) const {
auto head = read_u64(memory_, head_ptr);
uint32_t order = head;
if (order >= kOrders) {
throw std::runtime_error{"order exceed the total number of orders"};
}
auto new_size = offset_ + chunk_sz;
if (new_size > std::numeric_limits<WasmSize>::max()) {
return 0;
if ((head & kOccupied) == 0) {
throw std::runtime_error{"the allocation points to an empty header"};
}

resize(new_size);
BOOST_ASSERT(memory_.getSize() >= new_size);
return allocate(allocation_sz);
return order;
}

// Grow (or set) the underlying wasm memory to `new_size` bytes by delegating
// straight to the Memory abstraction; no allocator bookkeeping is updated
// here — callers are responsible for page-limit checks before resizing.
void MemoryAllocator::resize(WasmSize new_size) {
  memory_.resize(new_size);
}

std::optional<WasmSize> MemoryAllocator::getDeallocatedChunkSize(
WasmPointer ptr) const {
for (const auto &[chunk_size, ptrs] : available_) {
for (const auto &p : ptrs) {
if (ptr == p) {
return chunk_size;
}
}
std::optional<uint32_t> MemoryAllocator::readFree(
WasmPointer head_ptr) const {
auto head = read_u64(memory_, head_ptr);
if ((head & kOccupied) != 0) {
throw std::runtime_error{"free list points to a occupied header"};
}

return std::nullopt;
uint32_t prev = head;
if (prev == kNil) {
return std::nullopt;
}
return prev;
}

std::optional<WasmSize> MemoryAllocator::getAllocatedChunkSize(
WasmPointer ptr) const {
AllocationHeader header{
.chunk_sz = 0,
.allocation_sz = 0,
};
header.deserialize(ptr - AllocationHeaderSz, memory_);
BOOST_ASSERT(math::isPowerOf2(header.chunk_sz));

return header.allocation_sz;
return kMinAllocate << readOccupied(ptr - sizeof(Header));
}

size_t MemoryAllocator::getDeallocatedChunksNum() const {
size_t size = 0ull;
for (const auto &[_, ptrs] : available_) {
size += ptrs.size();
for (auto list : free_lists_) {
while (list) {
++size;
list = readFree(*list);
}
}

return size;
Expand Down
Loading

0 comments on commit be1a92a

Please sign in to comment.