Skip to content

Commit

Permalink
Fix unaligned loads and stores (#4528) (#4531)
Browse files Browse the repository at this point in the history
Misaligned load and store operations are supported by both Intel and ARM
CPUs. However, in C++, these operations are undefined behavior (UB).
Replacing these operations with a `memcpy` removes the UB. The
compiled assembly is equivalent to the original, so there is no
performance penalty to using `memcpy`.

For context: The unaligned load and store operations fixed here were
originally introduced in the slab allocator (#4218).
  • Loading branch information
seelabs authored May 31, 2023
1 parent adde0c2 commit f709311
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 10 deletions.
25 changes: 18 additions & 7 deletions src/ripple/basics/SlabAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,16 +21,18 @@
#define RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED

#include <ripple/beast/type_name.h>

#include <boost/align.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/predef.h>

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <mutex>

#include <boost/align.hpp>
#include <boost/container/static_vector.hpp>
#include <boost/predef.h>

#if BOOST_OS_LINUX
#include <sys/mman.h>
#endif
Expand Down Expand Up @@ -76,7 +78,9 @@ class SlabAllocator

while (data + item <= p_ + size_)
{
*reinterpret_cast<std::uint8_t**>(data) = l_;
// Use memcpy to avoid unaligned UB
// (will optimize to equivalent code)
std::memcpy(data, &l_, sizeof(std::uint8_t*));
l_ = data;
data += item;
}
Expand Down Expand Up @@ -115,7 +119,11 @@ class SlabAllocator
ret = l_;

if (ret)
l_ = *reinterpret_cast<std::uint8_t**>(ret);
{
// Use memcpy to avoid unaligned UB
// (will optimize to equivalent code)
std::memcpy(&l_, ret, sizeof(std::uint8_t*));
}
}

return ret;
Expand All @@ -136,7 +144,10 @@ class SlabAllocator
assert(own(ptr));

std::lock_guard l(m_);
*reinterpret_cast<std::uint8_t**>(ptr) = l_;

// Use memcpy to avoid unaligned UB
// (will optimize to equivalent code)
std::memcpy(ptr, &l_, sizeof(std::uint8_t*));
l_ = ptr;
}
};
Expand Down
6 changes: 5 additions & 1 deletion src/ripple/basics/impl/partitioned_unordered_map.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,11 @@ namespace ripple {
static std::size_t
extract(uint256 const& key)
{
return *reinterpret_cast<std::size_t const*>(key.data());
std::size_t result;
// Use memcpy to avoid unaligned UB
// (will optimize to equivalent code)
std::memcpy(&result, key.data(), sizeof(std::size_t));
return result;
}

static std::size_t
Expand Down
12 changes: 10 additions & 2 deletions src/ripple/beast/hash/impl/xxhash.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ You can contact the author at :

#include <ripple/beast/hash/impl/xxhash.h>

#include <cstring>

//**************************************
// Tuning parameters
//**************************************
Expand Down Expand Up @@ -87,7 +89,7 @@ You can contact the author at :
//**************************************
// Includes & Memory related functions
//**************************************
//#include "xxhash.h"
// #include "xxhash.h"
// Modify the local functions below should you wish to use some other memory
// routines for malloc(), free()
#include <stdlib.h>
Expand Down Expand Up @@ -260,7 +262,13 @@ FORCE_INLINE U64
XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align == XXH_unaligned)
return endian == XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
{
// Use memcpy to avoid unaligned UB
U64 tmp_aligned;
std::memcpy(&tmp_aligned, ptr, sizeof(U64));
return endian == XXH_littleEndian ? tmp_aligned
: XXH_swap64(tmp_aligned);
}
else
return endian == XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr);
}
Expand Down

0 comments on commit f709311

Please sign in to comment.