From f709311762f4a4c97dbc47748980242ae9d1c466 Mon Sep 17 00:00:00 2001
From: Scott Determan
Date: Wed, 31 May 2023 16:28:33 -0400
Subject: [PATCH] Fix unaligned loads and stores (#4528) (#4531)

Misaligned load and store operations are supported by both Intel and
ARM CPUs. However, in C++, these operations are undefined behavior
(UB). Substituting these operations with a `memcpy` fixes this UB.
The compiled assembly code is equivalent to the original, so there is
no performance penalty to using `memcpy`.

For context: the unaligned load and store operations fixed here were
originally introduced in the slab allocator (#4218).
---
 src/ripple/basics/SlabAllocator.h              | 25 +++++++++++++------
 .../basics/impl/partitioned_unordered_map.cpp  |  6 ++++-
 src/ripple/beast/hash/impl/xxhash.cpp          | 12 +++++++--
 3 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/src/ripple/basics/SlabAllocator.h b/src/ripple/basics/SlabAllocator.h
index c966af318b6..ece96d0b873 100644
--- a/src/ripple/basics/SlabAllocator.h
+++ b/src/ripple/basics/SlabAllocator.h
@@ -21,16 +21,18 @@
 #define RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED
 
 #include <ripple/beast/type_name.h>
+
+#include <boost/align.hpp>
+#include <boost/container/static_vector.hpp>
+#include <boost/predef.h>
+
 #include <algorithm>
 #include <atomic>
 #include <cassert>
 #include <cstdint>
+#include <cstring>
 #include <mutex>
 
-#include <boost/align.hpp>
-#include <boost/container/static_vector.hpp>
-#include <boost/predef.h>
-
 #if BOOST_OS_LINUX
 #include <sys/mman.h>
 #endif
@@ -76,7 +78,9 @@ class SlabAllocator
 
         while (data + item <= p_ + size_)
         {
-            *reinterpret_cast<std::uint8_t**>(data) = l_;
+            // Use memcpy to avoid unaligned UB
+            // (will optimize to equivalent code)
+            std::memcpy(data, &l_, sizeof(std::uint8_t*));
             l_ = data;
             data += item;
         }
@@ -115,7 +119,11 @@ class SlabAllocator
             ret = l_;
 
             if (ret)
-                l_ = *reinterpret_cast<std::uint8_t**>(ret);
+            {
+                // Use memcpy to avoid unaligned UB
+                // (will optimize to equivalent code)
+                std::memcpy(&l_, ret, sizeof(std::uint8_t*));
+            }
         }
 
         return ret;
@@ -136,7 +144,10 @@ class SlabAllocator
         assert(own(ptr));
 
         std::lock_guard l(m_);
-        *reinterpret_cast<std::uint8_t**>(ptr) = l_;
+
+        // Use memcpy to avoid unaligned UB
+        // (will optimize to equivalent code)
+        std::memcpy(ptr, &l_, sizeof(std::uint8_t*));
         l_ = ptr;
     }
 };
diff --git a/src/ripple/basics/impl/partitioned_unordered_map.cpp b/src/ripple/basics/impl/partitioned_unordered_map.cpp
index 6fb2cbec1d4..3ced32eddff 100644
--- a/src/ripple/basics/impl/partitioned_unordered_map.cpp
+++ b/src/ripple/basics/impl/partitioned_unordered_map.cpp
@@ -31,7 +31,11 @@ namespace ripple {
 static std::size_t
 extract(uint256 const& key)
 {
-    return *reinterpret_cast<std::size_t const*>(key.data());
+    std::size_t result;
+    // Use memcpy to avoid unaligned UB
+    // (will optimize to equivalent code)
+    std::memcpy(&result, key.data(), sizeof(std::size_t));
+    return result;
 }
 
 static std::size_t
diff --git a/src/ripple/beast/hash/impl/xxhash.cpp b/src/ripple/beast/hash/impl/xxhash.cpp
index 76d5e7997f5..4a6c85db815 100644
--- a/src/ripple/beast/hash/impl/xxhash.cpp
+++ b/src/ripple/beast/hash/impl/xxhash.cpp
@@ -33,6 +33,8 @@ You can contact the author at :
 
 #include <ripple/beast/hash/impl/xxhash.h>
+
+#include <cstring>
 
 //**************************************
 // Tuning parameters
 //**************************************
@@ -87,7 +89,7 @@ You can contact the author at :
 //**************************************
 // Includes & Memory related functions
 //**************************************
-//#include "xxhash.h"
+// #include "xxhash.h"
 // Modify the local functions below should you wish to use some other memory
 // routines for malloc(), free()
 #include <stdlib.h>
@@ -260,7 +262,13 @@ FORCE_INLINE U64
 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
 {
     if (align == XXH_unaligned)
-        return endian == XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
+    {
+        // Use memcpy to avoid unaligned UB
+        U64 tmp_aligned;
+        std::memcpy(&tmp_aligned, ptr, sizeof(U64));
+        return endian == XXH_littleEndian ? tmp_aligned
+                                          : XXH_swap64(tmp_aligned);
+    }
     else
         return endian == XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr);
 }
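
Note for reviewers (not part of the patch): every hunk above applies the
same idiom, so here is a minimal, self-contained sketch of it. The
helper names `load_u64` and `store_u64` are illustrative only, not from
the codebase. Dereferencing a pointer that is not suitably aligned for
its type is UB in C++, while copying through `memcpy` is defined for any
alignment; optimizing compilers typically emit the same single
load/store instruction for it on x86-64 and AArch64.

    #include <cstdint>
    #include <cstring>

    // UB when p is not aligned to alignof(std::uint64_t):
    //     return *reinterpret_cast<std::uint64_t const*>(p);
    // Well-defined replacement; compiles to one 8-byte load:
    std::uint64_t
    load_u64(unsigned char const* p)
    {
        std::uint64_t v;
        std::memcpy(&v, p, sizeof(v));
        return v;
    }

    // Well-defined replacement for an unaligned 8-byte store:
    void
    store_u64(unsigned char* p, std::uint64_t v)
    {
        std::memcpy(p, &v, sizeof(v));
    }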