Update immer library to v0.8.1 #1432
Conversation
Recent Review Details

Configuration used: CodeRabbit UI

- Files selected for processing (55)
- Files not processed due to max files limit (2)
- Files not summarized due to errors (4)
- Files not reviewed due to errors (7)
- Files skipped from review due to trivial changes (4)
- Additional comments not posted (366)
Actionable comments posted: 14
```cpp
            auto child = children[offset];
            auto result =
                mutate ? do_sub_mut(e, child, k, hash, shift + B, store)
                       : do_sub(child, k, hash, shift + B);
            switch (result.kind) {
            case sub_result::nothing:
                return {};
            case sub_result::singleton:
                if (node->datamap() == 0 && node->children_count() == 1 &&
                    shift > 0) {
                    if (mutate) {
                        node_t::delete_inner(node);
                        if (!result.mutated && child->dec())
                            node_t::delete_deep_shift(child, shift + B);
                    }
                    return {result.data.singleton, result.owned, mutate};
                } else {
                    auto r =
                        mutate ? node_t::move_inner_replace_inline(
                                     e,
                                     node,
                                     bit,
                                     offset,
                                     result.owned
                                         ? std::move(*result.data.singleton)
                                         : *result.data.singleton)
                               : node_t::copy_inner_replace_inline(
                                     node,
                                     bit,
                                     offset,
                                     *result.data.singleton);
                    if (result.owned)
                        detail::destroy_at(result.data.singleton);
                    if (!result.mutated && mutate && child->dec())
                        node_t::delete_deep_shift(child, shift + B);
                    return {node_t::owned_values(r, e), mutate};
                }
            case sub_result::tree:
                if (mutate) {
                    children[offset] = result.data.tree;
                    if (!result.mutated && child->dec())
                        node_t::delete_deep_shift(child, shift + B);
                    return {node, true};
                } else {
                    IMMER_TRY {
                        auto r = node_t::copy_inner_replace(
                            node, offset, result.data.tree);
                        return {node_t::owned(r, e), false};
                    }
                    IMMER_CATCH (...) {
                        node_t::delete_deep_shift(result.data.tree,
                                                  shift + B);
                        IMMER_RETHROW;
                    }
                }
            }
        } else if (node->datamap() & bit) {
            auto offset = node->data_count(bit);
            auto val = node->values() + offset;
            auto mutate_values = mutate && node->can_mutate_values(e);
            if (Equal{}(*val, k)) {
                auto nv = node->data_count();
                if (node->nodemap() || nv > 2) {
                    auto r = mutate ? node_t::move_inner_remove_value(
                                          e, node, bit, offset)
                                    : node_t::copy_inner_remove_value(
                                          node, bit, offset);
                    return {node_t::owned_values_safe(r, e), mutate};
                } else if (nv == 2) {
                    if (shift > 0) {
                        if (mutate_values) {
                            auto r = new (store)
                                T{std::move(node->values()[!offset])};
                            node_t::delete_inner(node);
                            return {r, true};
                        } else {
                            return {node->values() + !offset, false};
                        }
                    } else {
                        auto& v = node->values()[!offset];
                        auto r = node_t::make_inner_n(
                            0,
                            node->datamap() & ~bit,
                            mutate_values ? std::move(v) : v);
                        assert(!node->nodemap());
                        if (mutate)
                            node_t::delete_inner(node);
                        return {node_t::owned_values(r, e), mutate};
                    }
                } else {
                    assert(shift == 0);
                    if (mutate)
                        node_t::delete_inner(node);
                    return {empty(), mutate};
                }
            }
        }
        return {};
    }
}
```
The `do_sub_mut` method handles mutable deletions from the CHAMP data structure. It includes comprehensive logic to handle the different cases that arise depending on the structure's depth and mutation permissions. This method is complex and critical for maintaining the integrity of the data structure during deletions. Consider refactoring to improve readability and maintainability.
```diff
- auto result = mutate ? do_sub_mut(e, child, k, hash, shift + B, store) : do_sub(child, k, hash, shift + B);
+ auto result = this->sub_operation(e, child, k, hash, shift + B, store, mutate);
```
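For illustration only, here is a minimal sketch of the kind of dispatch helper the suggestion implies. The name `sub_operation` is hypothetical and not part of immer's API; the sketch assumes `do_sub_mut` and `do_sub` keep their current signatures and that their result types share a common conversion, as the original ternary already requires:

```cpp
// Hypothetical helper (not immer code): funnels the mutating and the
// persistent deletion paths through a single call site, so callers no
// longer spell out the ternary themselves.
template <typename K>
sub_result_mut sub_operation(edit_t e,
                             node_t* child,
                             const K& k,
                             hash_t hash,
                             shift_t shift,
                             void* store,
                             bool mutate) const
{
    return mutate ? do_sub_mut(e, child, k, hash, shift, store)
                  : do_sub(child, k, hash, shift);
}
```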
```cpp
#include <immer/config.hpp>
#include <immer/detail/combine_standard_layout.hpp>
#include <immer/detail/util.hpp>
#include <immer/detail/hamts/bits.hpp>
#include <immer/detail/util.hpp>

#include <cassert>

#ifdef NDEBUG
#define IMMER_HAMTS_TAGGED_NODE 0
#else
#define IMMER_HAMTS_TAGGED_NODE 1
#endif
#include <cstddef>
```
Consider grouping standard library includes together for better readability.
```cpp
#include <immer/config.hpp>
#include <immer/detail/combine_standard_layout.hpp>
#include <immer/detail/hamts/bits.hpp>
#include <immer/detail/util.hpp>

#include <cassert>
#include <cstddef>
```
```cpp
static refs_t& refs(const values_t* x)
{
    return auto_const_cast(get<refs_t>(*x));
}
static const ownee_t& ownee(const values_t* x) { return get<ownee_t>(*x); }
static ownee_t& ownee(values_t* x) { return get<ownee_t>(*x); }
static bool can_mutate(values_t* x, edit_t e)
{
    return refs(x).unique() || ownee(x).can_mutate(e);
}
```
Simplify the `can_mutate` function to improve readability.
```cpp
static bool can_mutate(values_t* x, edit_t e) {
    auto& refs = get<refs_t>(*x);
    auto& ownee = get<ownee_t>(*x);
    return refs.unique() || ownee.can_mutate(e);
}
```
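To make the rule behind `can_mutate` concrete, here is a standalone toy sketch (not immer's actual types): a node may be mutated in place either when its reference count is one, so no other owner can observe the change, or when it is owned by the same transient edit token that is asking.

```cpp
#include <atomic>

struct edit { void* token; };

struct toy_node {
    std::atomic<int> refcount{1};
    void* owner{nullptr}; // edit token that adopted this node, if any

    bool can_mutate(edit e) const
    {
        // Unique: nobody else holds a reference, so mutation is unobservable.
        // Owned: the same transient (edit token) adopted it and may reuse it.
        return refcount.load() == 1 || (owner != nullptr && owner == e.token);
    }
};
```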
```diff
 IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
 auto vp = p->impl.d.data.inner.values;
 if (vp && refs(vp).dec())
-    delete_values(vp, popcount(p->datamap()));
-deallocate_inner(p, popcount(p->nodemap()));
+    delete_values(vp, p->data_count());
+deallocate_inner(p, p->children_count());
```
Properly manage memory and references when deleting inner nodes to avoid memory leaks.
```cpp
static void delete_inner(node_t* p) {
    assert(p);
    IMMER_ASSERT_TAGGED(p->kind() == kind_t::inner);
    auto vp = p->impl.d.data.inner.values;
    if (vp && refs(vp).dec())
        delete_values(vp, p->data_count());
    // Ensure all child nodes are also properly managed, releasing them
    // before the node itself is deallocated (reading p->children() after
    // deallocate_inner(p) would be a use-after-free).
    for (auto i = 0u; i < p->children_count(); ++i) {
        delete_inner(p->children()[i]);
    }
    deallocate_inner(p, p->children_count());
}
```
```cpp
edit(void* v_)
    : v{v_}
{}
```
Ensure that the constructor initialization list is correctly formatted and that it adheres to the project's coding standards.
```diff
- edit(void* v_) : v{v_} {}
+ edit(void* v_) : v(v_) {}
```
```diff
 /*!
  * Returns a `const` reference to the element at position
  * `index`. It throws an `std::out_of_range` exception when @f$
  * index \geq size() @f$. It does not allocate memory and its
  * complexity is *effectively* @f$ O(1) @f$.
  */
-reference at(size_type index) const
-{ return impl_.get_check(index); }
+reference at(size_type index) const { return impl_.get_check(index); }
```
Consider checking for out-of-range access in the `at()` method so that it throws `std::out_of_range`.
```diff
- reference at(size_type index) const { return impl_.get_check(index); }
+ reference at(size_type index) const {
+     if (index >= size()) throw std::out_of_range("Index out of range");
+     return impl_.get_check(index);
+ }
```
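Assuming this `at()` belongs to one of immer's vector-like containers (the concrete container here is an assumption), a short usage sketch of the documented behavior:

```cpp
#include <immer/vector.hpp>
#include <iostream>
#include <stdexcept>

int main()
{
    auto v = immer::vector<int>{}.push_back(10).push_back(20);
    std::cout << v.at(1) << '\n'; // prints 20
    try {
        v.at(5); // index >= size(): throws std::out_of_range
    } catch (const std::out_of_range&) {
        std::cout << "caught std::out_of_range\n";
    }
}
```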
```diff
 static void* operator new(std::size_t size)
 {
-    using heap_type = typename HeapPolicy
-        ::template optimized<sizeof(Deriv)>::type;
+    using heap_type =
+        typename HeapPolicy ::template optimized<sizeof(Deriv)>::type;

     return heap_type::allocate(size);
 }

-static void operator delete (void* data, std::size_t size)
+static void operator delete(void* data, std::size_t size)
 {
-    using heap_type = typename HeapPolicy
-        ::template optimized<sizeof(Deriv)>::type;
+    using heap_type =
+        typename HeapPolicy ::template optimized<sizeof(Deriv)>::type;
```
Adjustments in spacing in the `new` and `delete` operator functions improve readability, but ensure they follow the coding standards of the project.
```diff
- using heap_type = typename HeapPolicy ::template optimized<sizeof(Deriv)>::type;
+ using heap_type = typename HeapPolicy::template optimized<sizeof(Deriv)>::type;
```
```diff
 template <typename Heap, std::size_t Limit = default_free_list_size>
 struct free_list_heap_policy
 {
     using type = debug_size_heap<Heap>;

     template <std::size_t Size>
     struct optimized
     {
-        using type = split_heap<
-            Size,
-            with_free_list_node<
-                thread_local_free_list_heap<
-                    Size,
-                    Limit,
-                    free_list_heap<
-                        Size, Limit,
-                        debug_size_heap<Heap>>>>,
-            debug_size_heap<Heap>>;
+        using type =
+            split_heap<Size,
+                       with_free_list_node<thread_local_free_list_heap<
+                           Size,
+                           Limit,
+                           free_list_heap<Size, Limit, debug_size_heap<Heap>>>>,
+                       debug_size_heap<Heap>>;
```
Refinements in template type usage in `free_list_heap_policy` enhance the flexibility and maintainability of the code. Ensure that these changes are well documented to avoid confusion among developers.

Would you like me to help document these template changes in the developer wiki or code comments?
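As a toy illustration of the adaptor-stacking pattern behind `free_list_heap_policy` (these are not immer's heaps; names and layering are simplified for exposition), each layer wraps a base heap and adds one concern:

```cpp
#include <cstddef>
#include <cstdlib>

// Base heap: raw allocation, with the (size, pointer) deallocate shape
// used throughout this sketch.
struct malloc_heap {
    static void* allocate(std::size_t size) { return std::malloc(size); }
    static void deallocate(std::size_t, void* p) { std::free(p); }
};

// Adaptor: wraps any Base heap and adds a live-allocation counter.
template <typename Base>
struct counting_heap {
    static inline std::size_t live = 0;
    static void* allocate(std::size_t size)
    {
        ++live;
        return Base::allocate(size);
    }
    static void deallocate(std::size_t size, void* p)
    {
        --live;
        Base::deallocate(size, p);
    }
};

// Layers compose by nesting, the same way split_heap, free_list_heap and
// debug_size_heap are nested in the real policy.
using heap = counting_heap<malloc_heap>;
```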
```diff
     IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
     return impl.d.data.inner.relaxed;
 }

 const relaxed_t* relaxed() const
 {
-    assert(kind() == kind_t::inner);
+    IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
```
Tip: Codebase Verification

The analysis of the `relaxed` method in the `node.hpp` file reveals that this method consistently assumes the node kind is `inner`, as indicated by the use of `impl.d.data.inner.relaxed`. This strongly suggests the method is designed to be used with inner nodes specifically.

However, the review comment raises a valid concern about handling different node kinds. The current implementation does not include explicit checks of the node kind before accessing `impl.d.data.inner.relaxed`, which could potentially lead to incorrect type casts or undefined behavior if the method is mistakenly used with a node of a different kind.

To address this, it would be prudent to implement additional safeguards within the `relaxed` method to verify the node kind before proceeding with operations specific to `inner` nodes. This would enhance the robustness of the code and prevent possible errors due to incorrect node-kind handling.

Analysis chain

Ensure that the `relaxed` method correctly handles different node kinds and does not lead to incorrect type casts.
Scripts Executed

The following script was executed for the analysis:

```bash
#!/bin/bash
# Verify correct handling of node kinds in the relaxed method.
rg "relaxed" src/immer/detail/rbts/node.hpp
```

Length of output: 4146
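One possible shape for such a safeguard, sketched here as a hypothetical hardening rather than immer's actual code (note that immer tracks the node kind only in tagged/debug builds, so a release-mode check would require keeping the tag around):

```cpp
// Hypothetical hardening sketch (fragment, not immer code): refuse to
// reinterpret non-inner storage instead of relying only on the
// debug-time assertion. Assumes kind() is available at runtime.
relaxed_t* relaxed()
{
    IMMER_ASSERT_TAGGED(kind() == kind_t::inner);
    if (kind() != kind_t::inner)
        std::abort(); // never reinterpret storage of another node kind
    return impl.d.data.inner.relaxed;
}
```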
```diff
 IMMER_TRY {
     mr = heap::allocate(sizeof_relaxed_n(n), norefs_tag{});
-} catch (...) {
-}
+}
 IMMER_CATCH (...) {
     heap::deallocate(sizeof_inner_r_n(n), mp);
-    throw;
+    IMMER_RETHROW;
```
Check the exception handling logic in `make_inner_r_n` to ensure that it correctly deallocates memory on failure.
```diff
- IMMER_TRY {
-     mr = heap::allocate(sizeof_relaxed_n(n), norefs_tag{});
- }
- IMMER_CATCH (...) {
-     heap::deallocate(sizeof_inner_r_n(n), mp);
-     IMMER_RETHROW;
- }
+ try {
+     mr = heap::allocate(sizeof_relaxed_n(n), norefs_tag{});
+ } catch (...) {
+     heap::deallocate(sizeof_inner_r_n(n), mp);
+     throw;
+ }
```
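For context, the usual pattern behind such try/catch wrapper macros is to compile away cleanly when exceptions are disabled; a sketch of that pattern follows. The real definitions live in immer/config.hpp and may differ in detail:

```cpp
// Sketch of the exception-optional macro pattern; not immer's exact code.
#if defined(__cpp_exceptions) || defined(_CPPUNWIND)
#define IMMER_TRY try
#define IMMER_CATCH(expr) catch (expr)
#define IMMER_RETHROW throw
#else
// With exceptions disabled, the "try" body always runs and the "catch"
// body is dead code the compiler can discard.
#define IMMER_TRY if (true)
#define IMMER_CATCH(expr) else if (false)
#define IMMER_RETHROW
#endif
```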
These changes are included in #1470 |
PR intention

Update the immer library to the latest stable version (v0.8.1). This also removes many build warnings.

Code changes brief

Only the immer folder has been changed, bringing it to the latest stable version of the immer library. #1423 needs this.