diff --git a/include/splinterdb/platform_linux/public_platform.h b/include/splinterdb/platform_linux/public_platform.h
index bc278c3a6..ec97447b8 100644
--- a/include/splinterdb/platform_linux/public_platform.h
+++ b/include/splinterdb/platform_linux/public_platform.h
@@ -28,6 +28,7 @@
  * compiler/processor.
  */
 #include
+#include
 
 // Types
 typedef unsigned char uchar;
@@ -61,8 +62,6 @@ static_assert(sizeof(uint64) == 8, "incorrect type");
 # define FALSE (0)
 #endif
 
-typedef uint8 bool8;
-
 typedef FILE platform_log_handle;
 
 // By default, info messages sent from platform_default_log() go to /dev/null
diff --git a/src/allocator.h b/src/allocator.h
index d344d302d..ba31723a6 100644
--- a/src/allocator.h
+++ b/src/allocator.h
@@ -251,7 +251,7 @@ allocator_print_allocated(allocator *al)
    return al->ops->print_allocated(al);
 }
 
-static inline bool
+static inline bool32
 allocator_page_valid(allocator *al, uint64 addr)
 {
    allocator_config *allocator_cfg = allocator_get_config(al);
diff --git a/src/btree.c b/src/btree.c
index 89be58967..fbdbc5a5b 100644
--- a/src/btree.c
+++ b/src/btree.c
@@ -182,7 +182,7 @@ btree_fill_index_entry(const btree_config *cfg,
    entry->pivot_data.stats = stats;
 }
 
-bool
+bool32
 btree_set_index_entry(const btree_config *cfg,
                       btree_hdr *hdr,
                       table_index k,
@@ -237,7 +237,7 @@ btree_set_index_entry(const btree_config *cfg,
    return TRUE;
 }
 
-static inline bool
+static inline bool32
 btree_insert_index_entry(const btree_config *cfg,
                          btree_hdr *hdr,
                          uint32 k,
@@ -245,7 +245,7 @@ btree_insert_index_entry(const btree_config *cfg,
                          uint64 new_addr,
                          btree_pivot_stats stats)
 {
-   bool succeeded = btree_set_index_entry(
+   bool32 succeeded = btree_set_index_entry(
       cfg, hdr, hdr->num_entries, new_pivot_key, new_addr, stats);
    if (succeeded) {
       node_offset this_entry_offset = hdr->offsets[hdr->num_entries - 1];
@@ -277,7 +277,7 @@ btree_fill_leaf_entry(const btree_config *cfg,
                  "entry->type not large enough to hold message_class");
 }
 
-static inline bool
+static inline bool32
 btree_can_set_leaf_entry(const btree_config *cfg,
                          const btree_hdr *hdr,
                          table_index k,
@@ -308,7 +308,7 @@ btree_can_set_leaf_entry(const btree_config *cfg,
    return TRUE;
 }
 
-bool
+bool32
 btree_set_leaf_entry(const btree_config *cfg,
                      btree_hdr *hdr,
                      table_index k,
@@ -355,7 +355,7 @@ btree_set_leaf_entry(const btree_config *cfg,
    return TRUE;
 }
 
-static inline bool
+static inline bool32
 btree_insert_leaf_entry(const btree_config *cfg,
                         btree_hdr *hdr,
                         table_index k,
@@ -363,7 +363,7 @@ btree_insert_leaf_entry(const btree_config *cfg,
                         message new_message)
 {
    debug_assert(k <= hdr->num_entries);
-   bool succeeded =
+   bool32 succeeded =
      btree_set_leaf_entry(cfg, hdr, hdr->num_entries, new_key, new_message);
    if (succeeded) {
       node_offset this_entry_offset = hdr->offsets[hdr->num_entries - 1];
@@ -428,7 +428,7 @@ int64
 btree_find_pivot(const btree_config *cfg,
                  const btree_hdr *hdr,
                  key target,
-                 bool *found)
+                 bool32 *found)
 {
    int64 lo = 0, hi = btree_num_entries(hdr);
 
@@ -472,7 +472,7 @@ static inline int64
 btree_find_tuple(const btree_config *cfg,
                  const btree_hdr *hdr,
                  key target,
-                 bool *found)
+                 bool32 *found)
 {
    int64 lo = 0, hi = btree_num_entries(hdr);
 
@@ -544,7 +544,7 @@ btree_create_leaf_incorporate_spec(const btree_config *cfg,
                                    leaf_incorporate_spec *spec)
 {
    spec->tuple_key = tuple_key;
-   bool found;
+   bool32 found;
    spec->idx = btree_find_tuple(cfg, hdr, tuple_key, &found);
    spec->old_entry_state = found ?
ENTRY_STILL_EXISTS : ENTRY_DID_NOT_EXIST; if (!found) { @@ -554,7 +554,7 @@ btree_create_leaf_incorporate_spec(const btree_config *cfg, } else { leaf_entry *entry = btree_get_leaf_entry(cfg, hdr, spec->idx); message oldmessage = leaf_entry_message(entry); - bool success; + bool32 success; success = merge_accumulator_init_from_message( &spec->msg.merged_message, heap_id, msg); if (!success) { @@ -578,7 +578,7 @@ destroy_leaf_incorporate_spec(leaf_incorporate_spec *spec) } } -static inline bool +static inline bool32 btree_can_perform_leaf_incorporate_spec(const btree_config *cfg, btree_hdr *hdr, const leaf_incorporate_spec *spec) @@ -601,13 +601,13 @@ btree_can_perform_leaf_incorporate_spec(const btree_config *cfg, } } -bool +bool32 btree_try_perform_leaf_incorporate_spec(const btree_config *cfg, btree_hdr *hdr, const leaf_incorporate_spec *spec, uint64 *generation) { - bool success; + bool32 success; switch (spec->old_entry_state) { case ENTRY_DID_NOT_EXIST: success = btree_insert_leaf_entry( @@ -667,8 +667,8 @@ btree_defragment_leaf(const btree_config *cfg, // IN { spec->old_entry_state = ENTRY_HAS_BEEN_REMOVED; } else { - leaf_entry *entry = btree_get_leaf_entry(cfg, scratch_hdr, i); - debug_only bool success = + leaf_entry *entry = btree_get_leaf_entry(cfg, scratch_hdr, i); + debug_only bool32 success = btree_set_leaf_entry(cfg, hdr, dst_idx++, @@ -708,7 +708,7 @@ btree_truncate_leaf(const btree_config *cfg, // IN static leaf_splitting_plan initial_plan = {0, FALSE}; -static bool +static bool32 most_of_entry_is_on_left_side(uint64 total_bytes, uint64 left_bytes, uint64 entry_size) @@ -869,7 +869,7 @@ btree_split_leaf_build_right_node(const btree_config *cfg, // IN if (!plan.insertion_goes_left) { spec->idx -= plan.split_idx; - bool incorporated = btree_try_perform_leaf_incorporate_spec( + bool32 incorporated = btree_try_perform_leaf_incorporate_spec( cfg, right_hdr, spec, generation); platform_assert(incorporated); } @@ -902,7 +902,7 @@ btree_split_leaf_cleanup_left_node(const btree_config *cfg, // IN * Assumes write lock on both nodes. *----------------------------------------------------------------------------- */ -static inline bool +static inline bool32 btree_index_is_full(const btree_config *cfg, // IN const btree_hdr *hdr) // IN { @@ -953,12 +953,12 @@ btree_split_index_build_right_node(const btree_config *cfg, // IN for (uint64 i = 0; i < target_right_entries; i++) { index_entry *entry = btree_get_index_entry(cfg, left_hdr, target_left_entries + i); - bool succeeded = btree_set_index_entry(cfg, - right_hdr, - i, - index_entry_key(entry), - index_entry_child_addr(entry), - entry->pivot_data.stats); + bool32 succeeded = btree_set_index_entry(cfg, + right_hdr, + i, + index_entry_key(entry), + index_entry_child_addr(entry), + entry->pivot_data.stats); platform_assert(succeeded); } } @@ -980,12 +980,12 @@ btree_defragment_index(const btree_config *cfg, // IN btree_reset_node_entries(cfg, hdr); for (uint64 i = 0; i < btree_num_entries(scratch_hdr); i++) { index_entry *entry = btree_get_index_entry(cfg, scratch_hdr, i); - bool succeeded = btree_set_index_entry(cfg, - hdr, - i, - index_entry_key(entry), - index_entry_child_addr(entry), - entry->pivot_data.stats); + bool32 succeeded = btree_set_index_entry(cfg, + hdr, + i, + index_entry_key(entry), + index_entry_child_addr(entry), + entry->pivot_data.stats); platform_assert(succeeded); } } @@ -1020,7 +1020,7 @@ btree_truncate_index(const btree_config *cfg, // IN * more nodes available for the given height. 
*----------------------------------------------------------------------------- */ -bool +bool32 btree_alloc(cache *cc, mini_allocator *mini, uint64 height, @@ -1060,7 +1060,7 @@ btree_node_get(cache *cc, node->hdr = (btree_hdr *)(node->page->data); } -static inline bool +static inline bool32 btree_node_claim(cache *cc, // IN const btree_config *cfg, // IN btree_node *node) // IN @@ -1124,7 +1124,7 @@ btree_node_get_from_cache_ctxt(const btree_config *cfg, // IN } -static inline bool +static inline bool32 btree_addrs_share_extent(cache *cc, uint64 left_addr, uint64 right_addr) { allocator *al = cache_get_allocator(cc); @@ -1159,7 +1159,7 @@ btree_create(cache *cc, platform_status rc = allocator_alloc(al, &base_addr, type); platform_assert_status_ok(rc); page_handle *root_page = cache_alloc(cc, base_addr, type); - bool pinned = (type == PAGE_TYPE_MEMTABLE); + bool32 pinned = (type == PAGE_TYPE_MEMTABLE); // set up the root btree_node root; @@ -1207,7 +1207,7 @@ btree_inc_ref_range(cache *cc, cc, cfg->data_cfg, PAGE_TYPE_BRANCH, meta_page_addr, start_key, end_key); } -bool +bool32 btree_dec_ref_range(cache *cc, const btree_config *cfg, uint64 root_addr, @@ -1220,7 +1220,7 @@ btree_dec_ref_range(cache *cc, cc, cfg->data_cfg, PAGE_TYPE_BRANCH, meta_page_addr, start_key, end_key); } -bool +bool32 btree_dec_ref(cache *cc, const btree_config *cfg, uint64 root_addr, @@ -1320,13 +1320,13 @@ btree_split_child_leaf(cache *cc, /* limit the scope of pivot_key, since subsequent mutations of the nodes * may invalidate the memory it points to. */ - key pivot_key = btree_splitting_pivot(cfg, child->hdr, spec, plan); - bool success = btree_insert_index_entry(cfg, - parent->hdr, - index_of_child_in_parent + 1, - pivot_key, - right_child.addr, - BTREE_PIVOT_STATS_UNKNOWN); + key pivot_key = btree_splitting_pivot(cfg, child->hdr, spec, plan); + bool32 success = btree_insert_index_entry(cfg, + parent->hdr, + index_of_child_in_parent + 1, + pivot_key, + right_child.addr, + BTREE_PIVOT_STATS_UNKNOWN); platform_assert(success); } btree_node_full_unlock(cc, cfg, parent); @@ -1346,7 +1346,7 @@ btree_split_child_leaf(cache *cc, btree_split_leaf_cleanup_left_node( cfg, scratch, child->hdr, spec, plan, right_child.addr); if (plan.insertion_goes_left) { - bool incorporated = btree_try_perform_leaf_incorporate_spec( + bool32 incorporated = btree_try_perform_leaf_incorporate_spec( cfg, child->hdr, spec, generation); platform_assert(incorporated); } @@ -1399,7 +1399,7 @@ btree_defragment_or_split_child_leaf(cache *cc, btree_node_unget(cc, cfg, parent); btree_node_lock(cc, cfg, child); btree_defragment_leaf(cfg, scratch, child->hdr, spec); - bool incorporated = btree_try_perform_leaf_incorporate_spec( + bool32 incorporated = btree_try_perform_leaf_incorporate_spec( cfg, child->hdr, spec, generation); platform_assert(incorporated); btree_node_full_unlock(cc, cfg, child); @@ -1647,7 +1647,7 @@ btree_grow_root(cache *cc, // IN } else { new_pivot = btree_get_pivot(cfg, child.hdr, 0); } - bool succeeded = btree_set_index_entry( + bool32 succeeded = btree_set_index_entry( cfg, root_node->hdr, 0, new_pivot, child.addr, BTREE_PIVOT_STATS_UNKNOWN); platform_assert(succeeded); @@ -1672,7 +1672,7 @@ btree_insert(cache *cc, // IN key tuple_key, // IN message msg, // IN uint64 *generation, // OUT - bool *was_unique) // OUT + bool32 *was_unique) // OUT { platform_status rc; leaf_incorporate_spec spec; @@ -1727,8 +1727,8 @@ btree_insert(cache *cc, // IN /* read lock on root_node, root_node is an index. 
*/ - bool found; - int64 child_idx = btree_find_pivot(cfg, root_node.hdr, tuple_key, &found); + bool32 found; + int64 child_idx = btree_find_pivot(cfg, root_node.hdr, tuple_key, &found); index_entry *parent_entry; if (child_idx < 0 || btree_index_is_full(cfg, root_node.hdr)) { @@ -1737,7 +1737,7 @@ btree_insert(cache *cc, // IN goto start_over; } btree_node_lock(cc, cfg, &root_node); - bool need_to_set_min_key = FALSE; + bool32 need_to_set_min_key = FALSE; if (child_idx < 0) { child_idx = 0; parent_entry = btree_get_index_entry(cfg, root_node.hdr, 0); @@ -1755,7 +1755,7 @@ btree_insert(cache *cc, // IN } if (need_to_set_min_key) { parent_entry = btree_get_index_entry(cfg, root_node.hdr, 0); - bool success = + bool32 success = btree_set_index_entry(cfg, root_node.hdr, 0, @@ -1808,7 +1808,7 @@ btree_insert(cache *cc, // IN btree_node_lock(cc, cfg, &parent_node); btree_node_lock(cc, cfg, &child_node); - bool need_to_set_min_key = FALSE; + bool32 need_to_set_min_key = FALSE; if (next_child_idx < 0) { next_child_idx = 0; index_entry *child_entry = @@ -1844,7 +1844,7 @@ btree_insert(cache *cc, // IN // this case index_entry *child_entry = btree_get_index_entry(cfg, parent_node.hdr, 0); - bool success = + bool32 success = btree_set_index_entry(cfg, parent_node.hdr, 0, @@ -1903,7 +1903,7 @@ btree_insert(cache *cc, // IN goto start_over; } btree_node_lock(cc, cfg, &child_node); - bool incorporated = btree_try_perform_leaf_incorporate_spec( + bool32 incorporated = btree_try_perform_leaf_incorporate_spec( cfg, child_node.hdr, &spec, generation); platform_assert(incorporated); btree_node_full_unlock(cc, cfg, &child_node); @@ -1919,7 +1919,7 @@ btree_insert(cache *cc, // IN destroy_leaf_incorporate_spec(&spec); goto start_over; } - bool need_to_rebuild_spec = FALSE; + bool32 need_to_rebuild_spec = FALSE; while (!btree_node_claim(cc, cfg, &child_node)) { btree_node_unget(cc, cfg, &child_node); platform_sleep_ns(leaf_wait); @@ -1994,7 +1994,7 @@ btree_lookup_node(cache *cc, // IN btree_node_get(cc, cfg, &node, type); for (h = btree_height(node.hdr); h > stop_at_height; h--) { - bool found; + bool32 found; child_idx = key_is_positive_infinity(target) ? btree_num_entries(node.hdr) - 1 : btree_find_pivot(cfg, node.hdr, target, &found); @@ -2027,7 +2027,7 @@ btree_lookup_with_ref(cache *cc, // IN key target, // IN btree_node *node, // OUT message *msg, // OUT - bool *found) // OUT + bool32 *found) // OUT { btree_lookup_node(cc, cfg, root_addr, target, 0, type, node, NULL); int64 idx = btree_find_tuple(cfg, node->hdr, target, found); @@ -2050,13 +2050,13 @@ btree_lookup(cache *cc, // IN btree_node node; message data; platform_status rc = STATUS_OK; - bool local_found; + bool32 local_found; btree_lookup_with_ref( cc, cfg, root_addr, type, target, &node, &data, &local_found); if (local_found) { - bool success = merge_accumulator_copy_message(result, data); - rc = success ? STATUS_OK : STATUS_NO_MEMORY; + bool32 success = merge_accumulator_copy_message(result, data); + rc = success ? 
STATUS_OK : STATUS_NO_MEMORY; btree_node_unget(cc, cfg, &node); } return rc; @@ -2069,7 +2069,7 @@ btree_lookup_and_merge(cache *cc, // IN page_type type, // IN key target, // IN merge_accumulator *data, // OUT - bool *local_found) // OUT + bool32 *local_found) // OUT { btree_node node; message local_data; @@ -2081,8 +2081,8 @@ btree_lookup_and_merge(cache *cc, // IN cc, cfg, root_addr, type, target, &node, &local_data, local_found); if (*local_found) { if (merge_accumulator_is_null(data)) { - bool success = merge_accumulator_copy_message(data, local_data); - rc = success ? STATUS_OK : STATUS_NO_MEMORY; + bool32 success = merge_accumulator_copy_message(data, local_data); + rc = success ? STATUS_OK : STATUS_NO_MEMORY; } else if (btree_merge_tuples(cfg, target, local_data, data)) { rc = STATUS_NO_MEMORY; } @@ -2173,11 +2173,11 @@ btree_lookup_async_with_ref(cache *cc, // IN key target, // IN btree_node *node_out, // OUT message *data, // OUT - bool *found, // OUT + bool32 *found, // OUT btree_async_ctxt *ctxt) // IN { cache_async_result res = 0; - bool done = FALSE; + bool32 done = FALSE; btree_node *node = &ctxt->node; do { @@ -2245,8 +2245,8 @@ btree_lookup_async_with_ref(cache *cc, // IN btree_async_set_state(ctxt, btree_async_state_get_leaf_complete); break; } - bool found_pivot; - int64 child_idx = + bool32 found_pivot; + int64 child_idx = btree_find_pivot(cfg, node->hdr, target, &found_pivot); if (child_idx < 0) { child_idx = 0; @@ -2315,11 +2315,11 @@ btree_lookup_async(cache *cc, // IN cache_async_result res; btree_node node; message data; - bool local_found; + bool32 local_found; res = btree_lookup_async_with_ref( cc, cfg, root_addr, target, &node, &data, &local_found, ctxt); if (res == async_success && local_found) { - bool success = merge_accumulator_copy_message(result, data); + bool32 success = merge_accumulator_copy_message(result, data); platform_assert(success); // FIXME btree_node_unget(cc, cfg, &node); } @@ -2333,7 +2333,7 @@ btree_lookup_and_merge_async(cache *cc, // IN uint64 root_addr, // IN key target, // IN merge_accumulator *data, // OUT - bool *local_found, // OUT + bool32 *local_found, // OUT btree_async_ctxt *ctxt) // IN { cache_async_result res; @@ -2344,7 +2344,7 @@ btree_lookup_and_merge_async(cache *cc, // IN cc, cfg, root_addr, target, &node, &local_data, local_found, ctxt); if (res == async_success && *local_found) { if (merge_accumulator_is_null(data)) { - bool success = merge_accumulator_copy_message(data, local_data); + bool32 success = merge_accumulator_copy_message(data, local_data); platform_assert(success); } else { int rc = btree_merge_tuples(cfg, target, local_data, data); @@ -2390,7 +2390,7 @@ btree_lookup_and_merge_async(cache *cc, // IN * the end node and end_idx. 
*----------------------------------------------------------------------------- */ -static bool +static bool32 btree_iterator_is_at_end(btree_iterator *itor) { return itor->curr.addr == itor->end_addr && itor->idx == itor->end_idx; @@ -2446,8 +2446,8 @@ btree_iterator_find_end(btree_iterator *itor) if (key_is_positive_infinity(itor->max_key)) { itor->end_idx = btree_num_entries(end.hdr); } else { - bool found; - int64 tmp; + bool32 found; + int64 tmp; if (itor->height == 0) { tmp = btree_find_tuple(itor->cfg, end.hdr, itor->max_key, &found); if (!found) { @@ -2559,7 +2559,7 @@ btree_iterator_advance(iterator *base_itor) platform_status -btree_iterator_at_end(iterator *itor, bool *at_end) +btree_iterator_at_end(iterator *itor, bool32 *at_end) { debug_assert(itor != NULL); *at_end = btree_iterator_is_at_end((btree_iterator *)itor); @@ -2611,7 +2611,7 @@ btree_iterator_init(cache *cc, page_type page_type, key min_key, key max_key, - bool do_prefetch, + bool32 do_prefetch, uint32 height) { platform_assert(root_addr != 0); @@ -2679,8 +2679,8 @@ btree_iterator_init(cache *cc, /* Once we've found end, we can unclaim curr. */ btree_node_unclaim(cc, cfg, &itor->curr); - bool found; - int64 tmp; + bool32 found; + int64 tmp; if (itor->height == 0) { tmp = btree_find_tuple(itor->cfg, itor->curr.hdr, min_key, &found); if (!found) { @@ -2805,8 +2805,8 @@ btree_pack_link_node(btree_pack_req *req, *edge_stats)) { btree_pack_create_next_node(req, height + 1, pivot); - parent = btree_pack_get_current_node(req, height + 1); - bool success = btree_set_index_entry( + parent = btree_pack_get_current_node(req, height + 1); + bool32 success = btree_set_index_entry( req->cfg, parent->hdr, 0, pivot, edge->addr, *edge_stats); platform_assert(success); } @@ -2879,7 +2879,7 @@ btree_pack_loop(btree_pack_req *req, // IN/OUT req->cfg, leaf->hdr, btree_num_entries(leaf->hdr), tuple_key, msg)) { leaf = btree_pack_create_next_node(req, 0, tuple_key); - bool result = + bool32 result = btree_set_leaf_entry(req->cfg, leaf->hdr, 0, tuple_key, msg); platform_assert(result); } @@ -2928,7 +2928,7 @@ btree_pack_post_loop(btree_pack_req *req, key last_key) root.addr = req->root_addr; btree_node_get(cc, cfg, &root, PAGE_TYPE_BRANCH); - debug_only bool success = btree_node_claim(cc, cfg, &root); + debug_only bool32 success = btree_node_claim(cc, cfg, &root); debug_assert(success); btree_node_lock(cc, cfg, &root); memmove(root.hdr, req->edge[req->height][0].hdr, btree_page_size(cfg)); @@ -2941,7 +2941,7 @@ btree_pack_post_loop(btree_pack_req *req, key last_key) mini_release(&req->mini, last_key); } -static bool +static bool32 btree_pack_can_fit_tuple(btree_pack_req *req, key tuple_key, message data) { return req->num_tuples < req->max_tuples; @@ -2981,7 +2981,7 @@ btree_pack(btree_pack_req *req) key tuple_key = NEGATIVE_INFINITY_KEY; message data; - bool at_end; + bool32 at_end; while (SUCCESS(iterator_at_end(req->itor, &at_end)) && !at_end) { iterator_get_curr(req->itor, &tuple_key, &data); @@ -3023,8 +3023,8 @@ btree_get_rank(cache *cc, btree_lookup_node( cc, cfg, root_addr, target, 0, PAGE_TYPE_BRANCH, &leaf, stats); - bool found; - int64 tuple_rank_in_leaf = btree_find_tuple(cfg, leaf.hdr, target, &found); + bool32 found; + int64 tuple_rank_in_leaf = btree_find_tuple(cfg, leaf.hdr, target, &found); if (!found) { tuple_rank_in_leaf++; } @@ -3086,7 +3086,7 @@ btree_count_in_range_by_iterator(cache *cc, memset(stats, 0, sizeof(*stats)); - bool at_end; + bool32 at_end; iterator_at_end(itor, &at_end); while (!at_end) { key curr_key; @@ 
-3408,19 +3408,19 @@ btree_space_use_in_range(cache *cc, return extents_used * btree_extent_size(cfg); } -bool +bool32 btree_verify_node(cache *cc, btree_config *cfg, uint64 addr, page_type type, - bool is_left_edge) + bool32 is_left_edge) { btree_node node; node.addr = addr; debug_assert(type == PAGE_TYPE_BRANCH || type == PAGE_TYPE_MEMTABLE); btree_node_get(cc, cfg, &node, type); table_index idx; - bool result = FALSE; + bool32 result = FALSE; for (idx = 0; idx < node.hdr->num_entries; idx++) { if (node.hdr->height == 0) { @@ -3526,7 +3526,7 @@ btree_verify_node(cache *cc, } } btree_node_unget(cc, cfg, &child); - bool child_is_left_edge = is_left_edge && idx == 0; + bool32 child_is_left_edge = is_left_edge && idx == 0; if (!btree_verify_node(cc, cfg, child.addr, type, child_is_left_edge)) { btree_node_unget(cc, cfg, &node); @@ -3541,7 +3541,7 @@ btree_verify_node(cache *cc, return result; } -bool +bool32 btree_verify_tree(cache *cc, btree_config *cfg, uint64 addr, page_type type) { return btree_verify_node(cc, cfg, addr, type, TRUE); @@ -3563,7 +3563,7 @@ btree_print_lookup(cache *cc, // IN btree_node_get(cc, cfg, &node, type); for (h = node.hdr->height; h > 0; h--) { - bool found; + bool32 found; child_idx = btree_find_pivot(cfg, node.hdr, target, &found); if (child_idx < 0) { child_idx = 0; @@ -3575,8 +3575,8 @@ btree_print_lookup(cache *cc, // IN node = child_node; } - bool found; - int64 idx = btree_find_tuple(cfg, node.hdr, target, &found); + bool32 found; + int64 idx = btree_find_tuple(cfg, node.hdr, target, &found); platform_default_log( "Matching index: %lu (%d) of %u\n", idx, found, node.hdr->num_entries); btree_node_unget(cc, cfg, &node); diff --git a/src/btree.h b/src/btree.h index 50e282760..beb7318bb 100644 --- a/src/btree.h +++ b/src/btree.h @@ -131,7 +131,7 @@ typedef struct btree_iterator { iterator super; cache *cc; btree_config *cfg; - bool do_prefetch; + bool32 do_prefetch; uint32 height; page_type page_type; key min_key; @@ -195,7 +195,7 @@ typedef struct btree_async_ctxt { cache_async_ctxt *cache_ctxt; // cache ctxt for async get btree_async_state prev_state; // Previous state btree_async_state state; // Current state - bool was_async; // Was the last cache_get async ? + bool32 was_async; // Was the last cache_get async ? 
btree_node node; // Current node uint64 child_addr; // Child disk address } btree_async_ctxt; @@ -210,7 +210,7 @@ btree_insert(cache *cc, // IN key tuple_key, // IN message data, // IN uint64 *generation, // OUT - bool *was_unique); // OUT + bool32 *was_unique); // OUT /* *----------------------------------------------------------------------------- @@ -248,14 +248,14 @@ btree_inc_ref_range(cache *cc, key start_key, key end_key); -bool +bool32 btree_dec_ref_range(cache *cc, const btree_config *cfg, uint64 root_addr, key start_key, key end_key); -bool +bool32 btree_dec_ref(cache *cc, const btree_config *cfg, uint64 root_addr, @@ -277,7 +277,7 @@ btree_lookup(cache *cc, key target, merge_accumulator *result); -static inline bool +static inline bool32 btree_found(merge_accumulator *result) { return !merge_accumulator_is_null(result); @@ -290,7 +290,7 @@ btree_lookup_and_merge(cache *cc, page_type type, key target, merge_accumulator *data, - bool *local_found); + bool32 *local_found); cache_async_result btree_lookup_async(cache *cc, @@ -306,7 +306,7 @@ btree_lookup_and_merge_async(cache *cc, // IN uint64 root_addr, // IN key target, // IN merge_accumulator *data, // OUT - bool *local_found, // OUT + bool32 *local_found, // OUT btree_async_ctxt *ctxt); // IN void @@ -317,7 +317,7 @@ btree_iterator_init(cache *cc, page_type page_type, key min_key, key max_key, - bool do_prefetch, + bool32 do_prefetch, uint32 height); void @@ -420,7 +420,7 @@ btree_print_lookup(cache *cc, page_type type, key target); -bool +bool32 btree_verify_tree(cache *cc, btree_config *cfg, uint64 addr, page_type type); uint64 diff --git a/src/btree_private.h b/src/btree_private.h index a400c2ab3..bd2ed1c6c 100644 --- a/src/btree_private.h +++ b/src/btree_private.h @@ -82,7 +82,7 @@ btree_create_leaf_incorporate_spec(const btree_config *cfg, message message, leaf_incorporate_spec *spec); -bool +bool32 btree_try_perform_leaf_incorporate_spec(const btree_config *cfg, btree_hdr *hdr, const leaf_incorporate_spec *spec, @@ -95,8 +95,9 @@ btree_try_perform_leaf_incorporate_spec(const btree_config *cfg, * for concurrency reasons). */ typedef struct leaf_splitting_plan { - uint64 split_idx; // keys with idx < split_idx go left - bool insertion_goes_left; // does the key to be inserted go to the left child + uint64 split_idx; // keys with idx < split_idx go left + bool32 + insertion_goes_left; // does the key to be inserted go to the left child } leaf_splitting_plan; /* @@ -105,7 +106,7 @@ typedef struct leaf_splitting_plan { * functions defined below may call these extern functions. 
* ************************************************************************* */ -bool +bool32 btree_set_index_entry(const btree_config *cfg, btree_hdr *hdr, table_index k, @@ -113,7 +114,7 @@ btree_set_index_entry(const btree_config *cfg, uint64 new_addr, btree_pivot_stats stats); -bool +bool32 btree_set_leaf_entry(const btree_config *cfg, btree_hdr *hdr, table_index k, @@ -135,7 +136,7 @@ int64 btree_find_pivot(const btree_config *cfg, const btree_hdr *hdr, key target, - bool *found); + bool32 *found); leaf_splitting_plan btree_build_leaf_splitting_plan(const btree_config *cfg, // IN diff --git a/src/cache.h b/src/cache.h index fe5a03936..ed10fc50a 100644 --- a/src/cache.h +++ b/src/cache.h @@ -137,7 +137,7 @@ typedef page_handle *(*page_alloc_fn)(cache *cc, uint64 addr, page_type type); typedef void (*extent_discard_fn)(cache *cc, uint64 addr, page_type type); typedef page_handle *(*page_get_fn)(cache *cc, uint64 addr, - bool blocking, + bool32 blocking, page_type type); typedef cache_async_result (*page_get_async_fn)(cache *cc, uint64 addr, @@ -146,23 +146,23 @@ typedef cache_async_result (*page_get_async_fn)(cache *cc, typedef void (*page_async_done_fn)(cache *cc, page_type type, cache_async_ctxt *ctxt); -typedef bool (*page_try_claim_fn)(cache *cc, page_handle *page); +typedef bool32 (*page_try_claim_fn)(cache *cc, page_handle *page); typedef void (*page_sync_fn)(cache *cc, page_handle *page, - bool is_blocking, + bool32 is_blocking, page_type type); typedef void (*extent_sync_fn)(cache *cc, uint64 addr, uint64 *pages_outstanding); typedef void (*page_prefetch_fn)(cache *cc, uint64 addr, page_type type); -typedef int (*evict_fn)(cache *cc, bool ignore_pinned); +typedef int (*evict_fn)(cache *cc, bool32 ignore_pinned); typedef void (*assert_ungot_fn)(cache *cc, uint64 addr); typedef void (*validate_page_fn)(cache *cc, page_handle *page, uint64 addr); typedef void (*io_stats_fn)(cache *cc, uint64 *read_bytes, uint64 *write_bytes); typedef uint32 (*count_dirty_fn)(cache *cc); typedef uint16 (*page_get_read_ref_fn)(cache *cc, page_handle *page); -typedef bool (*cache_present_fn)(cache *cc, page_handle *page); -typedef void (*enable_sync_get_fn)(cache *cc, bool enabled); +typedef bool32 (*cache_present_fn)(cache *cc, page_handle *page); +typedef void (*enable_sync_get_fn)(cache *cc, bool32 enabled); typedef allocator *(*get_allocator_fn)(const cache *cc); typedef cache_config *(*cache_config_fn)(const cache *cc); typedef void (*cache_print_fn)(platform_log_handle *log_handle, cache *cc); @@ -278,7 +278,7 @@ cache_extent_discard(cache *cc, uint64 addr, page_type type) *---------------------------------------------------------------------- */ static inline page_handle * -cache_get(cache *cc, uint64 addr, bool blocking, page_type type) +cache_get(cache *cc, uint64 addr, bool32 blocking, page_type type) { return cc->ops->page_get(cc, addr, blocking, type); } @@ -367,7 +367,7 @@ cache_unget(cache *cc, page_handle *page) * Does not block. 
*---------------------------------------------------------------------- */ -static inline bool +static inline bool32 cache_try_claim(cache *cc, page_handle *page) { return cc->ops->page_try_claim(cc, page); @@ -509,7 +509,10 @@ cache_unpin(cache *cc, page_handle *page) *----------------------------------------------------------------------------- */ static inline void -cache_page_sync(cache *cc, page_handle *page, bool is_blocking, page_type type) +cache_page_sync(cache *cc, + page_handle *page, + bool32 is_blocking, + page_type type) { return cc->ops->page_sync(cc, page, is_blocking, type); } @@ -574,7 +577,7 @@ cache_flush(cache *cc) *----------------------------------------------------------------------------- */ static inline int -cache_evict(cache *cc, bool ignore_pinned_pages) +cache_evict(cache *cc, bool32 ignore_pinned_pages) { return cc->ops->evict(cc, ignore_pinned_pages); } @@ -732,7 +735,7 @@ cache_get_read_ref(cache *cc, page_handle *page) * Returns TRUE if page is present in the cache. *----------------------------------------------------------------------------- */ -static inline bool +static inline bool32 cache_present(cache *cc, page_handle *page) { return cc->ops->cache_present(cc, page); @@ -750,7 +753,7 @@ cache_present(cache *cc, page_handle *page) *----------------------------------------------------------------------------- */ static inline void -cache_enable_sync_get(cache *cc, bool enabled) +cache_enable_sync_get(cache *cc, bool32 enabled) { cc->ops->enable_sync_get(cc, enabled); } diff --git a/src/clockcache.c b/src/clockcache.c index be02994cd..d628cdaa6 100644 --- a/src/clockcache.c +++ b/src/clockcache.c @@ -141,12 +141,12 @@ uint8 clockcache_get_allocator_ref(clockcache *cc, uint64 addr); page_handle * -clockcache_get(clockcache *cc, uint64 addr, bool blocking, page_type type); +clockcache_get(clockcache *cc, uint64 addr, bool32 blocking, page_type type); void clockcache_unget(clockcache *cc, page_handle *page); -bool +bool32 clockcache_try_claim(clockcache *cc, page_handle *page); void @@ -182,7 +182,7 @@ clockcache_async_done(clockcache *cc, page_type type, cache_async_ctxt *ctxt); void clockcache_page_sync(clockcache *cc, page_handle *page, - bool is_blocking, + bool32 is_blocking, page_type type); void @@ -192,7 +192,7 @@ void clockcache_flush(clockcache *cc); int -clockcache_evict_all(clockcache *cc, bool ignore_pinned); +clockcache_evict_all(clockcache *cc, bool32 ignore_pinned); void clockcache_wait(clockcache *cc); @@ -230,11 +230,11 @@ clockcache_count_dirty(clockcache *cc); uint16 clockcache_get_read_ref(clockcache *cc, page_handle *page); -bool +bool32 clockcache_present(clockcache *cc, page_handle *page); static void -clockcache_enable_sync_get(clockcache *cc, bool enabled); +clockcache_enable_sync_get(clockcache *cc, bool32 enabled); static allocator * clockcache_get_allocator(const clockcache *cc); @@ -289,7 +289,7 @@ clockcache_extent_discard_virtual(cache *c, uint64 addr, page_type type) } page_handle * -clockcache_get_virtual(cache *c, uint64 addr, bool blocking, page_type type) +clockcache_get_virtual(cache *c, uint64 addr, bool32 blocking, page_type type) { clockcache *cc = (clockcache *)c; return clockcache_get(cc, addr, blocking, type); @@ -302,7 +302,7 @@ clockcache_unget_virtual(cache *c, page_handle *page) clockcache_unget(cc, page); } -bool +bool32 clockcache_try_claim_virtual(cache *c, page_handle *page) { clockcache *cc = (clockcache *)c; @@ -378,7 +378,7 @@ clockcache_async_done_virtual(cache *c, page_type type, cache_async_ctxt 
*ctxt) void clockcache_page_sync_virtual(cache *c, page_handle *page, - bool is_blocking, + bool32 is_blocking, page_type type) { clockcache *cc = (clockcache *)c; @@ -400,7 +400,7 @@ clockcache_flush_virtual(cache *c) } int -clockcache_evict_all_virtual(cache *c, bool ignore_pinned) +clockcache_evict_all_virtual(cache *c, bool32 ignore_pinned) { clockcache *cc = (clockcache *)c; return clockcache_evict_all(cc, ignore_pinned); @@ -476,7 +476,7 @@ clockcache_get_read_ref_virtual(cache *c, page_handle *page) return clockcache_get_read_ref(cc, page); } -bool +bool32 clockcache_present_virtual(cache *c, page_handle *page) { clockcache *cc = (clockcache *)c; @@ -484,7 +484,7 @@ clockcache_present_virtual(cache *c, page_handle *page) } void -clockcache_enable_sync_get_virtual(cache *c, bool enabled) +clockcache_enable_sync_get_virtual(cache *c, bool32 enabled) { clockcache *cc = (clockcache *)c; clockcache_enable_sync_get(cc, enabled); @@ -951,7 +951,7 @@ typedef enum { *---------------------------------------------------------------------- */ static get_rc -clockcache_try_get_read(clockcache *cc, uint32 entry_number, bool set_access) +clockcache_try_get_read(clockcache *cc, uint32 entry_number, bool32 set_access) { const threadid tid = platform_get_tid(); @@ -1192,10 +1192,10 @@ clockcache_try_get_write(clockcache *cc, uint32 entry_number) * test and set. *---------------------------------------------------------------------- */ -static inline bool +static inline bool32 clockcache_ok_to_writeback(clockcache *cc, uint32 entry_number, - bool with_access) + bool32 with_access) { uint32 status = clockcache_get_entry(cc, entry_number)->status; return ((status == CC_CLEANABLE1_STATUS) @@ -1212,10 +1212,10 @@ clockcache_ok_to_writeback(clockcache *cc, * -- CC_CLEANABLE2_STATUS (= 0 | CC_ACCESSED) // dirty *---------------------------------------------------------------------- */ -static inline bool +static inline bool32 clockcache_try_set_writeback(clockcache *cc, uint32 entry_number, - bool with_access) + bool32 with_access) { // Validate first, as we need access to volatile status * below. 
debug_assert(entry_number < cc->cfg->page_capacity, @@ -1305,7 +1305,7 @@ clockcache_write_callback(void *metadata, *---------------------------------------------------------------------- */ void -clockcache_batch_start_writeback(clockcache *cc, uint64 batch, bool is_urgent) +clockcache_batch_start_writeback(clockcache *cc, uint64 batch, bool32 is_urgent) { uint32 entry_no, next_entry_no; uint64 addr, first_addr, end_addr, i; @@ -1552,16 +1552,16 @@ clockcache_evict_batch(clockcache *cc, uint32 batch) *---------------------------------------------------------------------- */ void -clockcache_move_hand(clockcache *cc, bool is_urgent) +clockcache_move_hand(clockcache *cc, bool32 is_urgent) { - const threadid tid = platform_get_tid(); - volatile bool *evict_batch_busy; - volatile bool *clean_batch_busy; - uint64 cleaner_hand; + const threadid tid = platform_get_tid(); + volatile bool32 *evict_batch_busy; + volatile bool32 *clean_batch_busy; + uint64 cleaner_hand; /* move the hand a batch forward */ - uint64 evict_hand = cc->per_thread[tid].free_hand; - debug_only bool was_busy = TRUE; + uint64 evict_hand = cc->per_thread[tid].free_hand; + debug_only bool32 was_busy = TRUE; if (evict_hand != CC_UNMAPPED_ENTRY) { evict_batch_busy = &cc->batch_busy[evict_hand]; was_busy = __sync_bool_compare_and_swap(evict_batch_busy, TRUE, FALSE); @@ -1596,8 +1596,8 @@ clockcache_move_hand(clockcache *cc, bool is_urgent) uint32 clockcache_get_free_page(clockcache *cc, uint32 status, - bool refcount, - bool blocking) + bool32 refcount, + bool32 blocking) { uint32 entry_no; uint64 num_passes = 0; @@ -1703,7 +1703,7 @@ clockcache_flush(clockcache *cc) *----------------------------------------------------------------------------- */ int -clockcache_evict_all(clockcache *cc, bool ignore_pinned_pages) +clockcache_evict_all(clockcache *cc, bool32 ignore_pinned_pages) { uint32 evict_hand; uint32 i; @@ -2081,10 +2081,10 @@ clockcache_extent_discard(clockcache *cc, uint64 addr, page_type type) * Blocks while the page is loaded into cache if necessary. *---------------------------------------------------------------------- */ -static bool +static bool32 clockcache_get_internal(clockcache *cc, // IN uint64 addr, // IN - bool blocking, // IN + bool32 blocking, // IN page_type type, // IN page_handle **page) // OUT { @@ -2257,9 +2257,9 @@ clockcache_get_internal(clockcache *cc, // IN *---------------------------------------------------------------------- */ page_handle * -clockcache_get(clockcache *cc, uint64 addr, bool blocking, page_type type) +clockcache_get(clockcache *cc, uint64 addr, bool32 blocking, page_type type) { - bool retry; + bool32 retry; page_handle *handle; debug_assert(cc->per_thread[platform_get_tid()].enable_sync_get @@ -2534,7 +2534,7 @@ clockcache_unget(clockcache *cc, page_handle *page) * readlock before trying to claim again to avoid deadlock. 
*---------------------------------------------------------------------- */ -bool +bool32 clockcache_try_claim(clockcache *cc, page_handle *page) { uint32 entry_number = clockcache_page_to_entry_number(cc, page); @@ -2679,7 +2679,7 @@ clockcache_unpin(clockcache *cc, page_handle *page) void clockcache_page_sync(clockcache *cc, page_handle *page, - bool is_blocking, + bool32 is_blocking, page_type type) { uint32 entry_number = clockcache_page_to_entry_number(cc, page); @@ -3223,14 +3223,14 @@ clockcache_get_read_ref(clockcache *cc, page_handle *page) return ref_count; } -bool +bool32 clockcache_present(clockcache *cc, page_handle *page) { return clockcache_lookup(cc, page->disk_addr) != CC_UNMAPPED_ENTRY; } static void -clockcache_enable_sync_get(clockcache *cc, bool enabled) +clockcache_enable_sync_get(clockcache *cc, bool32 enabled) { cc->per_thread[platform_get_tid()].enable_sync_get = enabled; } diff --git a/src/clockcache.h b/src/clockcache.h index 09e092717..647abc33e 100644 --- a/src/clockcache.h +++ b/src/clockcache.h @@ -29,7 +29,7 @@ typedef struct clockcache_config { cache_config super; io_config *io_cfg; uint64 capacity; - bool use_stats; + bool32 use_stats; char logfile[MAX_STRING_LENGTH]; // computed @@ -127,14 +127,14 @@ struct clockcache { volatile uint8 *pincount; // Clock hands and related metadata - volatile uint32 evict_hand; - volatile uint32 free_hand; - volatile bool *batch_busy; - uint64 cleaner_gap; + volatile uint32 evict_hand; + volatile uint32 free_hand; + volatile bool32 *batch_busy; + uint64 cleaner_gap; volatile struct { volatile uint32 free_hand; - bool enable_sync_get; + bool32 enable_sync_get; } PLATFORM_CACHELINE_ALIGNED per_thread[MAX_THREADS]; // Stats diff --git a/src/data_internal.h b/src/data_internal.h index 630e1e01a..56b55f733 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -44,19 +44,19 @@ typedef struct key { ((key){.kind = POSITIVE_INFINITY, .user_slice = INVALID_SLICE}) #define NULL_KEY ((key){.kind = USER_KEY, .user_slice = NULL_SLICE}) -static inline bool +static inline bool32 key_is_negative_infinity(key k) { return k.kind == NEGATIVE_INFINITY; } -static inline bool +static inline bool32 key_is_positive_infinity(key k) { return k.kind == POSITIVE_INFINITY; } -static inline bool +static inline bool32 key_is_user_key(key k) { return k.kind == USER_KEY; @@ -81,7 +81,7 @@ key_create(uint64 length, const void *data) return (key){.kind = USER_KEY, .user_slice = slice_create(length, data)}; } -static inline bool +static inline bool32 key_equals(key a, key b) { return a.kind == b.kind @@ -89,7 +89,7 @@ key_equals(key a, key b) slice_equals(a.user_slice, b.user_slice)); } -static inline bool +static inline bool32 key_is_null(key k) { return k.kind == USER_KEY && slice_is_null(k.user_slice); @@ -322,21 +322,21 @@ message_create(message_type type, slice data) return (message){.type = type, .data = data}; } -static inline bool +static inline bool32 message_is_null(message msg) { - bool r = slice_is_null(msg.data); + bool32 r = slice_is_null(msg.data); debug_assert(IMPLIES(r, msg.type == MESSAGE_TYPE_INVALID)); return r; } -static inline bool +static inline bool32 message_is_definitive(message msg) { return msg.type == MESSAGE_TYPE_INSERT || msg.type == MESSAGE_TYPE_DELETE; } -static inline bool +static inline bool32 message_is_invalid_user_type(message msg) { return msg.type == MESSAGE_TYPE_INVALID @@ -488,7 +488,7 @@ merge_accumulator_deinit(merge_accumulator *ma) ma->type = MESSAGE_TYPE_INVALID; } -static inline bool +static inline bool32 
merge_accumulator_is_definitive(const merge_accumulator *ma) { return ma->type == MESSAGE_TYPE_INSERT || ma->type == MESSAGE_TYPE_DELETE; @@ -508,7 +508,7 @@ merge_accumulator_to_value(const merge_accumulator *ma) } /* Initialize an uninitialized merge_accumulator and copy msg into it. */ -static inline bool +static inline bool32 merge_accumulator_init_from_message(merge_accumulator *ma, platform_heap_id heap_id, message msg) @@ -524,10 +524,10 @@ merge_accumulator_set_to_null(merge_accumulator *ma) writable_buffer_set_to_null(&ma->data); } -static inline bool +static inline bool32 merge_accumulator_is_null(const merge_accumulator *ma) { - bool r = writable_buffer_is_null(&ma->data); + bool32 r = writable_buffer_is_null(&ma->data); debug_assert(IMPLIES(r, ma->type == MESSAGE_TYPE_INVALID)); return r; } diff --git a/src/io.h b/src/io.h index ae236f0bf..e291a8eb4 100644 --- a/src/io.h +++ b/src/io.h @@ -43,7 +43,7 @@ typedef platform_status (*io_write_fn)(io_handle *io, void *buf, uint64 bytes, uint64 addr); -typedef io_async_req *(*io_get_async_req_fn)(io_handle *io, bool blocking); +typedef io_async_req *(*io_get_async_req_fn)(io_handle *io, bool32 blocking); typedef struct iovec *(*io_get_iovec_fn)(io_handle *io, io_async_req *req); typedef void *(*io_get_metadata_fn)(io_handle *io, io_async_req *req); typedef platform_status (*io_read_async_fn)(io_handle *io, @@ -59,7 +59,7 @@ typedef platform_status (*io_write_async_fn)(io_handle *io, typedef void (*io_cleanup_fn)(io_handle *io, uint64 count); typedef void (*io_cleanup_all_fn)(io_handle *io); typedef void (*io_thread_register_fn)(io_handle *io); -typedef bool (*io_max_latency_elapsed_fn)(io_handle *io, timestamp ts); +typedef bool32 (*io_max_latency_elapsed_fn)(io_handle *io, timestamp ts); typedef void *(*io_get_context_fn)(io_handle *io); @@ -110,7 +110,7 @@ io_write(io_handle *io, void *buf, uint64 bytes, uint64 addr) } static inline io_async_req * -io_get_async_req(io_handle *io, bool blocking) +io_get_async_req(io_handle *io, bool32 blocking) { return io->ops->get_async_req(io, blocking); } @@ -168,7 +168,7 @@ io_thread_register(io_handle *io) } } -static inline bool +static inline bool32 io_max_latency_elapsed(io_handle *io, timestamp ts) { if (io->ops->max_latency_elapsed) { diff --git a/src/iterator.h b/src/iterator.h index 7f221ace6..7c2e714b0 100644 --- a/src/iterator.h +++ b/src/iterator.h @@ -11,7 +11,7 @@ typedef struct iterator iterator; typedef void (*iterator_get_curr_fn)(iterator *itor, key *curr_key, message *msg); -typedef platform_status (*iterator_at_end_fn)(iterator *itor, bool *at_end); +typedef platform_status (*iterator_at_end_fn)(iterator *itor, bool32 *at_end); typedef platform_status (*iterator_advance_fn)(iterator *itor); typedef void (*iterator_print_fn)(iterator *itor); @@ -35,7 +35,7 @@ iterator_get_curr(iterator *itor, key *curr_key, message *msg) } static inline platform_status -iterator_at_end(iterator *itor, bool *at_end) +iterator_at_end(iterator *itor, bool32 *at_end) { return itor->ops->at_end(itor, at_end); } diff --git a/src/memtable.c b/src/memtable.c index da24b3968..92a66b995 100644 --- a/src/memtable.c +++ b/src/memtable.c @@ -19,13 +19,13 @@ #define MEMTABLE_INSERT_LOCK_IDX 0 #define MEMTABLE_LOOKUP_LOCK_IDX 1 -bool +bool32 memtable_is_full(const memtable_config *cfg, memtable *mt) { return cfg->max_extents_per_memtable <= mini_num_extents(&mt->mini); } -bool +bool32 memtable_is_empty(memtable_context *ctxt) { return ctxt->is_empty; @@ -55,7 +55,7 @@ memtable_end_insert(memtable_context 
*ctxt) platform_batch_rwlock_unget(ctxt->rwlock, MEMTABLE_INSERT_LOCK_IDX); } -static inline bool +static inline bool32 memtable_try_begin_insert_rotation(memtable_context *ctxt) { if (!platform_batch_rwlock_try_claim(ctxt->rwlock, MEMTABLE_INSERT_LOCK_IDX)) @@ -217,7 +217,7 @@ memtable_insert(memtable_context *ctxt, uint64 *leaf_generation) { const threadid tid = platform_get_tid(); - bool was_unique; + bool32 was_unique; platform_status rc = btree_insert(ctxt->cc, ctxt->cfg.btree_cfg, @@ -244,12 +244,12 @@ memtable_insert(memtable_context *ctxt, * if there are no outstanding refs, then destroy and reinit memtable and * transition to READY */ -bool +bool32 memtable_dec_ref_maybe_recycle(memtable_context *ctxt, memtable *mt) { cache *cc = ctxt->cc; - bool freed = btree_dec_ref(cc, mt->cfg, mt->root_addr, PAGE_TYPE_MEMTABLE); + bool32 freed = btree_dec_ref(cc, mt->cfg, mt->root_addr, PAGE_TYPE_MEMTABLE); if (freed) { platform_assert(mt->state == MEMTABLE_STATE_INCORPORATED); mt->root_addr = btree_create(cc, mt->cfg, &mt->mini, PAGE_TYPE_MEMTABLE); @@ -295,7 +295,7 @@ void memtable_deinit(cache *cc, memtable *mt) { mini_release(&mt->mini, NULL_KEY); - debug_only bool freed = + debug_only bool32 freed = btree_dec_ref(cc, mt->cfg, mt->root_addr, PAGE_TYPE_MEMTABLE); debug_assert(freed); } diff --git a/src/memtable.h b/src/memtable.h index 44767a408..255868648 100644 --- a/src/memtable.h +++ b/src/memtable.h @@ -36,7 +36,7 @@ typedef struct memtable { btree_config *cfg; } PLATFORM_CACHELINE_ALIGNED memtable; -static inline bool +static inline bool32 memtable_try_transition(memtable *mt, memtable_state old_state, memtable_state new_state) @@ -95,7 +95,7 @@ memtable_transition(memtable *mt, memtable_state old_state, memtable_state new_state) { - bool success = memtable_try_transition(mt, old_state, new_state); + bool32 success = memtable_try_transition(mt, old_state, new_state); platform_assert(success); } @@ -131,7 +131,7 @@ typedef struct memtable_context { // read lock to read and write lock to modify. 
volatile uint64 generation_retired; - bool is_empty; + bool32 is_empty; // Effectively thread local, no locking at all: btree_scratch scratch[MAX_THREADS]; @@ -166,7 +166,7 @@ memtable_insert(memtable_context *ctxt, message msg, uint64 *generation); -bool +bool32 memtable_dec_ref_maybe_recycle(memtable_context *ctxt, memtable *mt); uint64 @@ -260,24 +260,24 @@ memtable_zap(cache *cc, memtable *mt) btree_dec_ref(cc, mt->cfg, mt->root_addr, PAGE_TYPE_MEMTABLE); } -static inline bool +static inline bool32 memtable_ok_to_lookup(memtable *mt) { return mt->state != MEMTABLE_STATE_INCORPORATING && mt->state != MEMTABLE_STATE_INCORPORATED; } -static inline bool +static inline bool32 memtable_ok_to_lookup_compacted(memtable *mt) { return mt->state == MEMTABLE_STATE_COMPACTED || mt->state == MEMTABLE_STATE_INCORPORATION_ASSIGNED; } -bool +bool32 memtable_is_empty(memtable_context *mt_ctxt); -static inline bool +static inline bool32 memtable_verify(cache *cc, memtable *mt) { return btree_verify_tree(cc, mt->cfg, mt->root_addr, PAGE_TYPE_MEMTABLE); diff --git a/src/merge.c b/src/merge.c index 13444240b..3cd2f4945 100644 --- a/src/merge.c +++ b/src/merge.c @@ -21,7 +21,7 @@ void merge_get_curr(iterator *itor, key *curr_key, message *data); platform_status -merge_at_end(iterator *itor, bool *at_end); +merge_at_end(iterator *itor, bool32 *at_end); platform_status merge_advance(iterator *itor); @@ -44,7 +44,7 @@ static inline int bsearch_comp(const ordered_iterator *itor_one, const ordered_iterator *itor_two, const data_config *cfg, - bool *keys_equal) + bool32 *keys_equal) { int cmp = data_key_compare(cfg, itor_one->curr_key, itor_two->curr_key); *keys_equal = (cmp == 0); @@ -64,7 +64,7 @@ merge_comp(const void *one, const void *two, void *ctxt) const ordered_iterator *itor_one = *(ordered_iterator **)one; const ordered_iterator *itor_two = *(ordered_iterator **)two; data_config *cfg = (data_config *)ctxt; - bool ignore_keys_equal; + bool32 ignore_keys_equal; return bsearch_comp(itor_one, itor_two, cfg, &ignore_keys_equal); } @@ -74,19 +74,19 @@ bsearch_insert(register const ordered_iterator *key, ordered_iterator **base0, const size_t nmemb, const data_config *cfg, - bool *prev_equal_out, - bool *next_equal_out) + bool32 *prev_equal_out, + bool32 *next_equal_out) { register ordered_iterator **base = base0; register int lim, cmp; register ordered_iterator **p; - bool prev_equal = FALSE; - bool next_equal = FALSE; + bool32 prev_equal = FALSE; + bool32 next_equal = FALSE; for (lim = nmemb; lim != 0; lim >>= 1) { p = base + (lim >> 1); - bool keys_equal; + bool32 keys_equal; cmp = bsearch_comp(key, *p, cfg, &keys_equal); debug_assert(cmp != 0); @@ -159,7 +159,7 @@ advance_and_resort_min_ritor(merge_iterator *merge_itor) return rc; } - bool at_end; + bool32 at_end; // if it's exhausted, kill it and move the ritors up the queue. 
rc = iterator_at_end(merge_itor->ordered_iterators[0]->itor, &at_end); if (!SUCCESS(rc)) { @@ -183,8 +183,8 @@ advance_and_resort_min_ritor(merge_iterator *merge_itor) goto out; } - bool prev_equal; - bool next_equal; + bool32 prev_equal; + bool32 next_equal; // otherwise, find its position in the array // Add 1 to return value since it gives offset from [1] int index = 1 @@ -241,8 +241,8 @@ merge_resolve_equal_keys(merge_iterator *merge_itor) #endif // there is more than one copy of the current key - bool success = merge_accumulator_copy_message(&merge_itor->merge_buffer, - merge_itor->curr_data); + bool32 success = merge_accumulator_copy_message(&merge_itor->merge_buffer, + merge_itor->curr_data); if (!success) { return STATUS_NO_MEMORY; } @@ -303,7 +303,7 @@ merge_resolve_equal_keys(merge_iterator *merge_itor) */ static inline platform_status merge_finalize_updates_and_discard_deletes(merge_iterator *merge_itor, - bool *discarded) + bool32 *discarded) { data_config *cfg = merge_itor->cfg; message_type class = message_class(merge_itor->curr_data); @@ -311,7 +311,7 @@ merge_finalize_updates_and_discard_deletes(merge_iterator *merge_itor, if (message_data(merge_itor->curr_data) != merge_accumulator_data(&merge_itor->merge_buffer)) { - bool success = merge_accumulator_copy_message( + bool32 success = merge_accumulator_copy_message( &merge_itor->merge_buffer, merge_itor->curr_data); if (!success) { return STATUS_NO_MEMORY; @@ -336,7 +336,7 @@ merge_finalize_updates_and_discard_deletes(merge_iterator *merge_itor, } static platform_status -advance_one_loop(merge_iterator *merge_itor, bool *retry) +advance_one_loop(merge_iterator *merge_itor, bool32 *retry) { *retry = FALSE; // Determine whether we're at the end. @@ -366,7 +366,7 @@ advance_one_loop(merge_iterator *merge_itor, bool *retry) } } - bool discarded; + bool32 discarded; rc = merge_finalize_updates_and_discard_deletes(merge_itor, &discarded); if (!SUCCESS(rc)) { return rc; @@ -457,7 +457,7 @@ merge_iterator_create(platform_heap_id hid, merge_itor->num_remaining = num_trees; i = 0; while (i < merge_itor->num_remaining) { - bool at_end; + bool32 at_end; rc = iterator_at_end(merge_itor->ordered_iterators[i]->itor, &at_end); if (!SUCCESS(rc)) { goto destroy; @@ -490,7 +490,7 @@ merge_iterator_create(platform_heap_id hid, merge_itor->ordered_iterators[i]->next_key_equal = (cmp == 0); } - bool retry; + bool32 retry; rc = advance_one_loop(merge_itor, &retry); if (!SUCCESS(rc)) { goto out; @@ -559,7 +559,7 @@ merge_iterator_destroy(platform_heap_id hid, merge_iterator **merge_itor) */ platform_status merge_at_end(iterator *itor, // IN - bool *at_end) // OUT + bool32 *at_end) // OUT { merge_iterator *merge_itor = (merge_iterator *)itor; *at_end = merge_itor->at_end; @@ -606,7 +606,7 @@ merge_advance(iterator *itor) platform_status rc = STATUS_OK; merge_iterator *merge_itor = (merge_iterator *)itor; - bool retry; + bool32 retry; do { merge_itor->curr_key = NULL_KEY; merge_itor->curr_data = NULL_MESSAGE; @@ -642,7 +642,7 @@ merge_iterator_print(merge_iterator *merge_itor) platform_default_log("** curr: %s\n", key_string(data_cfg, curr_key)); platform_default_log("----------------------------------------\n"); for (i = 0; i < merge_itor->num_trees; i++) { - bool at_end; + bool32 at_end; iterator_at_end(merge_itor->ordered_iterators[i]->itor, &at_end); platform_default_log("%u: ", merge_itor->ordered_iterators[i]->seq); if (at_end) { diff --git a/src/merge.h b/src/merge.h index 0383fbce3..d9ab6a58f 100644 --- a/src/merge.h +++ b/src/merge.h @@ 
-21,7 +21,7 @@ typedef struct ordered_iterator { int seq; key curr_key; message curr_data; - bool next_key_equal; + bool32 next_key_equal; } ordered_iterator; /* @@ -59,10 +59,10 @@ extern struct merge_behavior merge_full, merge_intermediate, merge_raw; typedef struct merge_iterator { iterator super; // handle for iterator.h API int num_trees; // number of trees in the forest - bool merge_messages; - bool finalize_updates; - bool emit_deletes; - bool at_end; + bool32 merge_messages; + bool32 finalize_updates; + bool32 emit_deletes; + bool32 at_end; int num_remaining; // number of ritors not at end data_config *cfg; // point message tree data config key curr_key; // current key diff --git a/src/mini_allocator.c b/src/mini_allocator.c index b8edfac36..ee9ab7a53 100644 --- a/src/mini_allocator.c +++ b/src/mini_allocator.c @@ -274,7 +274,7 @@ mini_init(mini_allocator *mini, uint64 meta_tail, uint64 num_batches, page_type type, - bool keyed) + bool32 keyed) { platform_assert(num_batches <= MINI_MAX_BATCHES); platform_assert(num_batches != 0); @@ -364,13 +364,13 @@ mini_num_entries(page_handle *meta_page) * Side effects: *----------------------------------------------------------------------------- */ -static bool +static bool32 entry_fits_in_page(uint64 page_size, uint64 start, uint64 entry_size) { return start + entry_size <= page_size; } -static bool +static bool32 mini_keyed_append_entry(mini_allocator *mini, uint64 batch, page_handle *meta_page, @@ -404,7 +404,7 @@ mini_keyed_append_entry(mini_allocator *mini, return TRUE; } -static bool +static bool32 mini_unkeyed_append_entry(mini_allocator *mini, page_handle *meta_page, uint64 extent_addr) @@ -507,14 +507,14 @@ mini_set_next_meta_addr(mini_allocator *mini, hdr->next_meta_addr = next_meta_addr; } -static bool +static bool32 mini_append_entry(mini_allocator *mini, uint64 batch, key entry_key, uint64 next_addr) { page_handle *meta_page = mini_full_lock_meta_tail(mini); - bool success; + bool32 success; if (mini->keyed) { success = mini_keyed_append_entry(mini, batch, meta_page, next_addr, entry_key); @@ -597,7 +597,7 @@ mini_alloc(mini_allocator *mini, platform_assert_status_ok(rc); next_addr = extent_addr; - bool success = mini_append_entry(mini, batch, alloc_key, next_addr); + bool32 success = mini_append_entry(mini, batch, alloc_key, next_addr); platform_assert(success); } @@ -665,7 +665,7 @@ mini_release(mini_allocator *mini, key end_key) */ void -mini_deinit(cache *cc, uint64 meta_head, page_type type, bool pinned) +mini_deinit(cache *cc, uint64 meta_head, page_type type, bool32 pinned) { allocator *al = cache_get_allocator(cc); uint64 meta_addr = meta_head; @@ -758,16 +758,16 @@ mini_destroy_unused(mini_allocator *mini) *----------------------------------------------------------------------------- */ -typedef bool (*mini_for_each_fn)(cache *cc, - page_type type, - uint64 base_addr, - void *out); +typedef bool32 (*mini_for_each_fn)(cache *cc, + page_type type, + uint64 base_addr, + void *out); static void mini_unkeyed_for_each(cache *cc, uint64 meta_head, page_type type, - bool pinned, + bool32 pinned, mini_for_each_fn func, void *out) { @@ -796,7 +796,7 @@ typedef enum boundary_state { after_end = 2 } boundary_state; -static bool +static bool32 interval_intersects_range(boundary_state left_state, boundary_state right_state) { /* @@ -837,7 +837,7 @@ state(data_config *cfg, key start_key, key end_key, key entry_start_key) * point passed to mini_release. 
*----------------------------------------------------------------------------- */ -static bool +static bool32 mini_keyed_for_each(cache *cc, data_config *cfg, uint64 meta_head, @@ -848,10 +848,10 @@ mini_keyed_for_each(cache *cc, void *out) { // We return true for cleanup if every call to func returns TRUE. - bool should_cleanup = TRUE; + bool32 should_cleanup = TRUE; // Should not be called if there are no intersecting ranges, we track with // did_work. - debug_only bool did_work = FALSE; + debug_only bool32 did_work = FALSE; uint64 meta_addr = meta_head; @@ -882,8 +882,9 @@ mini_keyed_for_each(cache *cc, if (interval_intersects_range(current_state[batch], next_state)) { debug_code(did_work = TRUE); - bool entry_should_cleanup = func(cc, type, extent_addr[batch], out); - should_cleanup = should_cleanup && entry_should_cleanup; + bool32 entry_should_cleanup = + func(cc, type, extent_addr[batch], out); + should_cleanup = should_cleanup && entry_should_cleanup; } extent_addr[batch] = entry->extent_addr; @@ -913,7 +914,7 @@ mini_keyed_for_each(cache *cc, * point passed to mini_release. *----------------------------------------------------------------------------- */ -static bool +static bool32 mini_keyed_for_each_self_exclusive(cache *cc, data_config *cfg, uint64 meta_head, @@ -924,10 +925,10 @@ mini_keyed_for_each_self_exclusive(cache *cc, void *out) { // We return true for cleanup if every call to func returns TRUE. - bool should_cleanup = TRUE; + bool32 should_cleanup = TRUE; // Should not be called if there are no intersecting ranges, we track with // did_work. - debug_only bool did_work = FALSE; + debug_only bool32 did_work = FALSE; uint64 meta_addr = meta_head; page_handle *meta_page = mini_get_claim_meta_page(cc, meta_head, type); @@ -958,8 +959,9 @@ mini_keyed_for_each_self_exclusive(cache *cc, if (interval_intersects_range(current_state[batch], next_state)) { debug_code(did_work = TRUE); - bool entry_should_cleanup = func(cc, type, extent_addr[batch], out); - should_cleanup = should_cleanup && entry_should_cleanup; + bool32 entry_should_cleanup = + func(cc, type, extent_addr[batch], out); + should_cleanup = should_cleanup && entry_should_cleanup; } extent_addr[batch] = entry->extent_addr; @@ -1007,7 +1009,7 @@ mini_unkeyed_inc_ref(cache *cc, uint64 meta_head) return ref - MINI_NO_REFS; } -static bool +static bool32 mini_dealloc_extent(cache *cc, page_type type, uint64 base_addr, void *out) { allocator *al = cache_get_allocator(cc); @@ -1020,7 +1022,7 @@ mini_dealloc_extent(cache *cc, page_type type, uint64 base_addr, void *out) } uint8 -mini_unkeyed_dec_ref(cache *cc, uint64 meta_head, page_type type, bool pinned) +mini_unkeyed_dec_ref(cache *cc, uint64 meta_head, page_type type, bool32 pinned) { if (type == PAGE_TYPE_MEMTABLE) { platform_assert(pinned); @@ -1074,7 +1076,7 @@ mini_unkeyed_dec_ref(cache *cc, uint64 meta_head, page_type type, bool pinned) * Deallocation/cache side effects. 
*----------------------------------------------------------------------------- */ -static bool +static bool32 mini_keyed_inc_ref_extent(cache *cc, page_type type, uint64 base_addr, @@ -1103,7 +1105,7 @@ mini_keyed_inc_ref(cache *cc, NULL); } -static bool +static bool32 mini_keyed_dec_ref_extent(cache *cc, page_type type, uint64 base_addr, @@ -1131,7 +1133,7 @@ mini_wait_for_blockers(cache *cc, uint64 meta_head) } } -bool +bool32 mini_keyed_dec_ref(cache *cc, data_config *data_cfg, page_type type, @@ -1140,7 +1142,7 @@ mini_keyed_dec_ref(cache *cc, key end_key) { mini_wait_for_blockers(cc, meta_head); - bool should_cleanup = + bool32 should_cleanup = mini_keyed_for_each_self_exclusive(cc, data_cfg, meta_head, @@ -1203,7 +1205,7 @@ mini_unblock_dec_ref(cache *cc, uint64 meta_head) * None. *----------------------------------------------------------------------------- */ -static bool +static bool32 mini_keyed_count_extents(cache *cc, page_type type, uint64 base_addr, void *out) { uint64 *count = (uint64 *)out; @@ -1244,7 +1246,7 @@ mini_keyed_extent_count(cache *cc, * Standard cache side effects. *----------------------------------------------------------------------------- */ -static bool +static bool32 mini_prefetch_extent(cache *cc, page_type type, uint64 base_addr, void *out) { cache_prefetch(cc, base_addr, type); diff --git a/src/mini_allocator.h b/src/mini_allocator.h index 7c9162aa5..86b6eb84e 100644 --- a/src/mini_allocator.h +++ b/src/mini_allocator.h @@ -37,8 +37,8 @@ typedef struct mini_allocator { allocator *al; cache *cc; data_config *data_cfg; - bool keyed; - bool pinned; + bool32 keyed; + bool32 pinned; uint64 meta_head; volatile uint64 meta_tail; page_type type; @@ -57,7 +57,7 @@ mini_init(mini_allocator *mini, uint64 meta_tail, uint64 num_batches, page_type type, - bool keyed); + bool32 keyed); void mini_release(mini_allocator *mini, key end_key); @@ -77,7 +77,10 @@ mini_alloc(mini_allocator *mini, uint8 mini_unkeyed_inc_ref(cache *cc, uint64 meta_head); uint8 -mini_unkeyed_dec_ref(cache *cc, uint64 meta_head, page_type type, bool pinned); +mini_unkeyed_dec_ref(cache *cc, + uint64 meta_head, + page_type type, + bool32 pinned); void mini_keyed_inc_ref(cache *cc, @@ -86,7 +89,7 @@ mini_keyed_inc_ref(cache *cc, uint64 meta_head, key start_key, key end_key); -bool +bool32 mini_keyed_dec_ref(cache *cc, data_config *data_cfg, page_type type, diff --git a/src/pcq.h b/src/pcq.h index bd583fdc6..a4e3ac9e2 100644 --- a/src/pcq.h +++ b/src/pcq.h @@ -48,14 +48,14 @@ pcq_count(const pcq *q) } // Return TRUE if PCQ is empty, FALSE otherwise -static inline bool +static inline bool32 pcq_is_empty(const pcq *q) { return q->head.v == q->tail.v; } // Return TRUE if PCQ is full, FALSE otherwise -static inline bool +static inline bool32 pcq_is_full(const pcq *q) { return pcq_count(q) == q->num_elems; diff --git a/src/platform_linux/laio.c b/src/platform_linux/laio.c index 1efa72282..5184780e3 100644 --- a/src/platform_linux/laio.c +++ b/src/platform_linux/laio.c @@ -36,7 +36,7 @@ static platform_status laio_write(io_handle *ioh, void *buf, uint64 bytes, uint64 addr); static io_async_req * -laio_get_async_req(io_handle *ioh, bool blocking); +laio_get_async_req(io_handle *ioh, bool32 blocking); struct iovec * laio_get_iovec(io_handle *ioh, io_async_req *req); @@ -117,7 +117,7 @@ io_handle_init(laio_handle *io, status = io_setup(cfg->kernel_queue_size, &io->ctx); platform_assert(status == 0); - bool is_create = ((cfg->flags & O_CREAT) != 0); + bool32 is_create = ((cfg->flags & O_CREAT) != 0); if 
(is_create) { io->fd = open(cfg->filename, cfg->flags, cfg->perms); } else { @@ -245,7 +245,7 @@ laio_get_kth_req(laio_handle *io, uint64 k) * laio_get_async_req() - Return an Async IO request structure for this thread. */ static io_async_req * -laio_get_async_req(io_handle *ioh, bool blocking) +laio_get_async_req(io_handle *ioh, bool32 blocking) { laio_handle *io; io_async_req *req; @@ -423,13 +423,13 @@ laio_cleanup_all(io_handle *ioh) } } -static inline bool +static inline bool32 laio_config_valid_page_size(io_config *cfg) { return (cfg->page_size == LAIO_DEFAULT_PAGE_SIZE); } -static inline bool +static inline bool32 laio_config_valid_extent_size(io_config *cfg) { return (cfg->extent_size == LAIO_DEFAULT_EXTENT_SIZE); diff --git a/src/platform_linux/laio.h b/src/platform_linux/laio.h index 8ca704598..6ab500625 100644 --- a/src/platform_linux/laio.h +++ b/src/platform_linux/laio.h @@ -38,7 +38,7 @@ struct io_async_req { io_callback_fn callback; // issuer callback char metadata[64]; // issuer callback data uint64 number; // request number/id - bool busy; // request in-use flag + bool32 busy; // request in-use flag uint64 bytes; // total bytes in the IO request uint64 count; // number of vector elements struct iovec iovec[]; // vector with IO offsets and size diff --git a/src/platform_linux/platform.c b/src/platform_linux/platform.c index fe9d12b3e..58d7111a5 100644 --- a/src/platform_linux/platform.c +++ b/src/platform_linux/platform.c @@ -9,8 +9,8 @@ __thread threadid xxxtid = INVALID_TID; -bool platform_use_hugetlb = FALSE; -bool platform_use_mlock = FALSE; +bool32 platform_use_hugetlb = FALSE; +bool32 platform_use_mlock = FALSE; // By default, platform_default_log() messages are sent to /dev/null // and platform_error_log() messages go to stderr (see below). @@ -139,7 +139,7 @@ platform_buffer_deinit(buffer_handle *bh) */ platform_status platform_thread_create(platform_thread *thread, - bool detached, + bool32 detached, platform_thread_worker worker, void *arg, platform_heap_id UNUSED_PARAM(heap_id)) @@ -295,7 +295,7 @@ platform_batch_rwlock_full_unlock(platform_batch_rwlock *lock, uint64 lock_idx) *----------------------------------------------------------------------------- */ -bool +bool32 platform_batch_rwlock_try_claim(platform_batch_rwlock *lock, uint64 lock_idx) { threadid tid = platform_get_tid(); diff --git a/src/platform_linux/platform.h b/src/platform_linux/platform.h index ae6d8ac64..e3e74f2b0 100644 --- a/src/platform_linux/platform.h +++ b/src/platform_linux/platform.h @@ -33,9 +33,7 @@ * Section 1: * Shared types/typedefs that don't rely on anything platform-specific */ -#if !defined(__cplusplus) -typedef int32 bool; -#endif +typedef int32 bool32; #if !defined(SPLINTER_DEBUG) # define SPLINTER_DEBUG 0 @@ -184,8 +182,8 @@ typedef struct { int last_token_len; } platform_strtok_ctx; -extern bool platform_use_hugetlb; -extern bool platform_use_mlock; +extern bool32 platform_use_hugetlb; +extern bool32 platform_use_mlock; /* @@ -673,7 +671,7 @@ platform_spinlock_destroy(platform_spinlock *lock); platform_status platform_thread_create(platform_thread *thread, - bool detached, + bool32 detached, platform_thread_worker worker, void *arg, platform_heap_id heap_id); @@ -747,7 +745,7 @@ max_size_t(size_t a, size_t b) return a > b ? 
a : b; } -static inline bool +static inline bool32 SUCCESS(const platform_status s) { return STATUS_IS_EQ(s, STATUS_OK); diff --git a/src/platform_linux/platform_inline.h b/src/platform_linux/platform_inline.h index 8114d3dbb..0e4f3d99d 100644 --- a/src/platform_linux/platform_inline.h +++ b/src/platform_linux/platform_inline.h @@ -28,7 +28,7 @@ platform_popcount(uint32 x) #define platform_hash64 XXH64 #define platform_hash128 XXH128 -static inline bool +static inline bool32 platform_checksum_is_equal(checksum128 left, checksum128 right) { return XXH128_isEqual(left, right); @@ -216,13 +216,13 @@ platform_yield() {} // platform predicates -static inline bool +static inline bool32 STATUS_IS_EQ(const platform_status s1, const platform_status s2) { return s1.r == s2.r; } -static inline bool +static inline bool32 STATUS_IS_NE(const platform_status s1, const platform_status s2) { return s1.r != s2.r; diff --git a/src/platform_linux/platform_types.h b/src/platform_linux/platform_types.h index 9c1574311..61095beb6 100644 --- a/src/platform_linux/platform_types.h +++ b/src/platform_linux/platform_types.h @@ -126,7 +126,7 @@ platform_batch_rwlock_unget(platform_batch_rwlock *lock, uint64 lock_idx); * Callers still hold a shared lock after a failed claim attempt. * Callers _must_ release their shared lock after a failed claim attempt. */ -bool +bool32 platform_batch_rwlock_try_claim(platform_batch_rwlock *lock, uint64 lock_idx); /* shared-lock -> claim, BUT(!) may temporarily release the shared-lock in the diff --git a/src/rc_allocator.c b/src/rc_allocator.c index 8217f79a1..872e771b3 100644 --- a/src/rc_allocator.c +++ b/src/rc_allocator.c @@ -203,7 +203,7 @@ const static allocator_ops rc_allocator_ops = { * Is page address 'base_addr' a valid extent address? I.e. it is the address * of the 1st page in an extent. */ -debug_only static inline bool +debug_only static inline bool32 rc_allocator_valid_extent_addr(rc_allocator *al, uint64 base_addr) { return ((base_addr % al->cfg->io_cfg->extent_size) == 0); @@ -688,7 +688,7 @@ rc_allocator_alloc(rc_allocator *al, // IN { uint64 first_hand = al->hand % al->cfg->extent_capacity; uint64 hand; - bool extent_is_free = FALSE; + bool32 extent_is_free = FALSE; do { hand = __sync_fetch_and_add(&al->hand, 1) % al->cfg->extent_capacity; @@ -841,7 +841,7 @@ rc_allocator_print_allocated(rc_allocator *al) uint64 nallocated = al->stats.curr_allocated; // For more than a few allocated extents, print enclosing { } tags. - bool print_curly = (nallocated > 20); + bool32 print_curly = (nallocated > 20); platform_default_log( "Allocated extents: %lu\n%s", nallocated, (print_curly ? 
"{\n" : "")); diff --git a/src/routing_filter.c b/src/routing_filter.c index afd83e342..9c8838cec 100644 --- a/src/routing_filter.c +++ b/src/routing_filter.c @@ -532,10 +532,10 @@ routing_filter_add(cache *cc, while (new_fps_added < new_index_count || old_fps_added < old_index_count) { uint32 fp; - bool is_old = ((new_fps_added == new_index_count) - || ((old_fps_added != old_index_count) - && (old_src_fp[old_fps_added] - <= new_src_fp[new_fps_added]))); + bool32 is_old = ((new_fps_added == new_index_count) + || ((old_fps_added != old_index_count) + && (old_src_fp[old_fps_added] + <= new_src_fp[new_fps_added]))); if (is_old) { fp = old_src_fp[old_fps_added++]; } else { @@ -984,7 +984,7 @@ routing_filter_lookup_async(cache *cc, routing_async_ctxt *ctxt) { cache_async_result res = 0; - bool done = FALSE; + bool32 done = FALSE; debug_assert(key_is_user_key(target)); @@ -1215,7 +1215,7 @@ routing_filter_verify(cache *cc, uint16 value, iterator *itor) { - bool at_end; + bool32 at_end; iterator_at_end(itor, &at_end); while (!at_end) { key curr_key; diff --git a/src/routing_filter.h b/src/routing_filter.h index a96e9cce4..76b41d17e 100644 --- a/src/routing_filter.h +++ b/src/routing_filter.h @@ -79,7 +79,7 @@ typedef struct routing_async_ctxt { // Internal fields routing_async_state prev_state; // Previous state routing_async_state state; // Current state - bool was_async; // Was the last cache_get async ? + bool32 was_async; // Was the last cache_get async ? uint32 remainder_size; uint32 remainder; // remainder uint32 bucket; // hash bucket @@ -119,7 +119,7 @@ routing_filter_get_next_value(uint64 found_values, uint16 last_value) return 63 - __builtin_clzll(found_values); } -static inline bool +static inline bool32 routing_filter_is_value_found(uint64 found_values, uint16 value) { return ((found_values & (1 << value)) != 0); diff --git a/src/shard_log.c b/src/shard_log.c index 65a993064..ca6718a74 100644 --- a/src/shard_log.c +++ b/src/shard_log.c @@ -40,7 +40,7 @@ static log_ops shard_log_ops = { void shard_log_iterator_get_curr(iterator *itor, key *curr_key, message *msg); platform_status -shard_log_iterator_at_end(iterator *itor, bool *at_end); +shard_log_iterator_at_end(iterator *itor, bool32 *at_end); platform_status shard_log_iterator_advance(iterator *itor); @@ -179,7 +179,7 @@ first_log_entry(char *page) return (log_entry *)(page + sizeof(shard_log_hdr)); } -static bool +static bool32 terminal_log_entry(shard_log_config *cfg, char *page, log_entry *le) { return page + shard_log_page_size(cfg) - (char *)le < sizeof(log_entry) @@ -295,7 +295,7 @@ shard_log_magic(log_handle *logh) return log->magic; } -bool +bool32 shard_log_valid(shard_log_config *cfg, page_handle *page, uint64 magic) { shard_log_hdr *hdr = (shard_log_hdr *)page->data; @@ -436,7 +436,7 @@ shard_log_iterator_get_curr(iterator *itorh, key *curr_key, message *msg) } platform_status -shard_log_iterator_at_end(iterator *itorh, bool *at_end) +shard_log_iterator_at_end(iterator *itorh, bool32 *at_end) { shard_log_iterator *itor = (shard_log_iterator *)itorh; *at_end = itor->pos == itor->num_entries; diff --git a/src/splinterdb.c b/src/splinterdb.c index a3f95c190..280517243 100644 --- a/src/splinterdb.c +++ b/src/splinterdb.c @@ -229,7 +229,7 @@ splinterdb_init_config(const splinterdb_config *kvs_cfg, // IN int splinterdb_create_or_open(const splinterdb_config *kvs_cfg, // IN splinterdb **kvs_out, // OUT - bool open_existing // IN + bool32 open_existing // IN ) { splinterdb *kvs; @@ -643,7 +643,7 @@ 
splinterdb_iterator_valid(splinterdb_iterator *kvi) if (!SUCCESS(kvi->last_rc)) { return FALSE; } - bool at_end; + bool32 at_end; iterator *itor = &(kvi->sri.super); kvi->last_rc = iterator_at_end(itor, &at_end); if (!SUCCESS(kvi->last_rc)) { diff --git a/src/srq.h b/src/srq.h index 6dbe7fc86..ce553557e 100644 --- a/src/srq.h +++ b/src/srq.h @@ -74,7 +74,7 @@ srq_rchild(int64 pos) /* * Returns TRUE if priority(left) > priority(right) */ -static inline bool +static inline bool32 srq_has_priority(srq *queue, int64 lpos, int64 rpos) { debug_assert(lpos >= 0, "lpos=%ld", lpos); @@ -166,7 +166,7 @@ srq_get_new_index(srq *queue) return queue->index_hand; } -static inline bool +static inline bool32 srq_verify(srq *queue); static inline void @@ -189,7 +189,7 @@ srq_insert(srq *queue, srq_data new_data) return new_idx; } -static inline bool +static inline bool32 srq_data_found(srq_data *data) { return data->idx != SRQ_INDEX_AVAILABLE; @@ -309,10 +309,10 @@ srq_print(srq *queue) platform_mutex_unlock(&queue->mutex); } -static inline bool +static inline bool32 srq_verify(srq *queue) { - bool ret = TRUE; + bool32 ret = TRUE; platform_mutex_lock(&queue->mutex); uint64 entries_found = 0; for (uint64 idx = 0; idx < SRQ_MAX_ENTRIES; idx++) { diff --git a/src/task.c b/src/task.c index 4d9905182..517014efc 100644 --- a/src/task.c +++ b/src/task.c @@ -235,7 +235,7 @@ task_invoke_with_hooks(void *func_and_args) */ static platform_status task_create_thread_with_hooks(platform_thread *thread, - bool detached, + bool32 detached, platform_thread_worker func, void *arg, size_t scratch_size, @@ -563,7 +563,7 @@ task_group_deinit(task_group *group) static platform_status task_group_init(task_group *group, task_system *ts, - bool use_stats, + bool32 use_stats, uint8 num_bg_threads, uint64 scratch_size) { @@ -608,7 +608,7 @@ task_enqueue(task_system *ts, task_type type, task_fn func, void *arg, - bool at_head) + bool32 at_head) { task *new_task = TYPED_ZALLOC(ts->heap_id, new_task); if (new_task == NULL) { @@ -742,12 +742,12 @@ task_perform_all(task_system *ts) } while (STATUS_IS_NE(rc, STATUS_TIMEDOUT)); } -bool +bool32 task_system_is_quiescent(task_system *ts) { platform_status rc; task_type ttlocked; - bool result = FALSE; + bool32 result = FALSE; for (ttlocked = TASK_TYPE_FIRST; ttlocked < NUM_TASK_TYPES; ttlocked++) { rc = task_group_lock(&ts->group[ttlocked]); @@ -817,7 +817,7 @@ task_config_valid(const uint64 num_background_threads[NUM_TASK_TYPES]) platform_status task_system_config_init(task_system_config *task_cfg, - bool use_stats, + bool32 use_stats, const uint64 num_bg_threads[NUM_TASK_TYPES], uint64 scratch_size) { diff --git a/src/task.h b/src/task.h index 6de979cdc..286be2dde 100644 --- a/src/task.h +++ b/src/task.h @@ -38,7 +38,7 @@ typedef struct task_queue { } task_queue; typedef struct task_bg_thread_group { - bool stop; + bool32 stop; uint8 num_threads; platform_thread threads[MAX_THREADS]; } task_bg_thread_group; @@ -58,7 +58,7 @@ typedef struct task_group { task_bg_thread_group bg; // Per thread stats. 
- bool use_stats; + bool32 use_stats; task_stats stats[MAX_THREADS]; } task_group; @@ -77,14 +77,14 @@ typedef enum task_type { } task_type; typedef struct task_system_config { - bool use_stats; + bool32 use_stats; uint64 num_background_threads[NUM_TASK_TYPES]; uint64 scratch_size; } task_system_config; platform_status task_system_config_init(task_system_config *task_cfg, - bool use_stats, + bool32 use_stats, const uint64 num_background_threads[NUM_TASK_TYPES], uint64 scratch_size); @@ -199,7 +199,7 @@ task_enqueue(task_system *ts, task_type type, task_fn func, void *arg, - bool at_head); + bool32 at_head); /* * Possibly performs one background task if there is one waiting, @@ -248,7 +248,7 @@ void task_perform_all(task_system *ts); /* TRUE if there are no running or waiting tasks. */ -bool +bool32 task_system_is_quiescent(task_system *ts); /* diff --git a/src/trunk.c b/src/trunk.c index fc566278e..d087e842d 100644 --- a/src/trunk.c +++ b/src/trunk.c @@ -112,7 +112,7 @@ static const int64 latency_histo_buckets[LATENCYHISTO_SIZE] = { * to cfg->log_handle. */ -static inline bool +static inline bool32 trunk_verbose_logging_enabled(trunk_handle *spl) { return spl->cfg.verbose_logging_enabled; @@ -453,8 +453,8 @@ typedef struct ONDISK trunk_super_block { uint64 log_addr; uint64 log_meta_addr; uint64 timestamp; - bool checkpointed; - bool unmounted; + bool32 checkpointed; + bool32 unmounted; checksum128 checksum; } trunk_super_block; @@ -662,13 +662,13 @@ trunk_node_height(trunk_node *node) return node->hdr->height; } -static inline bool +static inline bool32 trunk_node_is_leaf(trunk_node *node) { return trunk_node_height(node) == 0; } -static inline bool +static inline bool32 trunk_node_is_index(trunk_node *node) { return !trunk_node_is_leaf(node); @@ -747,7 +747,7 @@ typedef enum lookup_type { } lookup_type; // for for_each_node -typedef bool (*node_fn)(trunk_handle *spl, uint64 addr, void *arg); +typedef bool32 (*node_fn)(trunk_handle *spl, uint64 addr, void *arg); // Used by trunk_compact_bundle() typedef struct { @@ -797,7 +797,7 @@ static inline uint64 trunk_pivot_num_tuples (trunk_handle static inline uint64 trunk_pivot_kv_bytes (trunk_handle *spl, trunk_node *node, uint16 pivot_no); static inline void trunk_pivot_branch_tuple_counts (trunk_handle *spl, trunk_node *node, uint16 pivot_no, uint16 branch_no, uint64 *num_tuples, uint64 *num_kv_bytes); void trunk_pivot_recount_num_tuples_and_kv_bytes (trunk_handle *spl, trunk_node *node, uint64 pivot_no); -static inline bool trunk_has_vacancy (trunk_handle *spl, trunk_node *node, uint16 num_new_branches); +static inline bool32 trunk_has_vacancy (trunk_handle *spl, trunk_node *node, uint16 num_new_branches); static inline uint16 trunk_add_bundle_number (trunk_handle *spl, uint16 start, uint16 end); static inline uint16 trunk_subtract_bundle_number (trunk_handle *spl, uint16 start, uint16 end); static inline trunk_bundle *trunk_get_bundle (trunk_handle *spl, trunk_node *node, uint16 bundle_no); @@ -818,26 +818,26 @@ static inline uint16 trunk_end_branch (trunk_handle static inline uint16 trunk_start_frac_branch (trunk_handle *spl, trunk_node *node); static inline void trunk_set_start_frac_branch (trunk_handle *spl, trunk_node *node, uint16 branch_no); static inline uint16 trunk_branch_count (trunk_handle *spl, trunk_node *node); -static inline bool trunk_branch_valid (trunk_handle *spl, trunk_node *node, uint64 branch_no); -static inline bool trunk_branch_live (trunk_handle *spl, trunk_node *node, uint64 branch_no); -static inline bool 
trunk_branch_live_for_pivot (trunk_handle *spl, trunk_node *node, uint64 branch_no, uint16 pivot_no); -static inline bool trunk_branch_is_whole (trunk_handle *spl, trunk_node *node, uint64 branch_no); +static inline bool32 trunk_branch_valid (trunk_handle *spl, trunk_node *node, uint64 branch_no); +static inline bool32 trunk_branch_live (trunk_handle *spl, trunk_node *node, uint64 branch_no); +static inline bool32 trunk_branch_live_for_pivot (trunk_handle *spl, trunk_node *node, uint64 branch_no, uint16 pivot_no); +static inline bool32 trunk_branch_is_whole (trunk_handle *spl, trunk_node *node, uint64 branch_no); trunk_bundle * trunk_flush_into_bundle (trunk_handle *spl, trunk_node *parent, trunk_node *child, trunk_pivot_data *pdata, trunk_compact_bundle_req *req); void trunk_replace_bundle_branches (trunk_handle *spl, trunk_node *node, trunk_branch *new_branch, trunk_compact_bundle_req *req); static inline uint16 trunk_add_branch_number (trunk_handle *spl, uint16 branch_no, uint16 offset); static inline uint16 trunk_subtract_branch_number (trunk_handle *spl, uint16 branch_no, uint16 offset); -static inline void trunk_dec_ref (trunk_handle *spl, trunk_branch *branch, bool is_memtable); +static inline void trunk_dec_ref (trunk_handle *spl, trunk_branch *branch, bool32 is_memtable); static inline void trunk_zap_branch_range (trunk_handle *spl, trunk_branch *branch, key start_key, key end_key, page_type type); -static inline void trunk_inc_intersection (trunk_handle *spl, trunk_branch *branch, key target, bool is_memtable); +static inline void trunk_inc_intersection (trunk_handle *spl, trunk_branch *branch, key target, bool32 is_memtable); void trunk_memtable_flush_virtual (void *arg, uint64 generation); platform_status trunk_memtable_insert (trunk_handle *spl, key tuple_key, message data); void trunk_bundle_build_filters (void *arg, void *scratch); static inline void trunk_inc_filter (trunk_handle *spl, routing_filter *filter); static inline void trunk_dec_filter (trunk_handle *spl, routing_filter *filter); void trunk_compact_bundle (void *arg, void *scratch); -platform_status trunk_flush (trunk_handle *spl, trunk_node *parent, trunk_pivot_data *pdata, bool is_space_rec); +platform_status trunk_flush (trunk_handle *spl, trunk_node *parent, trunk_pivot_data *pdata, bool32 is_space_rec); platform_status trunk_flush_fullest (trunk_handle *spl, trunk_node *node); -static inline bool trunk_needs_split (trunk_handle *spl, trunk_node *node); +static inline bool32 trunk_needs_split (trunk_handle *spl, trunk_node *node); void trunk_split_leaf (trunk_handle *spl, trunk_node *parent, trunk_node *leaf, uint16 child_idx); void trunk_split_index (trunk_handle *spl, trunk_node *parent, trunk_node *child, uint16 pivot_no, trunk_compact_bundle_req *req); int trunk_split_root (trunk_handle *spl, trunk_node *root); @@ -848,10 +848,10 @@ static void trunk_print_branches_and_bundles(platform_log static void trunk_btree_skiperator_init (trunk_handle *spl, trunk_btree_skiperator *skip_itor, trunk_node *node, uint16 branch_idx, key_buffer pivots[static TRUNK_MAX_PIVOTS]); void trunk_btree_skiperator_get_curr (iterator *itor, key *curr_key, message *data); platform_status trunk_btree_skiperator_advance (iterator *itor); -platform_status trunk_btree_skiperator_at_end (iterator *itor, bool *at_end); +platform_status trunk_btree_skiperator_at_end (iterator *itor, bool32 *at_end); void trunk_btree_skiperator_print (iterator *itor); void trunk_btree_skiperator_deinit (trunk_handle *spl, trunk_btree_skiperator 
*skip_itor); -bool trunk_verify_node (trunk_handle *spl, trunk_node *node); +bool32 trunk_verify_node (trunk_handle *spl, trunk_node *node); void trunk_maybe_reclaim_space (trunk_handle *spl); const static iterator_ops trunk_btree_skiperator_ops = { .get_curr = trunk_btree_skiperator_get_curr, @@ -903,9 +903,9 @@ trunk_tree_height(trunk_handle *spl) */ void trunk_set_super_block(trunk_handle *spl, - bool is_checkpoint, - bool is_unmount, - bool is_create) + bool32 is_checkpoint, + bool32 is_unmount, + bool32 is_create) { uint64 super_addr; page_handle *super_page; @@ -1017,7 +1017,7 @@ trunk_logical_branch_count(trunk_handle *spl, trunk_node *node) * A node is full if either it has too many tuples or if it has too many * logical branches. */ -static inline bool +static inline bool32 trunk_node_is_full(trunk_handle *spl, trunk_node *node) { uint64 num_kv_bytes = 0; @@ -1030,7 +1030,7 @@ trunk_node_is_full(trunk_handle *spl, trunk_node *node) return num_kv_bytes > spl->cfg.max_kv_bytes_per_node; } -bool +bool32 trunk_for_each_subtree(trunk_handle *spl, uint64 addr, node_fn func, void *arg) { // func may be deallocation, so first apply to subtree @@ -1040,7 +1040,7 @@ trunk_for_each_subtree(trunk_handle *spl, uint64 addr, node_fn func, void *arg) uint16 num_children = trunk_num_children(spl, &node); for (uint16 pivot_no = 0; pivot_no < num_children; pivot_no++) { trunk_pivot_data *pdata = trunk_get_pivot_data(spl, &node, pivot_no); - bool succeeded_on_subtree = + bool32 succeeded_on_subtree = trunk_for_each_subtree(spl, pdata->addr, func, arg); if (!succeeded_on_subtree) { goto failed_on_subtree; @@ -1062,7 +1062,7 @@ trunk_for_each_subtree(trunk_handle *spl, uint64 addr, node_fn func, void *arg) * * Returns: TRUE, if 'func' was successful on all nodes. FALSE, otherwise. */ -bool +bool32 trunk_for_each_node(trunk_handle *spl, node_fn func, void *arg) { return trunk_for_each_subtree(spl, spl->root_addr, func, arg); @@ -1360,7 +1360,7 @@ trunk_subtract_branch_number(trunk_handle *spl, uint16 branch_no, uint16 offset) % spl->cfg.hard_max_branches_per_node; } -static inline bool +static inline bool32 trunk_branch_in_range(trunk_handle *spl, uint16 branch_no, uint16 start, @@ -1382,7 +1382,7 @@ trunk_subtract_bundle_number(trunk_handle *spl, uint16 start, uint16 end) return (start + TRUNK_MAX_BUNDLES - end) % TRUNK_MAX_BUNDLES; } -static inline bool +static inline bool32 trunk_bundle_in_range(trunk_handle *spl, uint16 bundle_no, uint16 start, @@ -1676,7 +1676,7 @@ trunk_find_pivot(trunk_handle *spl, * branch_live_for_pivot returns TRUE if the branch is live for the pivot and * FALSE otherwise. */ -static inline bool +static inline bool32 trunk_branch_live_for_pivot(trunk_handle *spl, trunk_node *node, uint64 branch_no, @@ -1692,7 +1692,7 @@ trunk_branch_live_for_pivot(trunk_handle *spl, * branch_is_whole returns TRUE if the branch is whole and FALSE if it is * fractional (part of a bundle) or dead. */ -static inline bool +static inline bool32 trunk_branch_is_whole(trunk_handle *spl, trunk_node *node, uint64 branch_no) { return trunk_subtract_branch_number(spl, branch_no, node->hdr->start_branch) @@ -1965,7 +1965,7 @@ trunk_pivot_logical_branch_count(trunk_handle *spl, * with too many live logical branches must be flushed in order to reduce the * branch count. 
*/ -static inline bool +static inline bool32 trunk_pivot_needs_flush(trunk_handle *spl, trunk_node *node, trunk_pivot_data *pdata) @@ -2177,7 +2177,7 @@ trunk_inc_num_pivot_keys(trunk_handle *spl, trunk_node *node) /* * Returns TRUE if the bundle is live in the node and FALSE otherwise. */ -static inline bool +static inline bool32 trunk_bundle_live(trunk_handle *spl, trunk_node *node, uint16 bundle_no) { return trunk_bundle_in_range(spl, @@ -2310,7 +2310,7 @@ trunk_end_sb_filter(trunk_handle *spl, trunk_node *node) return node->hdr->end_sb_filter; } -static inline bool +static inline bool32 trunk_sb_filter_valid(trunk_handle *spl, trunk_node *node, uint16 filter_no) { uint16 start_filter = trunk_start_sb_filter(spl, node); @@ -2455,7 +2455,7 @@ trunk_leaf_rebundle_all_branches(trunk_handle *spl, trunk_node *node, uint64 target_num_tuples, uint64 target_kv_bytes, - bool is_space_rec) + bool32 is_space_rec) { debug_assert(trunk_node_height(node) == 0); uint16 bundle_no = trunk_get_new_bundle(spl, node); @@ -2568,7 +2568,7 @@ trunk_subbundle_count(trunk_handle *spl, trunk_node *node) * Returns TRUE if the bundle is valid in the node (live or == end_bundle) and * FALSE otherwise. */ -static inline bool +static inline bool32 trunk_bundle_valid(trunk_handle *spl, trunk_node *node, uint16 bundle_no) { return trunk_subtract_bundle_number(spl, bundle_no, node->hdr->start_bundle) @@ -2579,7 +2579,7 @@ trunk_bundle_valid(trunk_handle *spl, trunk_node *node, uint16 bundle_no) /* * Returns TRUE if the bundle is live for the pivot and FALSE otherwise */ -static inline bool +static inline bool32 trunk_bundle_live_for_pivot(trunk_handle *spl, trunk_node *node, uint16 bundle_no, @@ -2735,7 +2735,7 @@ trunk_branch_count(trunk_handle *spl, trunk_node *node) spl, node->hdr->end_branch, node->hdr->start_branch); } -static inline bool +static inline bool32 trunk_has_vacancy(trunk_handle *spl, trunk_node *node, uint16 num_new_branches) { uint16 branch_count = trunk_branch_count(spl, node); @@ -2793,7 +2793,7 @@ trunk_end_branch(trunk_handle *spl, trunk_node *node) /* * branch_live checks if branch_no is live for any pivot in the node. */ -static inline bool +static inline bool32 trunk_branch_live(trunk_handle *spl, trunk_node *node, uint64 branch_no) { return trunk_branch_in_range( @@ -2804,7 +2804,7 @@ trunk_branch_live(trunk_handle *spl, trunk_node *node, uint64 branch_no) * branch_valid checks if branch_no is being used by any pivot or is * end_branch. Used to verify if a given entry is valid. */ -static inline bool +static inline bool32 trunk_branch_valid(trunk_handle *spl, trunk_node *node, uint64 branch_no) { return trunk_subtract_branch_number(spl, branch_no, node->hdr->start_branch) @@ -3169,7 +3169,7 @@ trunk_zap_branch_range(trunk_handle *spl, * reaches 0. */ static inline void -trunk_dec_ref(trunk_handle *spl, trunk_branch *branch, bool is_memtable) +trunk_dec_ref(trunk_handle *spl, trunk_branch *branch, bool32 is_memtable) { page_type type = is_memtable ? 
PAGE_TYPE_MEMTABLE : PAGE_TYPE_BRANCH; trunk_zap_branch_range( @@ -3183,7 +3183,7 @@ static inline void trunk_inc_intersection(trunk_handle *spl, trunk_branch *branch, key target, - bool is_memtable) + bool32 is_memtable) { platform_assert(IMPLIES(is_memtable, key_is_null(target))); trunk_inc_branch_range(spl, branch, target, target); @@ -3205,7 +3205,7 @@ trunk_btree_lookup_and_merge(trunk_handle *spl, trunk_branch *branch, key target, merge_accumulator *data, - bool *local_found) + bool32 *local_found) { cache *cc = spl->cc; btree_config *cfg = &spl->cfg.btree_cfg; @@ -3251,7 +3251,7 @@ trunk_btree_lookup_and_merge_async(trunk_handle *spl, // IN cache *cc = spl->cc; btree_config *cfg = &spl->cfg.btree_cfg; cache_async_result res; - bool local_found; + bool32 local_found; res = btree_lookup_and_merge_async( cc, cfg, branch->root_addr, target, data, &local_found, ctxt); @@ -3336,8 +3336,8 @@ trunk_memtable_iterator_init(trunk_handle *spl, uint64 root_addr, key min_key, key max_key, - bool is_live, - bool inc_ref) + bool32 is_live, + bool32 inc_ref) { if (inc_ref) { allocator_inc_ref(spl->al, root_addr); @@ -3357,7 +3357,7 @@ static void trunk_memtable_iterator_deinit(trunk_handle *spl, btree_iterator *itor, uint64 mt_gen, - bool dec_ref) + bool32 dec_ref) { btree_iterator_deinit(itor); if (dec_ref) { @@ -3532,10 +3532,10 @@ trunk_memtable_compact_and_build_filter(trunk_handle *spl, * 2. memtable set to COMP after try_continue tries to set it to incorp * should_wait will be set to generation, so try_start will incorp */ -static inline bool +static inline bool32 trunk_try_start_incorporate(trunk_handle *spl, uint64 generation) { - bool should_start = FALSE; + bool32 should_start = FALSE; memtable_lock_incorporation_lock(spl->mt_ctxt); memtable *mt = trunk_try_get_memtable(spl, generation); @@ -3553,10 +3553,10 @@ trunk_try_start_incorporate(trunk_handle *spl, uint64 generation) return should_start; } -static inline bool +static inline bool32 trunk_try_continue_incorporate(trunk_handle *spl, uint64 next_generation) { - bool should_continue = FALSE; + bool32 should_continue = FALSE; memtable_lock_incorporation_lock(spl->mt_ctxt); memtable *mt = trunk_try_get_memtable(spl, next_generation); @@ -3826,7 +3826,7 @@ trunk_memtable_flush_virtual(void *arg, uint64 generation) static inline uint64 trunk_memtable_root_addr_for_lookup(trunk_handle *spl, uint64 generation, - bool *is_compacted) + bool32 *is_compacted) { memtable *mt = trunk_get_memtable(spl, generation); platform_assert(memtable_ok_to_lookup(mt)); @@ -3862,13 +3862,13 @@ trunk_memtable_lookup(trunk_handle *spl, { cache *const cc = spl->cc; btree_config *const cfg = &spl->cfg.btree_cfg; - bool memtable_is_compacted; + bool32 memtable_is_compacted; uint64 root_addr = trunk_memtable_root_addr_for_lookup( spl, generation, &memtable_is_compacted); page_type type = memtable_is_compacted ? 
PAGE_TYPE_BRANCH : PAGE_TYPE_MEMTABLE; platform_status rc; - bool local_found; + bool32 local_found; rc = btree_lookup_and_merge( cc, cfg, root_addr, type, target, data, &local_found); @@ -3911,7 +3911,7 @@ typedef struct trunk_filter_scratch { key_buffer start_key; key_buffer end_key; uint16 height; - bool should_build[TRUNK_MAX_PIVOTS]; + bool32 should_build[TRUNK_MAX_PIVOTS]; routing_filter old_filter[TRUNK_MAX_PIVOTS]; uint16 value[TRUNK_MAX_PIVOTS]; routing_filter filter[TRUNK_MAX_PIVOTS]; @@ -3925,7 +3925,7 @@ trunk_filter_scratch_init(trunk_compact_bundle_req *compact_req, ZERO_CONTENTS(filter_scratch); filter_scratch->fp_arr = compact_req->fp_arr; } -static inline bool +static inline bool32 trunk_compact_bundle_node_has_split(trunk_handle *spl, trunk_compact_bundle_req *req, trunk_node *node) @@ -3954,7 +3954,7 @@ trunk_compact_bundle_node_copy_path(trunk_handle *spl, spl, start_key, req->height, out_node, old_root_addr); } -static inline bool +static inline bool32 trunk_build_filter_should_abort(trunk_compact_bundle_req *req, trunk_node *node) { trunk_handle *spl = req->spl; @@ -3979,7 +3979,7 @@ trunk_build_filter_should_abort(trunk_compact_bundle_req *req, trunk_node *node) return FALSE; } -static inline bool +static inline bool32 trunk_build_filter_should_skip(trunk_compact_bundle_req *req, trunk_node *node) { trunk_handle *spl = req->spl; @@ -4002,7 +4002,7 @@ trunk_build_filter_should_skip(trunk_compact_bundle_req *req, trunk_node *node) return FALSE; } -static inline bool +static inline bool32 trunk_build_filter_should_reenqueue(trunk_compact_bundle_req *req, trunk_node *node) { @@ -4227,7 +4227,7 @@ trunk_bundle_build_filters(void *arg, void *scratch) trunk_compact_bundle_req *compact_req = (trunk_compact_bundle_req *)arg; trunk_handle *spl = compact_req->spl; - bool should_continue_build_filters = TRUE; + bool32 should_continue_build_filters = TRUE; while (should_continue_build_filters) { trunk_node node; platform_status rc = @@ -4280,7 +4280,7 @@ trunk_bundle_build_filters(void *arg, void *scratch) trunk_log_stream_if_enabled(spl, &stream, "Filters built\n"); - bool should_continue_replacing_filters = TRUE; + bool32 should_continue_replacing_filters = TRUE; while (should_continue_replacing_filters) { uint64 old_root_addr; key start_key = key_buffer_key(&filter_scratch.start_key); @@ -4561,7 +4561,7 @@ trunk_flush_into_bundle(trunk_handle *spl, // IN * * NOTE: parent and child must have at least read locks */ -static inline bool +static inline bool32 trunk_room_to_flush(trunk_handle *spl, trunk_node *parent, trunk_node *child, @@ -4617,7 +4617,7 @@ platform_status trunk_flush(trunk_handle *spl, trunk_node *parent, trunk_pivot_data *pdata, - bool is_space_rec) + bool32 is_space_rec) { platform_status rc; @@ -4818,8 +4818,8 @@ trunk_branch_iterator_init(trunk_handle *spl, trunk_branch *branch, key min_key, key max_key, - bool do_prefetch, - bool should_inc_ref) + bool32 do_prefetch, + bool32 should_inc_ref) { cache *cc = spl->cc; btree_config *btree_cfg = &spl->cfg.btree_cfg; @@ -4841,7 +4841,7 @@ trunk_branch_iterator_init(trunk_handle *spl, void trunk_branch_iterator_deinit(trunk_handle *spl, btree_iterator *itor, - bool should_dec_ref) + bool32 should_dec_ref) { if (itor->root_addr == 0) { return; @@ -4882,10 +4882,10 @@ trunk_btree_skiperator_init(trunk_handle *spl, skip_itor->branch = *trunk_get_branch(spl, node, branch_idx); uint16 first_pivot = 0; - bool iterator_started = FALSE; + bool32 iterator_started = FALSE; for (uint16 i = min_pivot_no; i < max_pivot_no + 1; 
i++) { - bool branch_valid = + bool32 branch_valid = i == max_pivot_no ? FALSE : trunk_branch_live_for_pivot(spl, node, branch_idx, i); @@ -4912,7 +4912,7 @@ trunk_btree_skiperator_init(trunk_handle *spl, } } - bool at_end; + bool32 at_end; if (skip_itor->curr != skip_itor->end) { iterator_at_end(&skip_itor->itor[skip_itor->curr].super, &at_end); } else { @@ -4947,7 +4947,7 @@ trunk_btree_skiperator_advance(iterator *itor) return rc; } - bool at_end; + bool32 at_end; iterator_at_end(&skip_itor->itor[skip_itor->curr].super, &at_end); while (skip_itor->curr != skip_itor->end && at_end) { iterator_at_end(&skip_itor->itor[skip_itor->curr].super, &at_end); @@ -4960,7 +4960,7 @@ trunk_btree_skiperator_advance(iterator *itor) } platform_status -trunk_btree_skiperator_at_end(iterator *itor, bool *at_end) +trunk_btree_skiperator_at_end(iterator *itor, bool32 *at_end) { trunk_btree_skiperator *skip_itor = (trunk_btree_skiperator *)itor; if (skip_itor->curr == skip_itor->end) { @@ -5258,7 +5258,7 @@ trunk_compact_bundle(void *arg, void *scratch_buf) * 11. For each newly split sibling replace bundle with new branch */ uint64 num_replacements = 0; - bool should_continue = TRUE; + bool32 should_continue = TRUE; while (should_continue) { uint64 old_root_addr; trunk_compact_bundle_node_copy_path(spl, req, &node, &old_root_addr); @@ -5410,7 +5410,7 @@ trunk_compact_bundle(void *arg, void *scratch_buf) *----------------------------------------------------------------------------- */ -static inline bool +static inline bool32 trunk_needs_split(trunk_handle *spl, trunk_node *node) { if (trunk_node_is_leaf(node)) { @@ -5745,7 +5745,7 @@ trunk_split_leaf(trunk_handle *spl, /* * 2. Use rough merge iterator to determine pivots for new leaves */ - bool at_end; + bool32 at_end; rc = iterator_at_end(&rough_merge_itor->super, &at_end); platform_assert_status_ok(rc); @@ -6020,7 +6020,7 @@ trunk_split_root(trunk_handle *spl, trunk_node *root) void trunk_range_iterator_get_curr(iterator *itor, key *curr_key, message *data); platform_status -trunk_range_iterator_at_end(iterator *itor, bool *at_end); +trunk_range_iterator_at_end(iterator *itor, bool32 *at_end); platform_status trunk_range_iterator_advance(iterator *itor); void @@ -6080,7 +6080,7 @@ trunk_range_iterator_init(trunk_handle *spl, TRUNK_RANGE_ITOR_MAX_BRANCHES); debug_assert(range_itor->num_branches < ARRAY_SIZE(range_itor->branch)); - bool compacted; + bool32 compacted; uint64 root_addr = trunk_memtable_root_addr_for_lookup(spl, mt_gen, &compacted); range_itor->compacted[range_itor->num_branches] = compacted; @@ -6173,7 +6173,7 @@ trunk_range_iterator_init(trunk_handle *spl, btree_iterator *btree_itor = &range_itor->btree_itor[branch_no]; trunk_branch *branch = &range_itor->branch[branch_no]; if (range_itor->compacted[branch_no]) { - bool do_prefetch = + bool32 do_prefetch = range_itor->compacted[branch_no] && num_tuples > TRUNK_PREFETCH_MIN ? 
TRUE : FALSE; @@ -6186,7 +6186,7 @@ trunk_range_iterator_init(trunk_handle *spl, FALSE); } else { uint64 mt_root_addr = branch->root_addr; - bool is_live = branch_no == 0; + bool32 is_live = branch_no == 0; trunk_memtable_iterator_init(spl, btree_itor, mt_root_addr, @@ -6209,7 +6209,7 @@ trunk_range_iterator_init(trunk_handle *spl, return rc; } - bool at_end; + bool32 at_end; iterator_at_end(&range_itor->merge_itor->super, &at_end); /* @@ -6256,7 +6256,7 @@ trunk_range_iterator_advance(iterator *itor) trunk_range_iterator *range_itor = (trunk_range_iterator *)itor; iterator_advance(&range_itor->merge_itor->super); range_itor->num_tuples++; - bool at_end; + bool32 at_end; iterator_at_end(&range_itor->merge_itor->super, &at_end); platform_status rc; // robj: shouldn't this be a while loop, like in the init function? @@ -6294,7 +6294,7 @@ trunk_range_iterator_advance(iterator *itor) } platform_status -trunk_range_iterator_at_end(iterator *itor, bool *at_end) +trunk_range_iterator_at_end(iterator *itor, bool32 *at_end) { debug_assert(itor != NULL); trunk_range_iterator *range_itor = (trunk_range_iterator *)itor; @@ -6417,7 +6417,7 @@ trunk_compact_leaf(trunk_handle *spl, trunk_node *leaf) * Space reclamation *----------------------------------------------------------------------------- */ -bool +bool32 trunk_should_reclaim_space(trunk_handle *spl) { if (spl->cfg.reclaim_threshold == UINT64_MAX) { @@ -6427,7 +6427,7 @@ trunk_should_reclaim_space(trunk_handle *spl) return TRUE; } uint64 in_use = allocator_in_use(spl->al); - bool should_reclaim = in_use > spl->cfg.reclaim_threshold; + bool32 should_reclaim = in_use > spl->cfg.reclaim_threshold; return should_reclaim; } @@ -6553,7 +6553,7 @@ trunk_insert(trunk_handle *spl, key tuple_key, message data) return rc; } -bool +bool32 trunk_filter_lookup(trunk_handle *spl, trunk_node *node, routing_filter *filter, @@ -6581,7 +6581,7 @@ trunk_filter_lookup(trunk_handle *spl, while (next_value != ROUTING_NOT_FOUND) { uint16 branch_no = trunk_add_branch_number(spl, start_branch, next_value); trunk_branch *branch = trunk_get_branch(spl, node, branch_no); - bool local_found; + bool32 local_found; platform_status rc; rc = trunk_btree_lookup_and_merge(spl, branch, target, data, &local_found); @@ -6602,7 +6602,7 @@ trunk_filter_lookup(trunk_handle *spl, return TRUE; } -bool +bool32 trunk_compacted_subbundle_lookup(trunk_handle *spl, trunk_node *node, trunk_subbundle *sb, @@ -6632,7 +6632,7 @@ trunk_compacted_subbundle_lookup(trunk_handle *spl, if (found_values) { uint16 branch_no = sb->start_branch; trunk_branch *branch = trunk_get_branch(spl, node, branch_no); - bool local_found; + bool32 local_found; platform_status rc; rc = trunk_btree_lookup_and_merge( spl, branch, target, data, &local_found); @@ -6654,7 +6654,7 @@ trunk_compacted_subbundle_lookup(trunk_handle *spl, return TRUE; } -bool +bool32 trunk_bundle_lookup(trunk_handle *spl, trunk_node *node, trunk_bundle *bundle, @@ -6666,7 +6666,7 @@ trunk_bundle_lookup(trunk_handle *spl, uint16 sb_no = trunk_subtract_subbundle_number( spl, bundle->end_subbundle, sb_off + 1); trunk_subbundle *sb = trunk_get_subbundle(spl, node, sb_no); - bool should_continue; + bool32 should_continue; if (sb->state == SB_STATE_COMPACTED) { should_continue = trunk_compacted_subbundle_lookup(spl, node, sb, target, data); @@ -6684,7 +6684,7 @@ trunk_bundle_lookup(trunk_handle *spl, return TRUE; } -bool +bool32 trunk_pivot_lookup(trunk_handle *spl, trunk_node *node, trunk_pivot_data *pdata, @@ -6698,7 +6698,7 @@ 
trunk_pivot_lookup(trunk_handle *spl, spl, trunk_end_bundle(spl, node), bundle_off + 1); debug_assert(trunk_bundle_live(spl, node, bundle_no)); trunk_bundle *bundle = trunk_get_bundle(spl, node, bundle_no); - bool should_continue = + bool32 should_continue = trunk_bundle_lookup(spl, node, bundle, target, data); if (!should_continue) { return should_continue; @@ -6725,7 +6725,7 @@ trunk_lookup(trunk_handle *spl, key target, merge_accumulator *result) merge_accumulator_set_to_null(result); memtable_begin_lookup(spl->mt_ctxt); - bool found_in_memtable = FALSE; + bool32 found_in_memtable = FALSE; uint64 mt_gen_start = memtable_generation(spl->mt_ctxt); uint64 mt_gen_end = memtable_generation_retired(spl->mt_ctxt); platform_assert(mt_gen_start - mt_gen_end <= TRUNK_NUM_MEMTABLES); @@ -6753,7 +6753,7 @@ trunk_lookup(trunk_handle *spl, key target, merge_accumulator *result) trunk_find_pivot(spl, &node, target, less_than_or_equal); debug_assert(pivot_no < trunk_num_children(spl, &node)); trunk_pivot_data *pdata = trunk_get_pivot_data(spl, &node, pivot_no); - bool should_continue = + bool32 should_continue = trunk_pivot_lookup(spl, &node, pdata, target, result); if (!should_continue) { goto found_final_answer_early; @@ -6766,7 +6766,8 @@ trunk_lookup(trunk_handle *spl, key target, merge_accumulator *result) // look in leaf trunk_pivot_data *pdata = trunk_get_pivot_data(spl, &node, 0); - bool should_continue = trunk_pivot_lookup(spl, &node, pdata, target, result); + bool32 should_continue = + trunk_pivot_lookup(spl, &node, pdata, target, result); if (!should_continue) { goto found_final_answer_early; } @@ -6933,7 +6934,7 @@ trunk_lookup_async(trunk_handle *spl, // IN tid = platform_get_tid(); } trunk_node *node = &ctxt->trunk_node; - bool done = FALSE; + bool32 done = FALSE; do { switch (ctxt->state) { @@ -7067,7 +7068,7 @@ trunk_lookup_async(trunk_handle *spl, // IN } case async_state_filter_lookup_reentrant: { - // bool is_leaf; + // bool32 is_leaf; // switch (ctxt->lookup_state) { // case async_lookup_state_pivot: // is_leaf = ctxt->height == 0; @@ -7385,7 +7386,7 @@ trunk_range(trunk_handle *spl, goto destroy_range_itor; } - bool at_end; + bool32 at_end; iterator_at_end(&range_itor->super, &at_end); for (int i = 0; i < num_tuples && !at_end; i++) { @@ -7649,7 +7650,7 @@ trunk_prepare_for_shutdown(trunk_handle *spl) cache_flush(spl->cc); } -bool +bool32 trunk_node_destroy(trunk_handle *spl, uint64 addr, void *arg) { trunk_node node; @@ -7770,10 +7771,10 @@ trunk_perform_tasks(trunk_handle *spl) * 5. subbundles are coherent (branches are contiguous and non-overlapping) * 6. start_frac (resp end_branch) is first (resp last) branch in a subbundle */ -bool +bool32 trunk_verify_node(trunk_handle *spl, trunk_node *node) { - bool is_valid = FALSE; + bool32 is_valid = FALSE; uint64 addr = node->addr; // check values in trunk node->hdr (currently just num_pivot_keys) @@ -8103,12 +8104,12 @@ typedef struct trunk_verify_scratch { * 1. coherent max key with successor's min key * 2. 
coherent pivots with children's min/max keys */ -bool +bool32 trunk_verify_node_with_neighbors(trunk_handle *spl, trunk_node *node, trunk_verify_scratch *scratch) { - bool is_valid = FALSE; + bool32 is_valid = FALSE; uint64 addr = node->addr; uint16 height = trunk_node_height(node); @@ -8182,12 +8183,12 @@ trunk_verify_node_with_neighbors(trunk_handle *spl, /* * Wrapper for trunk_for_each_node */ -bool +bool32 trunk_verify_node_and_neighbors(trunk_handle *spl, uint64 addr, void *arg) { trunk_node node; trunk_node_get(spl->cc, addr, &node); - bool is_valid = trunk_verify_node(spl, &node); + bool32 is_valid = trunk_verify_node(spl, &node); if (!is_valid) { goto out; } @@ -8202,7 +8203,7 @@ trunk_verify_node_and_neighbors(trunk_handle *spl, uint64 addr, void *arg) /* * verify_tree verifies each node with itself and its neighbors */ -bool +bool32 trunk_verify_tree(trunk_handle *spl) { trunk_verify_scratch scratch = {0}; @@ -8210,7 +8211,7 @@ trunk_verify_tree(trunk_handle *spl) key_buffer_init_from_key( &scratch.last_key_seen[h], spl->heap_id, NEGATIVE_INFINITY_KEY); } - bool success = + bool32 success = trunk_for_each_node(spl, trunk_verify_node_and_neighbors, &scratch); for (uint64 h = 0; h < TRUNK_MAX_HEIGHT; h++) { key_buffer_deinit(&scratch.last_key_seen[h]); @@ -8221,7 +8222,7 @@ trunk_verify_tree(trunk_handle *spl) /* * Returns the amount of space used by each level of the tree */ -bool +bool32 trunk_node_space_use(trunk_handle *spl, uint64 addr, void *arg) { uint64 *bytes_used_on_level = (uint64 *)arg; @@ -9020,7 +9021,7 @@ trunk_print_lookup(trunk_handle *spl, uint64 mt_gen_start = memtable_generation(spl->mt_ctxt); uint64 mt_gen_end = memtable_generation_retired(spl->mt_ctxt); for (uint64 mt_gen = mt_gen_start; mt_gen != mt_gen_end; mt_gen--) { - bool memtable_is_compacted; + bool32 memtable_is_compacted; uint64 root_addr = trunk_memtable_root_addr_for_lookup( spl, mt_gen, &memtable_is_compacted); platform_status rc; @@ -9084,7 +9085,7 @@ trunk_print_lookup(trunk_handle *spl, { trunk_branch *branch = trunk_get_branch(spl, &node, branch_no); platform_status rc; - bool local_found; + bool32 local_found; merge_accumulator_set_to_null(&data); rc = trunk_btree_lookup_and_merge( spl, branch, target, &data, &local_found); @@ -9137,7 +9138,7 @@ trunk_print_lookup(trunk_handle *spl, { trunk_branch *branch = trunk_get_branch(spl, &node, branch_no); platform_status rc; - bool local_found; + bool32 local_found; merge_accumulator_set_to_null(&data); rc = trunk_btree_lookup_and_merge( spl, branch, target, &data, &local_found); @@ -9221,7 +9222,7 @@ trunk_branch_count_num_tuples(trunk_handle *spl, } } -bool +bool32 trunk_node_print_branches(trunk_handle *spl, uint64 addr, void *arg) { platform_log_handle *log_handle = (platform_log_handle *)arg; @@ -9296,7 +9297,7 @@ trunk_print_branches(platform_log_handle *log_handle, trunk_handle *spl) trunk_for_each_node(spl, trunk_node_print_branches, log_handle); } -// bool +// bool32 // trunk_node_print_extent_count(trunk_handle *spl, // uint64 addr, // void *arg) @@ -9353,9 +9354,9 @@ trunk_config_init(trunk_config *trunk_cfg, uint64 filter_index_size, uint64 reclaim_threshold, uint64 queue_scale_percent, - bool use_log, - bool use_stats, - bool verbose_logging, + bool32 use_log, + bool32 use_stats, + bool32 verbose_logging, platform_log_handle *log_handle) { diff --git a/src/trunk.h b/src/trunk.h index 474f8b2f1..13a1fd9e8 100644 --- a/src/trunk.h +++ b/src/trunk.h @@ -64,16 +64,16 @@ typedef struct trunk_config { // free space < threshold uint64 
queue_scale_percent; // Governs when inserters perform bg tasks. See // task.h - bool use_stats; // stats + bool32 use_stats; // stats memtable_config mt_cfg; btree_config btree_cfg; routing_config filter_cfg; data_config *data_cfg; - bool use_log; + bool32 use_log; log_config *log_cfg; // verbose logging - bool verbose_logging_enabled; + bool32 verbose_logging_enabled; platform_log_handle *log_handle; } trunk_config; @@ -229,9 +229,9 @@ typedef struct trunk_range_iterator { uint64 num_memtable_branches; uint64 memtable_start_gen; uint64 memtable_end_gen; - bool compacted[TRUNK_RANGE_ITOR_MAX_BRANCHES]; + bool32 compacted[TRUNK_RANGE_ITOR_MAX_BRANCHES]; merge_iterator *merge_itor; - bool at_end; + bool32 at_end; key_buffer min_key; key_buffer max_key; key_buffer rebuild_key; @@ -310,7 +310,7 @@ typedef struct trunk_async_ctxt { uint16 branch_no; // branch number (newest) uint16 branch_no_end; // branch number end (oldest, // exclusive) - bool was_async; // Did an async IO for trunk ? + bool32 was_async; // Did an async IO for trunk ? trunk_branch *branch; // Current branch union { routing_async_ctxt filter_ctxt; // Filter async context @@ -334,7 +334,7 @@ trunk_insert(trunk_handle *spl, key tuple_key, message data); platform_status trunk_lookup(trunk_handle *spl, key target, merge_accumulator *result); -static inline bool +static inline bool32 trunk_lookup_found(merge_accumulator *result) { return !merge_accumulator_is_null(result); @@ -407,7 +407,7 @@ void trunk_print_extent_counts(platform_log_handle *log_handle, trunk_handle *spl); void trunk_print_space_use(platform_log_handle *log_handle, trunk_handle *spl); -bool +bool32 trunk_verify_tree(trunk_handle *spl); static inline uint64 @@ -461,9 +461,9 @@ trunk_config_init(trunk_config *trunk_cfg, uint64 filter_index_size, uint64 reclaim_threshold, uint64 queue_scale_percent, - bool use_log, - bool use_stats, - bool verbose_logging, + bool32 use_log, + bool32 use_stats, + bool32 verbose_logging, platform_log_handle *log_handle); size_t trunk_get_scratch_size(); diff --git a/src/util.c b/src/util.c index 0c341269e..c9c0f85d8 100644 --- a/src/util.c +++ b/src/util.c @@ -52,7 +52,7 @@ writable_buffer_resize(writable_buffer *wb, uint64 newlength) * negative_limit and positive_limit are absolute values *---------------------------------------------------------------------- */ -static inline bool +static inline bool32 try_string_to_uint64_limit(const char *nptr, // IN const uint64 negative_limit, // IN const uint64 positive_limit, // IN @@ -67,7 +67,7 @@ try_string_to_uint64_limit(const char *nptr, // IN } while (isspace(c)); // Skip (single) leading '+', treat single leading '-' as negative - bool negative = FALSE; + bool32 negative = FALSE; if (c == '-') { if (negative_limit == 0) { goto negative_disallowed; @@ -109,7 +109,7 @@ try_string_to_uint64_limit(const char *nptr, // IN const int cutlim = limit % (uint64)base; uint64 value; - bool converted_any = FALSE; + bool32 converted_any = FALSE; for (value = 0; c != '\0'; c = *s++) { if (isspace(c)) { break; @@ -195,7 +195,7 @@ try_string_to_uint64_limit(const char *nptr, // IN * Base is automatically detected based on the regular expressions above *---------------------------------------------------------------------- */ -bool +bool32 try_string_to_uint64(const char *nptr, // IN uint64 *n) // OUT { @@ -204,7 +204,7 @@ try_string_to_uint64(const char *nptr, // IN return try_string_to_uint64_limit(nptr, negative_limit, positive_limit, n); } -bool +bool32 try_string_to_int64(const char *nptr, // IN 
int64 *n) // OUT { @@ -220,7 +220,7 @@ try_string_to_int64(const char *nptr, // IN return TRUE; } -bool +bool32 try_string_to_uint32(const char *nptr, // IN uint32 *n) // OUT { @@ -232,7 +232,7 @@ try_string_to_uint32(const char *nptr, // IN return TRUE; } -bool +bool32 try_string_to_uint16(const char *nptr, // IN uint16 *n) // OUT { @@ -244,7 +244,7 @@ try_string_to_uint16(const char *nptr, // IN return TRUE; } -bool +bool32 try_string_to_uint8(const char *nptr, // IN uint8 *n) // OUT { @@ -256,7 +256,7 @@ try_string_to_uint8(const char *nptr, // IN return TRUE; } -bool +bool32 try_string_to_int32(const char *nptr, // IN int32 *n) // OUT { @@ -268,7 +268,7 @@ try_string_to_int32(const char *nptr, // IN return TRUE; } -bool +bool32 try_string_to_int16(const char *nptr, // IN int16 *n) // OUT { @@ -280,7 +280,7 @@ try_string_to_int16(const char *nptr, // IN return TRUE; } -bool +bool32 try_string_to_int8(const char *nptr, // IN int8 *n) // OUT { @@ -375,7 +375,7 @@ size_to_str(char *outbuf, size_t outbuflen, size_t size) debug_assert(outbuflen >= SIZE_TO_STR_LEN, "outbuflen=%lu.\n", outbuflen); size_t unit_val = 0; size_t frac_val = 0; - bool is_approx = FALSE; + bool32 is_approx = FALSE; char *units = NULL; if (size >= TiB) { diff --git a/src/util.h b/src/util.h index 550eb02d6..0fb0753d1 100644 --- a/src/util.h +++ b/src/util.h @@ -80,7 +80,7 @@ slice_copy_contents(void *dst, const slice src) return slice_create(src.length, dst); } -static inline bool +static inline bool32 slice_equals(const slice a, const slice b) { return a.length == b.length && a.data == b.data; @@ -125,7 +125,7 @@ typedef struct writable_buffer { void *buffer; uint64 buffer_capacity; uint64 length; - bool can_free; + bool32 can_free; } writable_buffer; #define WRITABLE_BUFFER_NULL_LENGTH UINT64_MAX @@ -154,7 +154,7 @@ writable_buffer_data(const writable_buffer *wb) } } -static inline bool +static inline bool32 writable_buffer_is_null(const writable_buffer *wb) { return wb->length == WRITABLE_BUFFER_NULL_LENGTH; @@ -297,35 +297,35 @@ writable_buffer_append(writable_buffer *wb, uint64 length, const void *newdata) * * Base is automatically detected based on the regular expressions above */ -bool +bool32 try_string_to_uint64(const char *nptr, // IN uint64 *n); // OUT -bool +bool32 try_string_to_int64(const char *nptr, // IN int64 *n); // OUT -bool +bool32 try_string_to_uint32(const char *nptr, // IN uint32 *n); // OUT -bool +bool32 try_string_to_int32(const char *nptr, // IN int32 *n); // OUT -bool +bool32 try_string_to_uint16(const char *nptr, // IN uint16 *n); // OUT -bool +bool32 try_string_to_int16(const char *nptr, // IN int16 *n); // OUT -bool +bool32 try_string_to_uint8(const char *nptr, // IN uint8 *n); // OUT -bool +bool32 try_string_to_int8(const char *nptr, // IN int8 *n); // OUT diff --git a/tests/config.h b/tests/config.h index b8594e647..bcb950029 100644 --- a/tests/config.h +++ b/tests/config.h @@ -57,7 +57,7 @@ typedef struct master_config { // cache uint64 cache_capacity; - bool cache_use_stats; + bool32 cache_use_stats; char cache_logfile[MAX_STRING_LENGTH]; // btree @@ -68,7 +68,7 @@ typedef struct master_config { uint64 filter_index_size; // log - bool use_log; + bool32 use_log; // task system uint64 num_normal_bg_threads; // Both bg_threads fields have to be non-zero @@ -81,8 +81,8 @@ typedef struct master_config { uint64 use_stats; uint64 reclaim_threshold; uint64 queue_scale_percent; - bool verbose_logging_enabled; - bool verbose_progress; + bool32 verbose_logging_enabled; + bool32 verbose_progress; 
platform_log_handle *log_handle; // data diff --git a/tests/functional/avlTree.c b/tests/functional/avlTree.c index 0571a92d8..79b2b7a9e 100644 --- a/tests/functional/avlTree.c +++ b/tests/functional/avlTree.c @@ -102,7 +102,7 @@ AvlTree_InitNode(AvlTreeLinks *node) *----------------------------------------------------------------------------- */ -bool +bool32 AvlTree_IsUnlinked(AvlTreeLinks *node) { return node->left == NULL && node->right == NULL && node->height == 0; @@ -787,7 +787,7 @@ AvlTreeStackPop(AvlTreeIter *iter) *----------------------------------------------------------------------------- */ -static bool +static bool32 AvlTreeStackIsEmpty(AvlTreeIter *iter) { return iter->num == 0; @@ -870,7 +870,7 @@ AvlTreeIter_Init(AvlTreeIter *iter, // IN iterator *----------------------------------------------------------------------------- */ -bool +bool32 AvlTreeIter_IsAtEnd(AvlTreeIter *iter) { return iter->cur == NULL; diff --git a/tests/functional/avlTree.h b/tests/functional/avlTree.h index f556a13fc..a71c3163c 100644 --- a/tests/functional/avlTree.h +++ b/tests/functional/avlTree.h @@ -78,7 +78,7 @@ AvlTree_Init(AvlTree *tree, void AvlTree_InitNode(AvlTreeLinks *node); -bool +bool32 AvlTree_IsUnlinked(AvlTreeLinks *node); /* Insertion / deletion */ @@ -155,7 +155,7 @@ AvlTreeIter_AllocSize(unsigned max) // IN height of the avl tree void AvlTreeIter_Init(AvlTreeIter *iter, unsigned max, AvlTree *tree); -bool +bool32 AvlTreeIter_IsAtEnd(AvlTreeIter *iter); void diff --git a/tests/functional/btree_test.c b/tests/functional/btree_test.c index 79db18060..e1997e805 100644 --- a/tests/functional/btree_test.c +++ b/tests/functional/btree_test.c @@ -105,7 +105,7 @@ test_btree_insert(test_memtable_context *ctxt, key tuple_key, message data) return rc; } -bool +bool32 test_btree_lookup(cache *cc, btree_config *cfg, platform_heap_id hid, @@ -115,7 +115,7 @@ test_btree_lookup(cache *cc, { platform_status rc; merge_accumulator result; - bool ret; + bool32 ret; merge_accumulator_init(&result, hid); @@ -134,7 +134,7 @@ test_btree_lookup(cache *cc, return ret; } -bool +bool32 test_memtable_lookup(test_memtable_context *ctxt, uint64 mt_no, key target, @@ -308,7 +308,7 @@ test_btree_perf(cache *cc, typedef struct { btree_async_ctxt ctxt; cache_async_ctxt cache_ctxt; - bool ready; + bool32 ready; key_buffer keybuf; merge_accumulator result; } btree_test_async_ctxt; @@ -382,7 +382,7 @@ btree_test_async_ctxt_init(btree_test_async_lookup *async_lookup) async_lookup->ctxt_bitmap = (1UL << max_async_inflight) - 1; } -static bool +static bool32 btree_test_async_ctxt_is_used(const btree_test_async_lookup *async_lookup, int ctxt_idx) { @@ -390,7 +390,7 @@ btree_test_async_ctxt_is_used(const btree_test_async_lookup *async_lookup, return async_lookup->ctxt_bitmap & (1UL << ctxt_idx) ? 
FALSE : TRUE; } -static bool +static bool32 btree_test_async_ctxt_any_used(const btree_test_async_lookup *async_lookup) { debug_assert((async_lookup->ctxt_bitmap & ~((1UL << max_async_inflight) - 1)) @@ -398,13 +398,13 @@ btree_test_async_ctxt_any_used(const btree_test_async_lookup *async_lookup) return async_lookup->ctxt_bitmap != (1UL << max_async_inflight) - 1; } -static bool +static bool32 btree_test_run_pending(cache *cc, btree_config *cfg, uint64 root_addr, btree_test_async_lookup *async_lookup, btree_test_async_ctxt *skip_ctxt, - bool expected_found) + bool32 expected_found) { int i; @@ -425,7 +425,7 @@ btree_test_run_pending(cache *cc, key target = key_buffer_key(&ctxt->keybuf); res = btree_lookup_async( cc, cfg, root_addr, target, &ctxt->result, &ctxt->ctxt); - bool local_found = btree_found(&ctxt->result); + bool32 local_found = btree_found(&ctxt->result); switch (res) { case async_locked: case async_no_reqs: @@ -467,7 +467,7 @@ btree_test_wait_pending(cache *cc, btree_config *cfg, uint64 root_addr, btree_test_async_lookup *async_lookup, - bool expected_found) + bool32 expected_found) { // Rough detection of stuck contexts const timestamp ts = platform_get_timestamp(); @@ -484,8 +484,8 @@ test_btree_async_lookup(cache *cc, btree_test_async_ctxt *async_ctxt, btree_test_async_lookup *async_lookup, uint64 root_addr, - bool expected_found, - bool *correct) + bool32 expected_found, + bool32 *correct) { cache_async_result res; btree_ctxt_init( @@ -522,8 +522,8 @@ test_memtable_async_lookup(test_memtable_context *ctxt, btree_test_async_ctxt *async_ctxt, btree_test_async_lookup *async_lookup, uint64 mt_no, - bool expected_found, - bool *correct) + bool32 expected_found, + bool32 *correct) { memtable *mt = &ctxt->mt_ctxt->mt[mt_no]; btree_config *btree_cfg = mt->cfg; @@ -575,7 +575,7 @@ test_btree_basic(cache *cc, platform_default_log("btree insert time per tuple %luns\n", platform_timestamp_elapsed(start_time) / num_inserts); - bool correct = memtable_verify(cc, mt); + bool32 correct = memtable_verify(cc, mt); if (!correct) { memtable_print(Platform_default_log_handle, cc, mt); } @@ -591,7 +591,7 @@ test_btree_basic(cache *cc, if (async_ctxt == NULL) { test_btree_tuple(ctxt, &keybuf, &expected_data, insert_num, 0); - bool correct = + bool32 correct = test_memtable_lookup(ctxt, 0, key_buffer_key(&keybuf), @@ -606,7 +606,7 @@ test_btree_basic(cache *cc, platform_assert(correct); } else { num_async++; - bool correct; + bool32 correct; test_btree_tuple( ctxt, &async_ctxt->keybuf, &expected_data, insert_num, 0); cache_async_result res = test_memtable_async_lookup( @@ -636,7 +636,7 @@ test_btree_basic(cache *cc, uint64 end_num = 2 * num_inserts; for (uint64 insert_num = start_num; insert_num < end_num; insert_num++) { test_btree_tuple(ctxt, &keybuf, &expected_data, insert_num, 0); - bool correct = + bool32 correct = test_memtable_lookup(ctxt, 0, key_buffer_key(&keybuf), NULL_MESSAGE); if (!correct) { memtable_print(Platform_default_log_handle, cc, mt); @@ -693,7 +693,7 @@ test_btree_basic(cache *cc, if (async_ctxt == NULL) { test_btree_tuple(ctxt, &keybuf, &expected_data, insert_num, 0); - bool correct = + bool32 correct = test_btree_lookup(cc, btree_cfg, hid, @@ -715,7 +715,7 @@ test_btree_basic(cache *cc, platform_assert(correct); } else { num_async++; - bool correct; + bool32 correct; test_btree_tuple( ctxt, &async_ctxt->keybuf, &expected_data, insert_num, 0); cache_async_result res = test_btree_async_lookup(cc, @@ -755,12 +755,12 @@ test_btree_basic(cache *cc, end_num = 2 * num_inserts; 
for (uint64 insert_num = start_num; insert_num < end_num; insert_num++) { test_btree_tuple(ctxt, &keybuf, &expected_data, insert_num, 0); - bool correct = test_btree_lookup(cc, - btree_cfg, - hid, - packed_root_addr, - key_buffer_key(&keybuf), - NULL_MESSAGE); + bool32 correct = test_btree_lookup(cc, + btree_cfg, + hid, + packed_root_addr, + key_buffer_key(&keybuf), + NULL_MESSAGE); if (!correct) { btree_print_tree(Platform_default_log_handle, cc, @@ -880,7 +880,7 @@ test_count_tuples_in_range(cache *cc, } btree_iterator_init( cc, cfg, &itor, root_addr[i], type, low_key, high_key, TRUE, 0); - bool at_end; + bool32 at_end; iterator_at_end(&itor.super, &at_end); key last_key = NULL_KEY; while (!at_end) { @@ -962,7 +962,7 @@ test_btree_print_all_keys(cache *cc, platform_default_log("tree number %lu\n", i); btree_iterator_init( cc, cfg, &itor, root_addr[i], type, low_key, high_key, TRUE, 0); - bool at_end; + bool32 at_end; iterator_at_end(&itor.super, &at_end); while (!at_end) { key curr_key; @@ -1244,7 +1244,7 @@ test_btree_rough_iterator(cache *cc, iterator **rough_itor = TYPED_ARRAY_MALLOC(hid, rough_itor, num_trees); platform_assert(rough_itor); - bool at_end; + bool32 at_end; for (uint64 tree_no = 0; tree_no < num_trees; tree_no++) { btree_iterator_init(cc, btree_cfg, @@ -1493,7 +1493,7 @@ btree_test(int argc, char *argv[]) task_system_config task_cfg; int config_argc; char **config_argv; - bool run_perf_test; + bool32 run_perf_test; platform_status rc; uint64 seed; task_system *ts = NULL; diff --git a/tests/functional/cache_test.c b/tests/functional/cache_test.c index eb4df93d9..9178ba16a 100644 --- a/tests/functional/cache_test.c +++ b/tests/functional/cache_test.c @@ -177,7 +177,7 @@ test_cache_basic(cache *cc, clockcache_config *cfg, platform_heap_id hid) uint32 i; for (i = 0; i < cfg->page_capacity; i++) { page_arr[i] = cache_get(cc, addr_arr[j + i], TRUE, PAGE_TYPE_MISC); - bool claim_obtained = cache_try_claim(cc, page_arr[i]); + bool32 claim_obtained = cache_try_claim(cc, page_arr[i]); if (!claim_obtained) { platform_error_log("Expected uncontested claim, but failed\n"); rc = STATUS_TEST_FAILED; @@ -222,7 +222,7 @@ test_cache_basic(cache *cc, clockcache_config *cfg, platform_heap_id hid) uint32 i; for (i = 0; i < cfg->page_capacity; i++) { page_arr[i] = cache_get(cc, addr_arr[j + i], TRUE, PAGE_TYPE_MISC); - bool claim_obtained = cache_try_claim(cc, page_arr[i]); + bool32 claim_obtained = cache_try_claim(cc, page_arr[i]); if (!claim_obtained) { platform_error_log("Expected uncontested claim, but failed\n"); rc = STATUS_TEST_FAILED; @@ -310,7 +310,7 @@ typedef struct { uint32 start; uint32 end; int32 incr; - bool arity; + bool32 arity; } hop; }; } cache_test_index_itor; @@ -410,7 +410,7 @@ cache_test_dirty_flush(cache *cc, for (uint32 i = 0; i < cfg->page_capacity; i++) { const uint32 idx = cache_test_index_itor_get(itor); page_handle *ph = cache_get(cc, addr_arr[idx], TRUE, PAGE_TYPE_MISC); - bool claim_obtained = cache_try_claim(cc, ph); + bool32 claim_obtained = cache_try_claim(cc, ph); if (!claim_obtained) { platform_error_log("Expected uncontested claim, but failed\n"); rc = STATUS_TEST_FAILED; @@ -582,8 +582,8 @@ typedef struct { task_system *ts; // IN platform_thread thread; // IN platform_heap_id hid; // IN - bool mt_reader; // IN readers are MT - bool logger; // IN logger thread + bool32 mt_reader; // IN readers are MT + bool32 logger; // IN logger thread const uint64 *addr_arr; // IN array of page addrs uint64 num_pages; // IN #of pages to get uint64 num_pages_ws; // IN 
#of pages in working set @@ -626,7 +626,7 @@ static void test_abandon_read_batch(test_params *params, uint64 batch_start, uint64 batch_end, // exclusive - bool was_async[]) + bool32 was_async[]) { page_handle **handle_arr = params->handle_arr; const uint64 *addr_arr = params->addr_arr; @@ -650,14 +650,14 @@ test_abandon_read_batch(test_params *params, } // Do async reads for a batch of addresses, and wait for them to complete -static bool +static bool32 test_do_read_batch(threadid tid, test_params *params, uint64 batch_start) { page_handle **handle_arr = &params->handle_arr[batch_start]; const uint64 *addr_arr = &params->addr_arr[batch_start]; - const bool mt_reader = params->mt_reader; + const bool32 mt_reader = params->mt_reader; cache *cc = params->cc; - bool was_async[READER_BATCH_SIZE] = {FALSE}; + bool32 was_async[READER_BATCH_SIZE] = {FALSE}; uint64 j; // Prepare to do async gets on current batch @@ -754,7 +754,7 @@ test_reader_thread(void *arg) } k += READER_BATCH_SIZE; } - bool need_retry; + bool32 need_retry; do { need_retry = test_do_read_batch(tid, params, i); if (need_retry) { @@ -871,7 +871,7 @@ test_cache_async(cache *cc, cache_evict(cc, TRUE); cache_reset_stats(cc); for (i = 0; i < total_threads; i++) { - const bool is_reader = i < num_reader_threads ? TRUE : FALSE; + const bool32 is_reader = i < num_reader_threads ? TRUE : FALSE; params[i].cc = cc; params[i].cfg = cfg; @@ -968,7 +968,7 @@ cache_test(int argc, char *argv[]) char **config_argv = argv + 1; platform_status rc; task_system *ts = NULL; - bool benchmark = FALSE, async = FALSE; + bool32 benchmark = FALSE, async = FALSE; uint64 seed; test_message_generator gen; diff --git a/tests/functional/filter_test.c b/tests/functional/filter_test.c index 2026948c6..28cfa4bfd 100644 --- a/tests/functional/filter_test.c +++ b/tests/functional/filter_test.c @@ -41,7 +41,7 @@ test_filter_basic(cache *cc, fp_arr[i] = TYPED_ARRAY_MALLOC(hid, fp_arr[i], num_fingerprints); } - bool *used_keys = + bool32 *used_keys = TYPED_ARRAY_ZALLOC(hid, used_keys, (num_values + 1) * num_fingerprints); uint32 *num_input_keys = TYPED_ARRAY_ZALLOC(hid, num_input_keys, num_values); @@ -298,7 +298,7 @@ filter_test(int argc, char *argv[]) clockcache *cc; int config_argc; char **config_argv; - bool run_perf_test; + bool32 run_perf_test; platform_status rc; uint64 seed; test_message_generator gen; diff --git a/tests/functional/io_apis_test.c b/tests/functional/io_apis_test.c index b7c425a38..74193deb7 100644 --- a/tests/functional/io_apis_test.c +++ b/tests/functional/io_apis_test.c @@ -48,7 +48,7 @@ typedef struct io_test_fn_args { } io_test_fn_args; /* Whether to display verbose-progress from each thread's activity */ -bool Verbose_progress = FALSE; +bool32 Verbose_progress = FALSE; /* * Different test cases in this test drive multiple threads each doing one diff --git a/tests/functional/log_test.c b/tests/functional/log_test.c index 6e5dea24c..c1fed2d83 100644 --- a/tests/functional/log_test.c +++ b/tests/functional/log_test.c @@ -31,7 +31,7 @@ test_log_crash(clockcache *cc, platform_heap_id hid, test_message_generator *gen, uint64 num_entries, - bool crash) + bool32 crash) { platform_status rc; @@ -45,7 +45,7 @@ test_log_crash(clockcache *cc, iterator *itorh = (iterator *)&itor; char key_str[128]; char data_str[128]; - bool at_end; + bool32 at_end; merge_accumulator msg; DECLARE_AUTO_KEY_BUFFER(keybuffer, hid); @@ -240,8 +240,8 @@ log_test(int argc, char *argv[]) platform_status ret; int config_argc; char **config_argv; - bool run_perf_test; - bool 
run_crash_test; + bool32 run_perf_test; + bool32 run_crash_test; int rc; uint64 seed; task_system *ts = NULL; diff --git a/tests/functional/splinter_test.c b/tests/functional/splinter_test.c index ff49de048..b8e01c6c8 100644 --- a/tests/functional/splinter_test.c +++ b/tests/functional/splinter_test.c @@ -65,7 +65,7 @@ typedef struct test_splinter_thread_params { uint64 max_range_length; stats_insert insert_stats; uint64 num_ops_per_thread[NUM_OP_TYPES]; // in each round - bool expected_found; + bool32 expected_found; test_async_lookup *async_lookup[8]; // async lookup state per table uint64 insert_rate; stats_lookup lookup_stats[NUM_LOOKUP_TYPES]; @@ -92,7 +92,7 @@ typedef struct trunk_range_perf_params { */ typedef void (*test_trunk_thread_hdlr)(void *arg); -static inline bool +static inline bool32 test_is_done(const uint8 done, const uint8 n) { return (((done >> n) & 1) != 0); @@ -104,7 +104,7 @@ test_set_done(uint8 *done, const uint8 n) *done |= 1 << n; } -static inline bool +static inline bool32 test_all_done(const uint8 done, const uint8 num_tables) { return (done == ((1 << num_tables) - 1)); @@ -233,7 +233,7 @@ test_trunk_lookup_thread(void *arg) uint64 *curr_op = params->curr_op; uint64 op_granularity = params->op_granularity; uint64 thread_number = params->thread_number; - bool expected_found = params->expected_found; + bool32 expected_found = params->expected_found; uint8 num_tables = params->num_tables; verify_tuple_arg vtarg = {.expected_found = expected_found, .stats = &params->lookup_stats[ASYNC_LU]}; @@ -366,7 +366,8 @@ test_trunk_range_thread(void *arg) uint64 *range_base = TYPED_ARRAY_ZALLOC(heap_id, range_base, num_tables); uint8 done = 0; - bool verbose_progress = test_show_verbose_progress(test_cfg->test_exec_cfg); + bool32 verbose_progress = + test_show_verbose_progress(test_cfg->test_exec_cfg); uint64 test_start_time = platform_get_timestamp(); uint64 start_time = platform_get_timestamp(); char progress_msg[60]; @@ -461,7 +462,7 @@ test_trunk_range_thread(void *arg) * Returns: TRUE if all tests are done *----------------------------------------------------------------------------- */ -static bool +static bool32 advance_base(const test_splinter_thread_params *params, uint64 *curr_op, uint64 *base, @@ -556,7 +557,7 @@ do_operation(test_splinter_thread_params *params, uint64 num_ops, uint64 op_offset, const uint8 *done, - bool is_insert) + bool32 is_insert) { trunk_handle **spl_tables = params->spl; const test_config *test_cfg = params->test_cfg; @@ -625,7 +626,7 @@ do_operation(test_splinter_thread_params *params, if (ts > params->lookup_stats[SYNC_LU].latency_max) { params->lookup_stats[SYNC_LU].latency_max = ts; } - bool found = trunk_lookup_found(&msg); + bool32 found = trunk_lookup_found(&msg); if (found) { params->lookup_stats[SYNC_LU].num_found++; } else { @@ -859,7 +860,7 @@ load_thread_params(test_splinter_thread_params *params, uint64 insert_rate, uint64 num_insert_threads, uint64 num_threads, - bool is_parallel) + bool32 is_parallel) { for (uint64 i = 0; i < num_threads; i++) { params[i].spl = spl_tables; @@ -1009,7 +1010,8 @@ splinter_perf_inserts(platform_heap_id hid, return rc; } - bool verbose_progress = test_show_verbose_progress(test_cfg->test_exec_cfg); + bool32 verbose_progress = + test_show_verbose_progress(test_cfg->test_exec_cfg); if (verbose_progress) { platform_default_log("Created %lu insert threads" ", Waiting for threads to complete ...\n", @@ -1112,7 +1114,8 @@ splinter_perf_lookups(platform_heap_id hid, return rc; } - bool verbose_progress = 
test_show_verbose_progress(test_cfg->test_exec_cfg); + bool32 verbose_progress = + test_show_verbose_progress(test_cfg->test_exec_cfg); if (verbose_progress) { platform_default_log("Created %lu lookup threads" ", Waiting for threads to complete ...\n", @@ -1208,8 +1211,9 @@ splinter_perf_range_lookups(platform_heap_id hid, platform_assert( (num_range_threads > 0), "num_range_threads=%lu", num_range_threads); - bool verbose_progress = test_show_verbose_progress(test_cfg->test_exec_cfg); - uint64 total_ranges = 0; + bool32 verbose_progress = + test_show_verbose_progress(test_cfg->test_exec_cfg); + uint64 total_ranges = 0; for (uint8 i = 0; i < num_tables; i++) { per_table_ranges[i] = ROUNDUP(per_table_inserts[i] / num_ranges, TEST_RANGE_GRANULARITY); @@ -2492,7 +2496,7 @@ splinter_test(int argc, char *argv[]) uint32 num_range_lookup_threads, max_async_inflight; uint32 num_pthreads = 0; uint8 num_tables = 1; - bool cache_per_table = FALSE; + bool32 cache_per_table = FALSE; uint64 insert_rate = 0; // no rate throttling by default. task_system *ts = NULL; uint8 lookup_positive_pct = 0; @@ -2695,7 +2699,7 @@ splinter_test(int argc, char *argv[]) config_argv); // if there are multiple cache capacity, cache_per_table needs to be TRUE - bool multi_cap = FALSE; + bool32 multi_cap = FALSE; for (uint8 i = 0; i < num_tables; i++) { if (cache_cfg[i].capacity != cache_cfg[0].capacity) { multi_cap = TRUE; diff --git a/tests/functional/test.h b/tests/functional/test.h index e02488a1b..b2848179d 100644 --- a/tests/functional/test.h +++ b/tests/functional/test.h @@ -110,7 +110,7 @@ test_key(key_buffer *keywb, return key_buffer_key(keywb); } -static inline bool +static inline bool32 test_period_complete(uint64 idx, uint64 period) { return idx % period == 0; @@ -279,7 +279,7 @@ test_config_init(trunk_config *splinter_cfg, // OUT typedef struct test_exec_config { uint64 seed; uint64 num_inserts; - bool verbose_progress; // --verbose-progress: During test execution + bool32 verbose_progress; // --verbose-progress: During test execution } test_exec_config; /* diff --git a/tests/functional/test_async.c b/tests/functional/test_async.c index 0602010fa..7d9b1723c 100644 --- a/tests/functional/test_async.c +++ b/tests/functional/test_async.c @@ -153,7 +153,7 @@ async_ctxt_process_one(trunk_handle *spl, * * Returns: TRUE if no context at all are used. 
*/ -bool +bool32 async_ctxt_process_ready(trunk_handle *spl, test_async_lookup *async_lookup, timestamp *latency_max, diff --git a/tests/functional/test_async.h b/tests/functional/test_async.h index 25093226e..1c268b2c3 100644 --- a/tests/functional/test_async.h +++ b/tests/functional/test_async.h @@ -62,7 +62,7 @@ async_ctxt_process_one(trunk_handle *spl, timestamp *latency_max, async_ctxt_process_cb process_cb, void *process_arg); -bool +bool32 async_ctxt_process_ready(trunk_handle *spl, test_async_lookup *async_lookup, timestamp *latency_max, diff --git a/tests/functional/test_functionality.c b/tests/functional/test_functionality.c index 553d626eb..7cb79c090 100644 --- a/tests/functional/test_functionality.c +++ b/tests/functional/test_functionality.c @@ -32,7 +32,7 @@ static void search_for_key_via_iterator(trunk_handle *spl, key target) { trunk_range_iterator iter; - bool at_end; + bool32 at_end; trunk_range_iterator_init( spl, &iter, NEGATIVE_INFINITY_KEY, POSITIVE_INFINITY_KEY, UINT64_MAX); @@ -60,7 +60,7 @@ verify_tuple(trunk_handle *spl, platform_status *result) { const data_handle *dh = message_data(msg); - bool found = dh != NULL; + bool32 found = dh != NULL; uint64 int_key = be64toh(*(uint64 *)key_data(keybuf)); if (dh && message_length(msg) < sizeof(data_handle)) { @@ -227,7 +227,7 @@ verify_range_against_shadow(trunk_handle *spl, const data_handle *splinter_data_handle; uint64 splinter_key; uint64 i; - bool at_end; + bool32 at_end; platform_assert(start_index <= sharr->nkeys); platform_assert(end_index <= sharr->nkeys); @@ -329,7 +329,7 @@ choose_key(data_config *cfg, // IN test_splinter_shadow_array *sharr, // IN random_state *prg, // IN/OUT int type, // IN - bool is_start, // IN + bool32 is_start, // IN key startkey, // IN int start_index, // IN int *index, // OUT @@ -382,7 +382,7 @@ verify_range_against_shadow_all_types(trunk_handle *spl, random_state *prg, test_splinter_shadow_array *sharr, platform_heap_id hid, - bool do_it) + bool32 do_it) { int begin_type; int end_type; @@ -470,7 +470,7 @@ validate_tree_against_shadow(trunk_handle *spl, random_state *prg, test_splinter_shadow_tree *shadow, platform_heap_id hid, - bool do_it, + bool32 do_it, test_async_lookup *async_lookup) { test_splinter_shadow_array dry_run_sharr = { diff --git a/tests/functional/test_splinter_shadow.c b/tests/functional/test_splinter_shadow.c index a1c8144ea..228cec1e7 100644 --- a/tests/functional/test_splinter_shadow.c +++ b/tests/functional/test_splinter_shadow.c @@ -145,7 +145,7 @@ test_splinter_shadow_create(test_splinter_shadow_tree **tree, *----------------------------------------------------------------------------- */ -bool +bool32 test_splinter_shadow_lookup(test_splinter_shadow_tree *tree, uint64 *key, uint64 *val) diff --git a/tests/functional/test_splinter_shadow.h b/tests/functional/test_splinter_shadow.h index a80e0e51b..4dcc2da6d 100644 --- a/tests/functional/test_splinter_shadow.h +++ b/tests/functional/test_splinter_shadow.h @@ -65,7 +65,7 @@ test_splinter_shadow_count(test_splinter_shadow_tree *tree) } -bool +bool32 test_splinter_shadow_lookup(test_splinter_shadow_tree *tree, uint64 *key, uint64 *val); diff --git a/tests/functional/ycsb_test.c b/tests/functional/ycsb_test.c index 2d3fa080d..26e12ff6d 100644 --- a/tests/functional/ycsb_test.c +++ b/tests/functional/ycsb_test.c @@ -181,7 +181,7 @@ print_latency_table(latency_table table, platform_log_handle *log_handle) { uint64_t exponent; uint64_t mantissa; - bool started = FALSE; + bool32 started = FALSE; uint64_t max = 
max_latency(table); platform_log(log_handle, "latency count\n"); @@ -248,7 +248,7 @@ typedef struct ycsb_op { uint64 range_len; uint64 start_time; uint64 end_time; - bool found; + bool32 found; } ycsb_op; typedef struct running_times { @@ -529,7 +529,7 @@ run_all_ycsb_phases(trunk_handle *spl, typedef struct parse_ycsb_log_req { char *filename; - bool lock; + bool32 lock; uint64 start_line; uint64 end_line; uint64 *num_ops; @@ -542,7 +542,7 @@ parse_ycsb_log_file(void *arg) { parse_ycsb_log_req *req = (parse_ycsb_log_req *)arg; platform_heap_id hid = platform_get_heap_id(); - bool lock = req->lock; + bool32 lock = req->lock; uint64 *num_ops = req->num_ops; random_state rs; @@ -641,7 +641,7 @@ static platform_status load_ycsb_logs(int argc, char *argv[], uint64 *nphases, - bool *use_existing, + bool32 *use_existing, ycsb_phase **output, int *args_consumed, uint64 *log_size_bytes_out, @@ -649,7 +649,7 @@ load_ycsb_logs(int argc, { uint64 _nphases = 1; uint64 num_threads = 0; - bool mlock_log = TRUE; + bool32 mlock_log = TRUE; char *measurement_command = NULL; uint64 log_size_bytes = 0; *use_existing = FALSE; @@ -1158,7 +1158,7 @@ ycsb_test(int argc, char *argv[]) task_system *ts = NULL; uint64 nphases; - bool use_existing = 0; + bool32 use_existing = 0; ycsb_phase *phases; int args_consumed; test_message_generator gen; diff --git a/tests/test_common.c b/tests/test_common.c index 727e17eb1..b5768ca1d 100644 --- a/tests/test_common.c +++ b/tests/test_common.c @@ -26,7 +26,7 @@ verify_tuple(trunk_handle *spl, uint64 lookup_num, key tuple_key, message data, - bool expected_found) + bool32 expected_found) { if (message_is_null(data) != !expected_found) { char key_str[128]; @@ -89,7 +89,7 @@ void verify_tuple_callback(trunk_handle *spl, test_async_ctxt *ctxt, void *arg) { verify_tuple_arg *vta = arg; - bool found = trunk_lookup_found(&ctxt->data); + bool32 found = trunk_lookup_found(&ctxt->data); if (vta->stats != NULL) { if (found) { diff --git a/tests/test_common.h b/tests/test_common.h index 3e133659a..21a0c9afd 100644 --- a/tests/test_common.h +++ b/tests/test_common.h @@ -21,8 +21,8 @@ typedef struct stats_lookup { } stats_lookup; typedef struct { - bool expected_found; - bool stats_only; // update statistic only + bool32 expected_found; + bool32 stats_only; // update statistic only stats_lookup *stats; } verify_tuple_arg; @@ -35,7 +35,7 @@ verify_tuple(trunk_handle *spl, uint64 lookup_num, key tuple_key, message data, - bool expected_found); + bool32 expected_found); void test_wait_for_inflight(trunk_handle *spl, @@ -50,7 +50,7 @@ test_async_ctxt_get(trunk_handle *spl, test_async_lookup *async_lookup, verify_tuple_arg *vtarg); -static inline bool +static inline bool32 test_show_verbose_progress(test_exec_config *test_exec_cfg) { return (test_exec_cfg->verbose_progress); diff --git a/tests/unit/btree_stress_test.c b/tests/unit/btree_stress_test.c index 8ff32248f..dff2d6387 100644 --- a/tests/unit/btree_stress_test.c +++ b/tests/unit/btree_stress_test.c @@ -305,7 +305,7 @@ insert_tests(cache *cc, int end) { uint64 generation; - bool was_unique; + bool32 was_unique; int keybuf_size = btree_page_size(cfg); int msgbuf_size = btree_page_size(cfg); @@ -426,7 +426,7 @@ iterator_tests(cache *cc, iterator *iter = (iterator *)&dbiter; uint64 seen = 0; - bool at_end; + bool32 at_end; uint8 *prevbuf = TYPED_MANUAL_MALLOC(hid, prevbuf, btree_page_size(cfg)); key prev = NULL_KEY; uint8 *keybuf = TYPED_MANUAL_MALLOC(hid, keybuf, btree_page_size(cfg)); diff --git a/tests/unit/btree_test.c 
b/tests/unit/btree_test.c index 63f2156c5..41b7200a1 100644 --- a/tests/unit/btree_test.c +++ b/tests/unit/btree_test.c @@ -42,7 +42,7 @@ leaf_split_tests(btree_config *cfg, int nkvs, platform_heap_id hid); -static bool +static bool32 btree_leaf_incorporate_tuple(const btree_config *cfg, platform_heap_id hid, btree_hdr *hdr, @@ -174,7 +174,7 @@ leaf_hdr_tests(btree_config *cfg, btree_scratch *scratch, platform_heap_id hid) btree_init_hdr(cfg, hdr); - bool rv = FALSE; + bool32 rv = FALSE; for (uint32 i = 0; i < nkvs; i++) { rv = btree_set_leaf_entry( cfg, @@ -266,7 +266,7 @@ leaf_hdr_search_tests(btree_config *cfg, platform_heap_id hid) message_create(MESSAGE_TYPE_INSERT, slice_create(i % 8, messagebuf)); leaf_incorporate_spec spec; - bool result = btree_leaf_incorporate_tuple( + bool32 result = btree_leaf_incorporate_tuple( cfg, hid, hdr, tuple_key, msg, &spec, &generation); ASSERT_TRUE(result, "Could not incorporate kv pair %d\n", i); @@ -295,8 +295,8 @@ index_hdr_tests(btree_config *cfg, btree_scratch *scratch, platform_heap_id hid) int nkvs = 100; - bool rv = FALSE; - int cmp_rv = 0; + bool32 rv = FALSE; + int cmp_rv = 0; btree_init_hdr(cfg, hdr); hdr->height = 1; @@ -362,7 +362,7 @@ index_hdr_search_tests(btree_config *cfg, platform_heap_id hid) btree_init_hdr(cfg, hdr); - bool rv = FALSE; + bool32 rv = FALSE; for (int i = 0; i < nkvs; i += 2) { uint8 keybuf[1]; keybuf[0] = i; @@ -373,8 +373,8 @@ index_hdr_search_tests(btree_config *cfg, platform_heap_id hid) } for (int i = 0; i < nkvs; i++) { - bool found; - uint8 keybuf[1]; + bool32 found; + uint8 keybuf[1]; keybuf[0] = i; key target = key_create(1, &keybuf); int64 idx = btree_find_pivot(cfg, hdr, target, &found); @@ -427,7 +427,7 @@ leaf_split_tests(btree_config *cfg, key tuple_key = key_create(1, &i); - bool success = btree_leaf_incorporate_tuple( + bool32 success = btree_leaf_incorporate_tuple( cfg, hid, hdr, tuple_key, bigger_msg, &spec, &generation); if (success) { btree_print_locked_node( diff --git a/tests/unit/splinter_test.c b/tests/unit/splinter_test.c index fc33348b0..783ec6460 100644 --- a/tests/unit/splinter_test.c +++ b/tests/unit/splinter_test.c @@ -40,7 +40,7 @@ typedef struct shadow_entry { typedef struct trunk_shadow { data_config *data_cfg; - bool sorted; + bool32 sorted; writable_buffer entries; writable_buffer data; } trunk_shadow; @@ -49,7 +49,7 @@ typedef struct trunk_shadow { static uint64 splinter_do_inserts(void *datap, trunk_handle *spl, - bool verify, + bool32 verify, trunk_shadow *shadow); // Out static platform_status @@ -120,7 +120,7 @@ CTEST_SETUP(splinter) data->max_async_inflight = 64; data->spl_num_tables = 1; - bool cache_per_table = FALSE; + bool32 cache_per_table = FALSE; int num_tables = data->spl_num_tables; // Cache, for re-use below uint8 num_caches = (cache_per_table ? num_tables : 1); uint64 heap_capacity = MAX(1024 * MiB * num_caches, 512 * MiB * num_tables); @@ -692,7 +692,7 @@ CTEST2(splinter, test_splinter_print_diags) static uint64 splinter_do_inserts(void *datap, trunk_handle *spl, - bool verify, + bool32 verify, trunk_shadow *shadow) // Out { // Cast void * datap to ptr-to-CTEST_DATA() struct in use. @@ -745,7 +745,7 @@ splinter_do_inserts(void *datap, if (verify && (insert_num != 0) && (insert_num % TEST_VERIFY_GRANULARITY) == 0) { - bool result = trunk_verify_tree(spl); + bool32 result = trunk_verify_tree(spl); ASSERT_TRUE(result, "trunk_verify_tree() failed after %d inserts. 
", insert_num); diff --git a/tests/unit/splinterdb_quick_test.c b/tests/unit/splinterdb_quick_test.c index 2891b5d5d..97d998042 100644 --- a/tests/unit/splinterdb_quick_test.c +++ b/tests/unit/splinterdb_quick_test.c @@ -463,7 +463,7 @@ CTEST2(splinterdb_quick, test_splinterdb_iterator_with_startkey) rc = splinterdb_iterator_init(data->kvsb, &it, start_key); ASSERT_EQUAL(0, rc); - bool is_valid = splinterdb_iterator_valid(it); + bool32 is_valid = splinterdb_iterator_valid(it); ASSERT_TRUE(is_valid); // Scan should have been positioned at the i'th key @@ -497,7 +497,7 @@ CTEST2(splinterdb_quick, test_splinterdb_iterator_with_non_existent_startkey) rc = splinterdb_iterator_init(data->kvsb, &it, start_key); // Iterator should be invalid, as lookup key is non-existent. - bool is_valid = splinterdb_iterator_valid(it); + bool32 is_valid = splinterdb_iterator_valid(it); ASSERT_FALSE(is_valid); splinterdb_iterator_deinit(it); @@ -563,7 +563,7 @@ CTEST2(splinterdb_quick, rc = splinterdb_iterator_init(data->kvsb, &it, start_key); ASSERT_EQUAL(0, rc); - bool is_valid = splinterdb_iterator_valid(it); + bool32 is_valid = splinterdb_iterator_valid(it); ASSERT_TRUE(is_valid); // Iterator should be initialized to 1st key inserted, if the supplied @@ -797,7 +797,7 @@ CTEST2(splinterdb_quick, test_iterator_custom_comparator) ASSERT_EQUAL(num_inserts, i); ASSERT_TRUE(data->default_data_cfg.num_comparisons > (2 * num_inserts)); - bool is_valid = splinterdb_iterator_valid(it); + bool32 is_valid = splinterdb_iterator_valid(it); ASSERT_FALSE(is_valid); if (it) { @@ -825,7 +825,7 @@ CTEST2(splinterdb_quick, test_iterator_init_bug) rc = splinterdb_iterator_init(data->kvsb, &it, NULL_SLICE); ASSERT_EQUAL(0, rc); - bool iter_valid = splinterdb_iterator_valid(it); + bool32 iter_valid = splinterdb_iterator_valid(it); ASSERT_FALSE(iter_valid); splinterdb_iterator_deinit(it); diff --git a/tests/unit/task_system_test.c b/tests/unit/task_system_test.c index 2e0607c9a..394a0c64c 100644 --- a/tests/unit/task_system_test.c +++ b/tests/unit/task_system_test.c @@ -46,8 +46,8 @@ typedef struct { task_system *tasks; threadid exp_thread_idx; // Splinter-generated expected thread index threadid exp_max_tid; // After this thread gets created - bool stop_thread; - bool waitfor_stop_signal; + bool32 stop_thread; + bool32 waitfor_stop_signal; int line; // Thread created on / around this line # } thread_config_lockstep;