diff --git a/src/box/tree_index.cc b/src/box/tree_index.cc index cfa95d93b4b636abbf61bbdf6a22842381f98d35..0f902f413a3bb8bc04e17bff848f5d346decaa5e 100644 --- a/src/box/tree_index.cc +++ b/src/box/tree_index.cc @@ -31,74 +31,46 @@ #include "space.h" #include "exception.h" #include "errinj.h" +#include "memory.h" +#include "fiber.h" + +/** For all memory used by all tree indexes. */ +static struct mempool tree_extent_pool; +/** Number of allocated extents. */ +static int tree_index_count = 0; /* {{{ Utilities. *************************************************/ -struct sptree_index_key_data +struct key_data { const char *key; uint32_t part_count; }; -static inline struct tuple * -sptree_index_unfold(const void *node) -{ - return node ? *(struct tuple **) node : NULL; -} - -static int -sptree_index_node_compare(const void *node_a, const void *node_b, void *arg) -{ - struct key_def *key_def = (struct key_def *) arg; - const struct tuple *tuple_a = *(const struct tuple **) node_a; - const struct tuple *tuple_b = *(const struct tuple **) node_b; - - return tuple_compare(tuple_a, tuple_b, key_def); -} - -static int -sptree_index_node_compare_dup(const void *node_a, const void *node_b, - void *arg) +int +tree_index_compare(const tuple *a, const tuple *b, struct key_def *key_def) { - struct key_def *key_def = (struct key_def *) arg; - const struct tuple *tuple_a = *(const struct tuple **) node_a; - const struct tuple *tuple_b = *(const struct tuple **) node_b; - - return tuple_compare_dup(tuple_a, tuple_b, key_def); + int r = tuple_compare(a, b, key_def); + if (r == 0 && !key_def->is_unique) + r = a < b ? -1 : a > b; + return r; } - -static int -sptree_index_node_compare_with_key(const void *key, const void *node, - void *arg) +int +tree_index_compare_key(const tuple *a, const key_data *key_data, + struct key_def *key_def) { - struct key_def *key_def = (struct key_def *) arg; - const struct sptree_index_key_data *key_data = - (const struct sptree_index_key_data *) key; - const struct tuple *tuple = *(const struct tuple **) node; - - /* the result is inverted because arguments are swapped */ - return -tuple_compare_with_key(tuple, key_data->key, - key_data->part_count, key_def); + return tuple_compare_with_key(a, key_data->key, key_data->part_count, + key_def); } -#ifndef NDEBUG -void * -realloc_inject(void *ptr, size_t size) -{ - if (size) - ERROR_INJECT(ERRINJ_TREE_ALLOC, return 0); - return realloc(ptr, size); -} -#endif - /* {{{ TreeIndex Iterators ****************************************/ struct tree_iterator { struct iterator base; + const struct bps_tree *tree; struct key_def *key_def; - sptree_index_compare compare; - struct sptree_index_iterator *iter; - struct sptree_index_key_data key_data; + struct bps_tree_iterator bsp_tree_iter; + struct key_data key_data; }; static void @@ -114,116 +86,150 @@ tree_iterator(struct iterator *it) static void tree_iterator_free(struct iterator *iterator) { - struct tree_iterator *it = tree_iterator(iterator); - if (it->iter) - sptree_index_iterator_free(it->iter); - free(it); + free(iterator); } static struct tuple * -tree_iterator_ge(struct iterator *iterator) +tree_iterator_dummie(struct iterator *iterator) { - struct tree_iterator *it = tree_iterator(iterator); - void *node = sptree_index_iterator_next(it->iter); - return sptree_index_unfold(node); + (void)iterator; + return 0; } static struct tuple * -tree_iterator_le(struct iterator *iterator) +tree_iterator_fwd(struct iterator *iterator) { struct tree_iterator *it = tree_iterator(iterator); - void 
*node = sptree_index_iterator_reverse_next(it->iter); - return sptree_index_unfold(node); + tuple **res = bps_tree_itr_get_elem(it->tree, &it->bsp_tree_iter); + if (!res) + return 0; + bps_tree_itr_next(it->tree, &it->bsp_tree_iter); + return *res; } static struct tuple * -tree_iterator_eq(struct iterator *iterator) +tree_iterator_bwd(struct iterator *iterator) { struct tree_iterator *it = tree_iterator(iterator); - - void *node = sptree_index_iterator_next(it->iter); - if (node && it->compare(&it->key_data, node, it->key_def) == 0) - return *(struct tuple **) node; - - return NULL; + tuple **res = bps_tree_itr_get_elem(it->tree, &it->bsp_tree_iter); + if (!res) + return 0; + bps_tree_itr_prev(it->tree, &it->bsp_tree_iter); + return *res; } static struct tuple * -tree_iterator_req(struct iterator *iterator) +tree_iterator_fwd_check_equality(struct iterator *iterator) { struct tree_iterator *it = tree_iterator(iterator); - - void *node = sptree_index_iterator_reverse_next(it->iter); - if (node && it->compare(&it->key_data, node, it->key_def) == 0) - return *(struct tuple **) node; - - return NULL; + tuple **res = bps_tree_itr_get_elem(it->tree, &it->bsp_tree_iter); + if (!res) + return 0; + if (tree_index_compare_key(*res, &it->key_data, it->key_def) != 0) { + it->bsp_tree_iter = bps_tree_invalid_iterator(); + return 0; + } + bps_tree_itr_next(it->tree, &it->bsp_tree_iter); + return *res; } static struct tuple * -tree_iterator_lt(struct iterator *iterator) +tree_iterator_fwd_check_next_equality(struct iterator *iterator) { struct tree_iterator *it = tree_iterator(iterator); - - void *node ; - while ((node = sptree_index_iterator_reverse_next(it->iter)) != NULL) { - if (it->compare(&it->key_data, node, it->key_def) != 0) { - it->base.next = tree_iterator_le; - return *(struct tuple **) node; - } - } - - return NULL; + tuple **res = bps_tree_itr_get_elem(it->tree, &it->bsp_tree_iter); + if (!res) + return 0; + bps_tree_itr_next(it->tree, &it->bsp_tree_iter); + iterator->next = tree_iterator_fwd_check_equality; + return *res; } static struct tuple * -tree_iterator_gt(struct iterator *iterator) +tree_iterator_bwd_skip_one(struct iterator *iterator) { struct tree_iterator *it = tree_iterator(iterator); + bps_tree_itr_prev(it->tree, &it->bsp_tree_iter); + iterator->next = tree_iterator_bwd; + return tree_iterator_bwd(iterator); +} - void *node; - while ((node = sptree_index_iterator_next(it->iter)) != NULL) { - if (it->compare(&it->key_data, node, it->key_def) != 0) { - it->base.next = tree_iterator_ge; - return *(struct tuple **) node; - } +static struct tuple * +tree_iterator_bwd_check_equality(struct iterator *iterator) +{ + struct tree_iterator *it = tree_iterator(iterator); + tuple **res = bps_tree_itr_get_elem(it->tree, &it->bsp_tree_iter); + if (!res) + return 0; + if (tree_index_compare_key(*res, &it->key_data, it->key_def) != 0) { + it->bsp_tree_iter = bps_tree_invalid_iterator(); + return 0; } - - return NULL; + bps_tree_itr_prev(it->tree, &it->bsp_tree_iter); + return *res; } +static struct tuple * +tree_iterator_bwd_skip_one_check_next_equality(struct iterator *iterator) +{ + struct tree_iterator *it = tree_iterator(iterator); + bps_tree_itr_prev(it->tree, &it->bsp_tree_iter); + iterator->next = tree_iterator_bwd_check_equality; + return tree_iterator_bwd_check_equality(iterator); +} /* }}} */ /* {{{ TreeIndex **********************************************************/ -TreeIndex::TreeIndex(struct key_def *key_def) - : Index(key_def) +static void * +extent_alloc() { - memset(&tree, 0, 
sizeof tree); +#ifndef NDEBUG + ERROR_INJECT(ERRINJ_TREE_ALLOC, return 0); +#endif + return mempool_alloc(&tree_extent_pool); +} + +static void +extent_free(void *extent) +{ + return mempool_free(&tree_extent_pool, extent); +} + +TreeIndex::TreeIndex(struct key_def *key_def_arg) + : Index(key_def_arg) +{ + if (tree_index_count == 0) + mempool_create(&tree_extent_pool, &cord()->slabc, + BPS_TREE_EXTENT_SIZE); + tree_index_count++; + bps_tree_create(&tree, key_def, extent_alloc, extent_free); } TreeIndex::~TreeIndex() { - sptree_index_destroy(&tree); + tree_index_count--; + if (tree_index_count == 0) + mempool_destroy(&tree_extent_pool); } size_t TreeIndex::size() const { - return tree.size; + return bps_tree_size(&tree); } size_t TreeIndex::memsize() const { - return tree.size * (8 + sizeof(struct tuple *)); + return bps_tree_mem_used(&tree); } struct tuple * TreeIndex::random(uint32_t rnd) const { - void *node = sptree_index_random(&tree, rnd); - return sptree_index_unfold(node); + struct tuple **res = bps_tree_random(&tree, rnd); + return res ? *res : 0; } struct tuple * @@ -231,11 +237,11 @@ TreeIndex::findByKey(const char *key, uint32_t part_count) const { assert(key_def->is_unique && part_count == key_def->part_count); - struct sptree_index_key_data key_data; + struct key_data key_data; key_data.key = key; key_data.part_count = part_count; - void *node = sptree_index_find(&tree, &key_data); - return sptree_index_unfold(node); + struct tuple **res = bps_tree_find(&tree, &key_data); + return res ? *res : 0; } struct tuple * @@ -246,29 +252,28 @@ TreeIndex::replace(struct tuple *old_tuple, struct tuple *new_tuple, if (new_tuple) { struct tuple *dup_tuple = NULL; - void *p_dup_node = &dup_tuple; /* Try to optimistically replace the new_tuple. */ - int tree_res = - sptree_index_replace(&tree, &new_tuple, &p_dup_node); - if (tree_res) { - tnt_raise(ClientError, ER_MEMORY_ISSUE, tree_res, - "TreeIndex", "replace"); + bool tree_res = + bps_tree_insert_or_replace(&tree, new_tuple, &dup_tuple); + if (!tree_res) { + tnt_raise(ClientError, ER_MEMORY_ISSUE, + BPS_TREE_EXTENT_SIZE, "TreeIndex", "replace"); } errcode = replace_check_dup(old_tuple, dup_tuple, mode); if (errcode) { - sptree_index_delete(&tree, &new_tuple); + bps_tree_delete(&tree, new_tuple); if (dup_tuple) - sptree_index_replace(&tree, &dup_tuple, NULL); + bps_tree_insert_or_replace(&tree, dup_tuple, 0); tnt_raise(ClientError, errcode, index_id(this)); } if (dup_tuple) return dup_tuple; } if (old_tuple) { - sptree_index_delete(&tree, &old_tuple); + bps_tree_delete(&tree, old_tuple); } return old_tuple; } @@ -285,8 +290,9 @@ TreeIndex::allocIterator() const } it->key_def = key_def; - it->compare = tree.compare; + it->tree = &tree; it->base.free = tree_iterator_free; + it->bsp_tree_iter = bps_tree_invalid_iterator(); return (struct iterator *) it; } @@ -306,44 +312,52 @@ TreeIndex::initIterator(struct iterator *iterator, enum iterator_type type, tnt_raise(ClientError, ER_UNSUPPORTED, "Tree index", "requested iterator type"); type = iterator_type_is_reverse(type) ? 
ITER_LE : ITER_GE; - key = NULL; + key = 0; } it->key_data.key = key; it->key_data.part_count = part_count; - if (iterator_type_is_reverse(type)) { - int r = sptree_index_iterator_reverse_init_set(&tree, - &it->iter, &it->key_data); - if (r) - tnt_raise(ClientError, ER_MEMORY_ISSUE, - r, "TreeIndex", "init iterator"); + bool exact = false; + if (key == 0) { + if (iterator_type_is_reverse(type)) + it->bsp_tree_iter = bps_tree_invalid_iterator(); + else + it->bsp_tree_iter = bps_tree_itr_first(&tree); } else { - int r = sptree_index_iterator_init_set(&tree, - &it->iter, &it->key_data); - if (r) - tnt_raise(ClientError, ER_MEMORY_ISSUE, - r, "TreeIndex", "init iterator"); + if (type == ITER_ALL || type == ITER_EQ || type == ITER_GE || type == ITER_LT) { + it->bsp_tree_iter = bps_tree_lower_bound(&tree, &it->key_data, &exact); + if (type == ITER_EQ && !exact) { + it->base.next = tree_iterator_dummie; + return; + } + } else { // ITER_GT, ITER_REQ, ITER_LE + it->bsp_tree_iter = bps_tree_upper_bound(&tree, &it->key_data, &exact); + if (type == ITER_REQ && !exact) { + it->base.next = tree_iterator_dummie; + return; + } + } } switch (type) { case ITER_EQ: - it->base.next = tree_iterator_eq; + it->base.next = tree_iterator_fwd_check_next_equality; break; case ITER_REQ: - it->base.next = tree_iterator_req; + it->base.next = tree_iterator_bwd_skip_one_check_next_equality; break; case ITER_ALL: case ITER_GE: - it->base.next = tree_iterator_ge; + it->base.next = tree_iterator_fwd; break; case ITER_GT: - it->base.next = tree_iterator_gt; + it->base.next = tree_iterator_fwd; break; case ITER_LE: - it->base.next = tree_iterator_le; + it->base.next = tree_iterator_bwd_skip_one; break; case ITER_LT: - it->base.next = tree_iterator_lt; + it->base.next = tree_iterator_bwd_skip_one; break; default: tnt_raise(ClientError, ER_UNSUPPORTED, @@ -354,59 +368,23 @@ TreeIndex::initIterator(struct iterator *iterator, enum iterator_type type, void TreeIndex::beginBuild() { - tree.size = 0; - tree.max_size = 0; - tree.members = NULL; + assert(bps_tree_size(&tree) == 0); } void TreeIndex::reserve(uint32_t size_hint) { - assert(size_hint >= tree.size); - size_hint = MAX(size_hint, SPTREE_MIN_SIZE); - size_t sz = size_hint * sizeof(struct tuple *); - void *members = realloc(tree.members, sz); - if (members == NULL) { - tnt_raise(ClientError, ER_MEMORY_ISSUE, sz, - "TreeIndex::reserve()", "malloc"); - } - tree.members = members; - tree.max_size = size_hint; + (void)size_hint; } void TreeIndex::buildNext(struct tuple *tuple) { - if (tree.size >= tree.max_size) - reserve(tree.max_size * 2); - - struct tuple **node = (struct tuple **) tree.members + tree.size; - *node = tuple; - tree.size++; + bps_tree_insert_or_replace(&tree, tuple, 0); } void TreeIndex::endBuild() { - uint32_t n_tuples = tree.size; - - if (n_tuples) { - say_info("Sorting %" PRIu32 " keys in %s index %" PRIu32 "...", - n_tuples, index_type_strs[key_def->type], index_id(this)); - } - uint32_t estimated_tuples = tree.max_size; - void *nodes = tree.members; - - /* If n_tuples == 0 then estimated_tuples = 0, elem == NULL, tree is empty */ - int tree_res = - sptree_index_init(&tree, sizeof(struct tuple *), - nodes, n_tuples, estimated_tuples, - sptree_index_node_compare_with_key, - key_def->is_unique ? 
sptree_index_node_compare - : sptree_index_node_compare_dup, - key_def); - if (tree_res) { - panic("tree_init: failed to allocate %d bytes", tree_res); - } } diff --git a/src/box/tree_index.h b/src/box/tree_index.h index 57e78d4553c5946afdc809bdee508636310708c2..65cda04b1180e755cd3597fb8c22f552df210667 100644 --- a/src/box/tree_index.h +++ b/src/box/tree_index.h @@ -31,19 +31,25 @@ #include "index.h" -#include <third_party/qsort_arg.h> -#include <third_party/sptree.h> +struct tuple; +struct key_data; -/** - * Instantiate sptree definitions - */ -#ifdef NDEBUG -SPTREE_DEF(index, realloc, qsort_arg); -#else -void * -realloc_inject(void *ptr, size_t size); -SPTREE_DEF(index, realloc_inject, qsort_arg); -#endif +int +tree_index_compare(const struct tuple *a, const struct tuple *b, struct key_def *key_def); + +int +tree_index_compare_key(const tuple *a, const key_data *b, struct key_def *key_def); + +#define BPS_TREE_NAME _index +#define BPS_TREE_BLOCK_SIZE (512) +#define BPS_TREE_EXTENT_SIZE (16*1024) +#define BPS_TREE_COMPARE(a, b, arg) tree_index_compare(a, b, arg) +#define BPS_TREE_COMPARE_KEY(a, b, arg) tree_index_compare_key(a, b, arg) +#define bps_tree_elem_t struct tuple * +#define bps_tree_key_t struct key_data * +#define bps_tree_arg_t struct key_def * + +#include "salad/bps_tree.h" class TreeIndex: public Index { public: @@ -68,7 +74,7 @@ class TreeIndex: public Index { const char *key, uint32_t part_count) const; // protected: - sptree_index tree; + struct bps_tree tree; }; #endif /* TARANTOOL_BOX_TREE_INDEX_H_INCLUDED */ diff --git a/src/lib/salad/bps_tree.h b/src/lib/salad/bps_tree.h new file mode 100644 index 0000000000000000000000000000000000000000..8cfafb79f3114a8485f48d865cf1e14f94d3f0cc --- /dev/null +++ b/src/lib/salad/bps_tree.h @@ -0,0 +1,4333 @@ +#include <string.h> /* memmove, memset */ +#include <stdint.h> +#include <assert.h> +#include "small/pt_alloc.h" + +/* {{{ BPS-tree description */ +/** + * BPS-tree implementation. + * BPS-tree is an in-memory B+*-tree, i.e. B-tree with (+) and (*) + * variants. + * + * Useful links: + * http://en.wikipedia.org/wiki/B-tree + * http://en.wikipedia.org/wiki/B-tree#Variants + * http://en.wikipedia.org/wiki/B%2B_tree + * http://ru.wikipedia.org/wiki/B*-%D0%B4%D0%B5%D1%80%D0%B5%D0%B2%D0%BE + * + * BPS-tree stores specified elements orderly with specified + * compare function. + * + * The tree can be used to insert, replace, delete elements and + * search values by key. + * Search/modification of elements has logarithmic complexity, + * lg B (N). + * + * It also has iterator support, providing sequential access to + * elements in ascending and descending order. An iterator can be + * initialized by the first or last element of the tree, or by the + * lower/upper bound value of a key. Iteration has constant + * complexity. + * + * The main features of the tree are: + * + * 1) It could be very compact. BPS-tree consumes the amount of + * memory mostly proportional to (!) the maximal payload of the + * tree. In other words, if a thee contains N elements of size + * S, and maximum of N over a lifetime + * of the tree is Nmax, then the consumed memory is asymptotically + * proportional to (Nmax*S). + * + * In practice, a well configured BPS-tree consumes about 120% + * of payload asymptotically when the tree is randomly filled, + * i.e. has about 20% of memory overhead on big amounts of + * data. 
+ * + * In a rather bad case, when the tree is filled with + * monotonically increasing values, the asymptotic overhead is + * that about 40% of the payload, and the theoretical maximal + * asymptotic overhead is about 60% of the payload. + * + * The theoretical minimal asymptotic overhead is about 0% :) + * + * However, and it could be important, if a tree is first + * filled up and then emptied (but not destroyed), it still + * consumes the amount of memory used to index the now + * deleted elements. + * + * The tree iterator structure occupies only 6 bytes of memory + * (with probable padding by the compiler up to 8 bytes). + * + * 2) It has a low cache-miss rate. A look up in the tree boils + * down to to search in H blocks, where H is the height of the + * tree, and can be bound by log(N) / log(K), where N is the + * size of the tree and K is the average number of elements in + * a block. For example, with 8-byte values and 512-byte blocks, + * the tree with a million of elements will probably have height + * of 4, and the tree with a billion of elements will probably have + * height of 6. + * 3) Successful insertion into the tree or deletion of an element + * can break any of this tree's active iterators. + * Nevertheless, dealing with broken iterators never leads to memory + * access violation; the element, returned by the iterator is always + * valid (the tree contains the value) and iteration never leads + * to an infinite loop. + * Note, that replacement of an element does not break an iterator + * at all. + * Note also, that using an uninitialised iterator indeed leads to + * memory access violation. + * + * Setup and usage: + * + * 1) Define all macros like in the example below before including + * this header. See "BPS-tree interface settings" section for + * details. Example: + * + * #define BPS_TREE_NAME + * #define BPS_TREE_BLOCK_SIZE 512 + * #define BPS_TREE_EXTENT_SIZE 16*1024 + * #define BPS_TREE_COMPARE(a, b, context) my_compare(a, b, context) + * #define BPS_TREE_COMPARE_KEY(a, b, context) my_compare_key(a, b, context) + * #define bps_tree_elem_t struct tuple * + * #define bps_tree_key_t struct key_t * + * #define bps_tree_arg_t struct compare_context * + * + * 2) Use structs and functions from the list below. + * See "BPS-tree interface" section for details. 
Here is short list: + * // types: + * struct bps_tree; + * struct bps_tree_iterator; + * typedef void *(*bps_tree_extent_alloc_f)(); + * typedef void (*bps_tree_extent_free_f)(void *); + * // base: + * void bps_tree_create(tree, arg, extent_alloc_func, extent_free_func); + * void bps_tree_destroy(tree); + * bps_tree_elem_t *bps_tree_find(tree, key); + * bool bps_tree_insert_or_replace(tree, new_elem, replaced_elem); + * bool bps_tree_delete(tree, elem); + * size_t bps_tree_size(tree); + * size_t bps_tree_mem_used(tree); + * bps_tree_elem_t *bps_tree_random(tree, rnd); + * int bps_tree_debug_check(tree); + * // iterators: + * struct bps_tree_iterator bps_tree_invalid_iterator(); + * bool bps_tree_itr_is_invalid(itr); + * bool bps_tree_itr_are_equal(tree, itr1, itr2); + * struct bps_tree_iterator bps_tree_itr_first(tree); + * struct bps_tree_iterator bps_tree_itr_last(tree); + * struct bps_tree_iterator bps_tree_lower_bound(tree, key, exact); + * struct bps_tree_iterator bps_tree_upper_bound(tree, key, exact); + * bps_tree_elem_t *bps_tree_itr_get_elem(tree, itr); + * bool bps_tree_itr_next(tree, itr); + * bool bps_tree_itr_prev(tree, itr); + */ +/* }}} */ + +/* {{{ BPS-tree interface settings */ +/** + * Custom name for structs and functions. + * Struct and functions will have bps_tree##BPS_TREE_NAME name or prefix. + * For example one can #define BPS_TREE_NAME _test, and use then + * struct bps_tree_test my_tree; + * bps_tree_test_create(&my_tree, ...); + * Allowed to be empty (just #define BPS_TREE_NAME) + * TODO: does not used yet + */ +#ifndef BPS_TREE_NAME +#error "BPS_TREE_NAME must be defined" +#endif + +/** + * Size of a block of the tree. A block should be large enough to contain + * dozens of elements and dozens of 32-bit identifiers. + * Must be a power of 2, i.e. log2(BPS_TREE_BLOCK_SIZE) must be an integer. + * Tests show that for 64-bit elements, an ideal block size is 512 bytes + * if binary search is used, and 256 bytes if linear search is used. + * (see below for the binary/linear search setting) + * Example: + * #define BPS_TREE_BLOCK_SIZE 512 + */ +#ifndef BPS_TREE_BLOCK_SIZE +#error "BPS_TREE_BLOCK_SIZE must be defined" +#endif + +/** + * Allocation granularity. The tree allocates memory by extents of + * that size. Must be power of 2, i.e. log2(BPS_TREE_EXTENT_SIZE) + * must be a whole number. + * Two important things: + * + * 1) The maximal amount of memory, that particular btee instance + * can use, is + * ( (BPS_TREE_EXTENT_SIZE ^ 3) / (sizeof(void *) ^ 2) ) + * + * 2) The first insertion of an element leads to immidiate + * allocation of three extents. Thus, memory overhead of almost + * empty tree is + * 3 * BPS_TREE_EXTENT_SIZE + * + * Example: + * #define BPS_TREE_EXTENT_SIZE 8*1024 + */ +#ifndef BPS_TREE_EXTENT_SIZE +#error "BPS_TREE_EXTENT_SIZE must be defined" +#endif + +/** + * Type of the tree element. Must be POD. The implementation + * copies elements by memmove and assignment operator and + * compares them with comparators defined below, and also + * could be compared with operator == Example: + * #define bps_tree_elem_t struct tuple * + */ +#ifndef bps_tree_elem_t +#error "bps_tree_elem_t must be defined" +#endif + +/** + * Type of tree key. Must be POD. Used for finding an element in + * the tree and in iterator initialization. + * Example: + * #define bps_tree_key_t struct key_data * + */ +#ifndef bps_tree_key_t +#error "bps_tree_key_t must be defined" +#endif + +/** + * Type of comparison additional argument. 
The argument of this + * type is initialized during tree creation and then passed to + * compare function. If it is non necessary, define as int and + * forget. Example: + * + * #define bps_tree_arg_t struct key_def * + */ +#ifndef bps_tree_arg_t +#define bps_tree_arg_t int +#endif + +/** + * Function to compare elements. + * Parameters: two elements and an additional argument, specified + * for the tree instance. See struct bps_tree members for details. + * Must return int-compatible value, like strcmp or memcmp + * Examples: + * #define BPS_TREE_COMPARE(a, b, arg) ((a) < (b) ? -1 : (a) > (b)) + * #define BPS_TREE_COMPARE(a, b, arg) my_compare(a, b, arg) + */ +#ifndef BPS_TREE_COMPARE +#error "BPS_TREE_COMPARE must be defined" +#endif + +/** + * Function to compare an element with a key. + * Parameters: element, key and an additional argument, specified + * for the tree instance. See struct bps_tree members for details. + * Must return int-compatible value, like strcmp or memcmp + * Examples: + * #define BPS_TREE_COMPARE_KEY(a, b, arg) ((a) < (b) ? -1 : (a) > (b)) + * #define BPS_TREE_COMPARE_KEY(a, b, arg) BPS_TREE_COMPARE(a, b, arg) + */ +#ifndef BPS_TREE_COMPARE_KEY +#error "BPS_TREE_COMPARE_KEY must be defined" +#endif + +/** + * A switch to define the type of search in an array elements. + * By default, bps_tree uses binary search to find a particular + * element in a block. But if the element type is simple + * (like an integer or float) it could be significantly faster to + * use linear search. To turn on the linear search + * #define BPS_BLOCK_LINEAR_SEARCH + */ +#ifdef BPS_BLOCK_LINEAR_SEARCH +#pragma message("Btree: using linear search") +#endif +/* }}} */ + +/* {{{ BPS-tree internal settings */ +typedef int16_t bps_tree_pos_t; +typedef uint32_t bps_tree_block_id_t; +/* }}} */ + +/* {{{ Compile time utils */ +/** + * Concatenation of name at compile time + */ +#ifndef CONCAT +#define CONCAT_R(a, b) a##b +#define CONCAT(a, b) CONCAT_R(a, b) +#define CONCAT4_R(a, b, c, d) a##b##c##d +#define CONCAT4(a, b, c, d) CONCAT4_R(a, b, c, d) +#endif +/** + * Compile time assertion for use in function blocks + */ +#ifndef CT_ASSERT +#define CT_ASSERT(e) do { typedef char __ct_assert[(e) ? 1 : -1]; } while(0) +#endif +/** + * Compile time assertion for use in global scope (and in class scope) + */ +#ifndef CT_ASSERT_G +#define CT_ASSERT_G(e) typedef char CONCAT(__ct_assert_, __LINE__)[(e) ? 1 :-1] +#endif +/* }}} */ + +/* {{{ Macros for custom naming of structs and functions */ +#define _bps(postfix) CONCAT4(bps, BPS_TREE_NAME, _, postfix) +#define _bps_tree(postfix) CONCAT4(bps_tree, BPS_TREE_NAME, _, postfix) +#define _BPS(postfix) CONCAT4(BPS, BPS_TREE_NAME, _, postfix) +#define _BPS_TREE(postfix) CONCAT4(BPS_TREE, BPS_TREE_NAME, _, postfix) +/* }}} */ + +/* {{{ BPS-tree interface (declaration) */ + +/** + * struct bps_block forward declaration (Used in struct bps_tree) + */ +struct bps_block; + +/** + * Main tree struct. One instance - one tree. + */ +struct bps_tree { + /* Pointer to root block. Is NULL in empty tree. */ + bps_block *root; + /* ID of root block. Undefined in empty tree. */ + bps_tree_block_id_t root_id; + /* IDs of first and last block. (-1) in empty tree. */ + bps_tree_block_id_t first_id, last_id; + /* Counters of used blocks and garbaged blocks */ + bps_tree_block_id_t leaf_count, inner_count, garbage_count; + /* Depth (height?) of a tee. Is 0 in empty tree. 
 */
+	bps_tree_block_id_t depth;
+	/* Number of elements in tree */
+	size_t size;
+	/* Head of list of garbaged blocks */
+	struct bps_garbage *garbage_head;
+	/* User-provided argument for comparator */
+	bps_tree_arg_t arg;
+	/* Copy of maximal element in tree. Used for beauty */
+	bps_tree_elem_t max_elem;
+	/* Special allocator of blocks and their IDs */
+	pt3 pt_allocator;
+};
+
+/**
+ * Tree iterator. Points to an element in the tree.
+ * There are 4 possible states of an iterator:
+ * 1) Normal. Points to a concrete element in the tree.
+ * 2) Invalid. Points to nothing. Safe.
+ * 3) Broken. A normal iterator can become broken during tree modification.
+ *  Safe to use, but has undefined behavior.
+ * 4) Uninitialized (or initialized in a wrong way).
+ *  Unsafe and undefined behaviour.
+ */
+struct bps_tree_iterator {
+	/* ID of the block containing the element. -1 for an invalid iterator */
+	bps_tree_block_id_t block_id;
+	/* Position of an element in the block. Could be -1 for last in block */
+	bps_tree_pos_t pos;
+};
+
+/**
+ * Pointer to a function that allocates an extent of size BPS_TREE_EXTENT_SIZE.
+ * BPS-tree properly handles a NULL result but could leak memory
+ * in case of exception.
+ */
+typedef void *(*bps_tree_extent_alloc_f)();
+
+/**
+ * Pointer to a function that frees an extent (of size BPS_TREE_EXTENT_SIZE)
+ */
+typedef void (*bps_tree_extent_free_f)(void *);
+
+/**
+ * @brief Tree construction. Fills struct bps_tree members.
+ * @param tree - pointer to a tree
+ * @param arg - user defined argument for comparator
+ * @param extent_alloc_func - pointer to a function that allocates extents,
+ *  see bps_tree_extent_alloc_f description for details
+ * @param extent_free_func - pointer to a function that frees extents,
+ *  see bps_tree_extent_free_f description for details
+ */
+void
+bps_tree_create(struct bps_tree *tree, bps_tree_arg_t arg,
+		bps_tree_extent_alloc_f extent_alloc_func,
+		bps_tree_extent_free_f extent_free_func);
+
+/**
+ * @brief Tree destruction. Frees allocated memory.
+ * @param tree - pointer to a tree
+ */
+void
+bps_tree_destroy(struct bps_tree *tree);
+
+/**
+ * @brief Find the first element that is equal to the key (comparator returns 0)
+ * @param tree - pointer to a tree
+ * @param key - key that will be compared with elements
+ * @return pointer to the first equal element or NULL if not found
+ */
+bps_tree_elem_t *
+bps_tree_find(const struct bps_tree *tree, bps_tree_key_t key);
+
+/**
+ * @brief Insert an element into the tree or replace an element in the tree.
+ * In case of replacing, if the 'replaced' argument is not NULL,
+ * it is filled with the replaced element; in case of inserting it is untouched.
+ * Thus one can distinguish a real insert from a replace by passing a pointer
+ * to some value: if it was changed during the call, a replace happened,
+ * otherwise it was an insert.
+ * @param tree - pointer to a tree
+ * @param new_elem - element to insert or to replace with
+ * @param replaced - optional pointer for the replaced element
+ * @return - true on success or false if memory allocation failed for insert
+ */
+bool
+bps_tree_insert_or_replace(struct bps_tree *tree, bps_tree_elem_t new_elem,
+		bps_tree_elem_t *replaced);
+
+/**
+ * @brief Delete an element from a tree.
+ * @param tree - pointer to a tree
+ * @param elem - the element to delete
+ * @return - true on success or false if the element was not found in tree
+ */
+bool
+bps_tree_delete(struct bps_tree *tree, bps_tree_elem_t elem);
+
+/**
+ * @brief Get size of tree, i.e. the number of elements in the tree
+ * @param tree - pointer to a tree
+ * @return - count of elements in the tree
+ */
+size_t
+bps_tree_size(const struct bps_tree *tree);
+
+/**
+ * @brief Get amount of memory in bytes that the tree is using
+ * (not including sizeof(struct bps_tree))
+ * @param tree - pointer to a tree
+ * @return - amount of used memory in bytes
+ */
+size_t
+bps_tree_mem_used(const struct bps_tree *tree);
+
+/**
+ * @brief Get a random element in a tree.
+ * @param tree - pointer to a tree
+ * @param rnd - some random value
+ * @return - pointer to a random element, or NULL if the tree is empty
+ */
+bps_tree_elem_t *
+bps_tree_random(const struct bps_tree *tree, size_t rnd);
+
+/**
+ * @brief Get an invalid iterator. See iterator description.
+ * @return - Invalid iterator
+ */
+struct bps_tree_iterator
+bps_tree_invalid_iterator();
+
+/**
+ * @brief Check if an iterator is invalid. See iterator description.
+ * @param itr - iterator to check
+ * @return - true if iterator is invalid, false otherwise
+ */
+bool
+bps_tree_itr_is_invalid(struct bps_tree_iterator *itr);
+
+/**
+ * @brief Compare two iterators and return true if they point to the same element.
+ * Two invalid iterators are equal and point to the same nowhere.
+ * A broken iterator is possibly not equal to any valid or invalid iterator.
+ * @param tree - pointer to a tree
+ * @param itr1 - first iterator
+ * @param itr2 - second iterator
+ * @return - true if iterators are equal, false otherwise
+ */
+bool
+bps_tree_itr_are_equal(const struct bps_tree *tree,
+		struct bps_tree_iterator itr1,
+		struct bps_tree_iterator itr2);
+
+/**
+ * @brief Get an iterator to the first element of the tree
+ * @param tree - pointer to a tree
+ * @return - First iterator. Could be invalid if the tree is empty.
+ */
+struct bps_tree_iterator
+bps_tree_itr_first(const struct bps_tree *tree);
+
+/**
+ * @brief Get an iterator to the last element of the tree
+ * @param tree - pointer to a tree
+ * @return - Last iterator. Could be invalid if the tree is empty.
+ */
+struct bps_tree_iterator
+bps_tree_itr_last(const struct bps_tree *tree);
+
+/**
+ * @brief Get an iterator to the first element that is greater than
+ * or equal to the key
+ * @param tree - pointer to a tree
+ * @param key - key that will be compared with elements
+ * @param exact - pointer to a bool value, that will be set to true if
+ *  the element pointed to by the iterator is equal to the key, false otherwise.
+ *  Pass NULL if you don't need that info.
+ * @return - Lower-bound iterator. Invalid if all elements are less than key.
+ */
+struct bps_tree_iterator
+bps_tree_lower_bound(const struct bps_tree *tree, bps_tree_key_t key,
+		bool *exact);
+
+/**
+ * @brief Get an iterator to the first element that is greater than the key
+ * @param tree - pointer to a tree
+ * @param key - key that will be compared with elements
+ * @param exact - pointer to a bool value, that will be set to true if
+ *  the element pointed to by the (!) previous iterator is equal to the key,
+ *  false otherwise. Pass NULL if you don't need that info.
+ * @return - Upper-bound iterator. Invalid if all elements are less than or
+ *  equal to the key.
+ */
+struct bps_tree_iterator
+bps_tree_upper_bound(const struct bps_tree *tree, bps_tree_key_t key,
+		bool *exact);
+
+/**
+ * @brief Get a pointer to the element pointed to by the iterator.
+ * If the iterator is detected as broken, it is invalidated and NULL returned.
+ * @param tree - pointer to a tree
+ * @param itr - pointer to tree iterator
+ * @return - Pointer to the element.
Null for invalid iterator + */ +bps_tree_elem_t * +bps_tree_itr_get_elem(const struct bps_tree *tree, + struct bps_tree_iterator itr); + +/** + * @brief Increments an iterator, makes it point to the next element + * If the iterator is to last element, it will be invalidated + * If the iterator is detected as broken, it will be invalidated. + * If the iterator is invalid, then it will be set to first element. + * @param tree - pointer to a tree + * @param itr - pointer to tree iterator + * @return - true on success, false if a resulted iterator is set to invalid + */ +bool +bps_tree_itr_next(const struct bps_tree *tree, struct bps_tree_iterator *itr); + +/** + * @brief Decrements an iterator, makes it point to the previous element + * If the iterator is to first element, it will be invalidated + * If the iterator is detected as broken, it will be invalidated. + * If the iterator is invalid, then it will be set to last element. + * @param tree - pointer to a tree + * @param itr - pointer to tree iterator + * @return - true on success, false if a resulted iterator is set to invalid + */ +bool +bps_tree_itr_prev(const struct bps_tree *tree, struct bps_tree_iterator *itr); + +/** + * @brief Debug self-checking. Returns bitmask of found errors (0 + * on success). + * I hope you will not need it. + * @param tree - pointer to a tree + * @return - Bitwise-OR of all errors found + */ +int +bps_tree_debug_check(const struct bps_tree *tree); +/* }}} */ + + +/* {{{ BPS-tree implementation (definition) */ + +/* Data moving */ +#ifndef NDEBUG +/* Debug version checks buffer overflow an runtime */ +#define BPS_TREE_MEMMOVE(dst, src, num, dst_block, src_block) \ + bps_tree_debug_memmove(dst, src, num, dst_block, src_block) +#else +/* Release version just moves memory */ +#define BPS_TREE_MEMMOVE(dst, src, num, dst_block, src_block) \ + memmove(dst, src, num) +#endif +/* Same as BPS_TREE_MEMMOVE but takes count of values instead of memory size */ +#define BPS_TREE_DATAMOVE(dst, src, num, dst_bck, src_bck) \ + BPS_TREE_MEMMOVE(dst, src, (num) * sizeof((dst)[0]), dst_bck, src_bck) + +/** + * Types of a block + */ +enum bps_block_type { + BPS_TREE_BT_GARBAGE = 1, + BPS_TREE_BT_INNER = 2, + BPS_TREE_BT_LEAF = 4 +}; + +/** + * Header for bps_leaf, bps_inner or bps_garbage blocks + */ +struct bps_block { + /* Type of a block. See bps_block_type. Used for iterators and debug */ + bps_tree_pos_t type; + /* Count of elements for leaf, and of childs for inner nodes */ + bps_tree_pos_t size; +}; + +/** + * Calculation of max sizes (max count + 1) + */ +enum bps_tree_max_sizes { + BPS_TREE_MAX_COUNT_IN_LEAF = + ( BPS_TREE_BLOCK_SIZE - sizeof(struct bps_block) + - 2 * sizeof(bps_tree_block_id_t) ) + / sizeof(bps_tree_elem_t), + BPS_TREE_MAX_COUNT_IN_INNER = + (BPS_TREE_BLOCK_SIZE - sizeof(struct bps_block)) + / (sizeof(bps_tree_elem_t) + sizeof(bps_tree_block_id_t)) +}; + +/** + * Leaf block definition. + * Contains array of element on the last level of the tree + */ +struct bps_leaf { + /* Block header */ + bps_block header; + /* Next leaf block ID in ordered linked list */ + bps_tree_block_id_t next_id; + /* Previous leaf block ID in ordered linked list */ + bps_tree_block_id_t prev_id; + /* Ordered array of elements */ + bps_tree_elem_t elems[BPS_TREE_MAX_COUNT_IN_LEAF]; +}; + +/** + * Stop compile if smth went terribly wrong + */ +CT_ASSERT_G(sizeof(struct bps_leaf) <= BPS_TREE_BLOCK_SIZE); + +/** + * Inner block definition. 
+ * Contains an array of child (inner of leaf) IDs, and array of + * copies of maximal elements of the corresponding subtrees. Only + * last child subtree does not have corresponding element copy in + * this array (but it has a copy of maximal element somewhere in + * parent's arrays on in tree struct) + */ +struct bps_inner { + /* Block header */ + bps_block header; + /* Ordered array of elements. Note -1 in size. See struct descr. */ + bps_tree_elem_t elems[BPS_TREE_MAX_COUNT_IN_INNER - 1]; + /* Corresponding child IDs */ + bps_tree_block_id_t child_ids[BPS_TREE_MAX_COUNT_IN_INNER]; +}; + +/** + * Stop compile if smth went terribly wrong + */ +CT_ASSERT_G(sizeof(struct bps_inner) <= BPS_TREE_BLOCK_SIZE); + +/** + * Garbaged block definition + */ +struct bps_garbage { + /* Block header */ + bps_block header; + /* Stored id of this block */ + bps_tree_block_id_t id; + /* Next garbaged block in single-linked list */ + struct bps_garbage *next; +}; + +/** + * Stop compile if smth went terribly wrong + */ +CT_ASSERT_G(sizeof(struct bps_garbage) <= BPS_TREE_BLOCK_SIZE); + +/** + * Struct for collecting path in tree, corresponds to one inner block + */ +struct bps_inner_path_elem { + /* Pointer to block */ + struct bps_inner *block; + /* ID of the block */ + bps_tree_block_id_t block_id; + /* Position of next path element in block's child_ids array */ + bps_tree_pos_t insertion_point; + /* Position of this path element in parent's child_ids array */ + bps_tree_pos_t pos_in_parent; + /* Pointer to parent block (NULL for root) */ + struct bps_inner_path_elem *parent; + /* Pointer to the sequent to the max element in the subtree */ + bps_tree_elem_t *max_elem_copy; +}; + +/** + * An auxiliary struct to collect a path in tree, + * corresponds to one leaf block/one element of the path. + * + */ +struct bps_leaf_path_elem { + /* A pointer to the block */ + struct bps_leaf *block; + /* ID of the block */ + bps_tree_block_id_t block_id; + /* Position of the next path element in block's child_ids array */ + bps_tree_pos_t insertion_point; + /* Position of this path element in parent's child_ids array */ + bps_tree_pos_t pos_in_parent; + /* A pointer to the parent block (NULL for root) */ + bps_inner_path_elem *parent; + /* A pointer to the sequent to the max element in the subtree */ + bps_tree_elem_t *max_elem_copy; +}; + +/** + * @brief Tree construction. Fills struct bps_tree members. + * @param tree - pointer to a tree + * @param arg - user defined argument for comparator + * @param extent_alloc_func - pointer to function that allocates extents, + * see bps_tree_extent_alloc_f description for details + * @param extent_free_func - pointer to function that allocates extents, + * see bps_tree_extent_free_f description for details + */ +inline void +bps_tree_create(struct bps_tree *tree, bps_tree_arg_t arg, + bps_tree_extent_alloc_f extent_alloc_func, + bps_tree_extent_free_f extent_free_func) +{ + tree->root = 0; + tree->first_id = (bps_tree_block_id_t)(-1); + tree->last_id = (bps_tree_block_id_t)(-1); + tree->leaf_count = 0; + tree->inner_count = 0; + tree->garbage_count = 0; + tree->depth = 0; + tree->size = 0; + tree->garbage_head = 0; + tree->arg = arg; + + pt3_construct(&tree->pt_allocator, + BPS_TREE_EXTENT_SIZE, BPS_TREE_BLOCK_SIZE, + extent_alloc_func, extent_free_func); +} + +/** + * @brief Tree destruction. Frees allocated memory. 
+ * @param tree - pointer to a tree + */ +inline void +bps_tree_destroy(struct bps_tree *tree) +{ + pt3_destroy(&tree->pt_allocator); +} + +/** + * @brief Get size of tree, i.e. count of elements in tree + * @param tree - pointer to a tree + * @return - count count of elements in tree + */ +inline size_t +bps_tree_size(const struct bps_tree *tree) +{ + return tree->size; +} + +/** + * @brief Get amount of memory in bytes that the tree is using + * (not including sizeof(struct bps_tree)) + * @param tree - pointer to a tree + * @return - count count of elements in tree + */ +inline size_t +bps_tree_mem_used(const struct bps_tree *tree) +{ + size_t res = pt3_extents_count(&tree->pt_allocator); + res *= BPS_TREE_EXTENT_SIZE; + return res; +} + +/** + * @brief Get a pointer to block by it's ID. + */ +static inline bps_block * +bps_tree_restore_block(const struct bps_tree *tree, bps_tree_block_id_t id) +{ + return (bps_block *)pt3_get(&tree->pt_allocator, id); +} + +/** + * @brief Get a random element in a tree. + * @param tree - pointer to a tree + * @param rnd - some random value + * @return - count count of elements in tree + */ +inline bps_tree_elem_t * +bps_tree_random(const struct bps_tree *tree, size_t rnd) +{ + if (!tree->root) + return 0; + + bps_block *block = tree->root; + + for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) { + struct bps_inner * inner = (struct bps_inner *)block; + bps_tree_pos_t pos = rnd % inner->header.size; + rnd /= inner->header.size; + block = bps_tree_restore_block(tree, inner->child_ids[pos]); + } + + struct bps_leaf *leaf = (struct bps_leaf *)block; + bps_tree_pos_t pos = rnd % leaf->header.size; + return leaf->elems + pos; +} + +/** + * @brief Find the lowest element in sorted array that is >= than the key + * @param tree - pointer to a tree + * @param arr - array of elements + * @param size - size of the array + * @param key - key to find + * @param exact - point to bool that receives true if equal element was found + */ +static inline bps_tree_pos_t +bps_tree_find_ins_point_key(const struct bps_tree *tree, bps_tree_elem_t *arr, + size_t size, bps_tree_key_t key, bool *exact) +{ + (void)tree; + bps_tree_elem_t *begin = arr; + bps_tree_elem_t *end = arr + size; + *exact = false; +#ifdef BPS_BLOCK_LINEAR_SEARCH + while (begin != end) { + int res = BPS_TREE_COMPARE_KEY(*begin, key, tree->arg); + if (res >= 0) { + *exact = res == 0; + return (bps_tree_pos_t)(begin - arr); + } + ++begin; + } + return (bps_tree_pos_t)(begin - arr); +#else + while (begin != end) { + bps_tree_elem_t *mid = begin + (end - begin) / 2; + int res = BPS_TREE_COMPARE_KEY(*mid, key, tree->arg); + if (res > 0) { + end = mid; + } else if (res < 0) { + begin = mid + 1; + } else { + *exact = true; + end = mid; + /* Equal found, continue search for lowest equal */ + } + } + return (bps_tree_pos_t)(end - arr); +#endif +} + +/** + * @brief Find the lowest element in sorted array that is >= than the elem + * @param tree - pointer to a tree + * @param arr - array of elements + * @param size - size of the array + * @param elem - element to find + * @param exact - point to bool that receives true if equal + * element was found + */ +static inline bps_tree_pos_t +bps_tree_find_ins_point_elem(const struct bps_tree *tree, bps_tree_elem_t *arr, + size_t size, bps_tree_elem_t elem, bool *exact) +{ + (void)tree; + bps_tree_elem_t *begin = arr; + bps_tree_elem_t *end = arr + size; + *exact = false; +#ifdef BPS_BLOCK_LINEAR_SEARCH + while (begin != end) { + int res = BPS_TREE_COMPARE(*begin, elem, 
tree->arg); + if (res >= 0) { + *exact = res == 0; + return (bps_tree_pos_t)(begin - arr); + } + ++begin; + } + return (bps_tree_pos_t)(begin - arr); +#else + while (begin != end) { + bps_tree_elem_t *mid = begin + (end - begin) / 2; + int res = BPS_TREE_COMPARE(*mid, elem, tree->arg); + if (res > 0) { + end = mid; + } else if (res < 0) { + begin = mid + 1; + } else { + *exact = true; + /* Since elements are unique in array, stop search */ + return (bps_tree_pos_t)(mid - arr); + } + } + return (bps_tree_pos_t)(end - arr); +#endif +} + +/** + * @brief Find the lowest element in sorted array that is greater + * than the key. + * @param tree - pointer to a tree + * @param arr - array of elements + * @param size - size of the array + * @param key - key to find + * @param exact - point to bool that receives true if equal + * element is present + */ +static inline bps_tree_pos_t +bps_tree_find_after_ins_point_key(const struct bps_tree *tree, + bps_tree_elem_t *arr, size_t size, + bps_tree_key_t key, bool *exact) +{ + (void)tree; + bps_tree_elem_t *begin = arr; + bps_tree_elem_t *end = arr + size; + *exact = false; +#ifdef BPS_BLOCK_LINEAR_SEARCH + while (begin != end) { + int res = BPS_TREE_COMPARE_KEY(*begin, key, tree->arg); + if (res == 0) + *exact = true; + else if (res > 0) + return (bps_tree_pos_t)(begin - arr); + ++begin; + } + return (bps_tree_pos_t)(begin - arr); +#else + while (begin != end) { + bps_tree_elem_t *mid = begin + (end - begin) / 2; + int res = BPS_TREE_COMPARE_KEY(*mid, key, tree->arg); + if (res > 0) { + end = mid; + } else if (res < 0) { + begin = mid + 1; + } else { + *exact = true; + begin = mid + 1; + } + } + return (bps_tree_pos_t)(end - arr); +#endif +} + +/** + * @brief Get an invalid iterator. See iterator description. + * @return - Invalid iterator + */ +inline struct bps_tree_iterator +bps_tree_invalid_iterator() +{ + struct bps_tree_iterator res; + res.block_id = (bps_tree_block_id_t)(-1); + res.pos = 0; + return res; +} + +/** + * @brief Check if an iterator is invalid. See iterator + * description. + * @param itr - iterator to check + * @return - true if iterator is invalid, false otherwise + */ +inline bool +bps_tree_itr_is_invalid(struct bps_tree_iterator *itr) +{ + return itr->block_id == (bps_tree_block_id_t)(-1); +} + +/** + * @brief Check for a validity of an iterator and return pointer + * to the leaf. Position is also checked an (-1) is converted to + * position to last element. If smth is wrong, iterator is + * invalidated and NULL returned. + */ +static inline struct bps_leaf * +bps_tree_get_leaf_safe(const struct bps_tree *tree, + struct bps_tree_iterator *itr) +{ + if (itr->block_id == (bps_tree_block_id_t)(-1)) + return 0; + + bps_block *block = bps_tree_restore_block(tree, itr->block_id); + if (block->type != BPS_TREE_BT_LEAF) { + itr->block_id = (bps_tree_block_id_t)(-1); + return 0; + } + if (itr->pos == (bps_tree_pos_t)(-1)) { + itr->pos = block->size - 1; + } else if (itr->pos >= block->size) { + itr->block_id = (bps_tree_block_id_t)(-1); + return 0; + } + return (struct bps_leaf *)block; +} + +/** + * @brief Compare two iterators and return true if trey point to + * the same element. + * Two invalid iterators are equal and point to the same nowhere. + * A broken iterator is possibly not equal to any valid or invalid + * iterators. 
+ * @param tree - pointer to a tree + * @param itr1 - first iterator + * @param itr2 - second iterator + * @return - true if iterators are equal, false otherwise + */ +inline bool +bps_tree_itr_are_equal(const struct bps_tree *tree, + struct bps_tree_iterator *itr1, + struct bps_tree_iterator *itr2) +{ + if (bps_tree_itr_is_invalid(itr1) && bps_tree_itr_is_invalid(itr2)) + return true; + if (bps_tree_itr_is_invalid(itr1) || bps_tree_itr_is_invalid(itr2)) + return false; + if (itr1->block_id == itr2->block_id && itr1->pos == itr2->pos) + return true; + if (itr1->pos == (bps_tree_pos_t)(-1)) { + struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr1); + if (!leaf) + return false; + itr1->pos = leaf->header.size - 1; + if (itr1->block_id == itr2->block_id && itr1->pos == itr2->pos) + return true; + } + if (itr2->pos == (bps_tree_pos_t)(-1)) { + struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr2); + if (!leaf) + return false; + itr2->pos = leaf->header.size - 1; + if (itr1->block_id == itr2->block_id && itr1->pos == itr2->pos) + return true; + } + return false; +} + +/** + * @brief Get an iterator to the first element of the tree + * @param tree - pointer to a tree + * @return - First iterator. Could be invalid if the tree is empty. + */ +inline struct bps_tree_iterator +bps_tree_itr_first(const struct bps_tree *tree) +{ + struct bps_tree_iterator itr; + itr.block_id = tree->first_id; + itr.pos = 0; + return itr; +} + +/** + * @brief Get an iterator to the last element of the tree. + * @param tree - pointer to a tree + * @return - Last iterator. Could be invalid if the tree is empty. + */ +inline struct bps_tree_iterator +bps_tree_itr_last(const struct bps_tree *tree) +{ + struct bps_tree_iterator itr; + itr.block_id = tree->last_id; + itr.pos = (bps_tree_pos_t)(-1); + return itr; +} + +/** + * @brief Get an iterator to the first element that is greater + * than or equal to the key. + * @param tree - pointer to a tree + * @param key - key that will be compared with elements + * @param exact - pointer to a bool value, that will be set to true if + * and element pointed by the iterator is equal to the key, false otherwise + * Pass NULL if you don't need that info. + * @return - Lower-bound iterator. Invalid if all elements are less than key. 
+ */ +inline struct bps_tree_iterator +bps_tree_lower_bound(const struct bps_tree *tree, bps_tree_key_t key, + bool *exact) +{ + struct bps_tree_iterator res; + bool local_result; + if (!exact) + exact = &local_result; + *exact = false; + if (!tree->root) { + res.block_id = (bps_tree_block_id_t)(-1); + res.pos = 0; + return res; + } + bps_block *block = tree->root; + bps_tree_block_id_t block_id = tree->root_id; + for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) { + struct bps_inner *inner = (struct bps_inner *)block; + bps_tree_pos_t pos; + pos = bps_tree_find_ins_point_key(tree, inner->elems, + inner->header.size - 1, + key, exact); + block_id = inner->child_ids[pos]; + block = bps_tree_restore_block(tree, block_id); + } + + struct bps_leaf *leaf = (struct bps_leaf *)block; + bps_tree_pos_t pos; + pos = bps_tree_find_ins_point_key(tree, leaf->elems, leaf->header.size, + key, exact); + if (pos >= leaf->header.size) { + res.block_id = leaf->next_id; + res.pos = 0; + } else { + res.block_id = block_id; + res.pos = pos; + } + return res; +} + +/** + * @brief Get an iterator to the first element that is greater than key + * @param tree - pointer to a tree + * @param key - key that will be compared with elements + * @param exact - pointer to a bool value, that will be set to true if + * and element pointed by the (!)previous iterator is equal to the key, + * false otherwise. Pass NULL if you don't need that info. + * @return - Upper-bound iterator. Invalid if all elements are less or equal + * than the key. + */ +inline struct bps_tree_iterator +bps_tree_upper_bound(const struct bps_tree *tree, bps_tree_key_t key, + bool *exact) +{ + struct bps_tree_iterator res; + bool local_result; + if (!exact) + exact = &local_result; + *exact = false; + bool exact_test; + if (!tree->root) { + res.block_id = (bps_tree_block_id_t)(-1); + res.pos = 0; + return res; + } + bps_block *block = tree->root; + bps_tree_block_id_t block_id = tree->root_id; + for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) { + struct bps_inner *inner = (struct bps_inner *)block; + bps_tree_pos_t pos; + pos = bps_tree_find_after_ins_point_key(tree, inner->elems, + inner->header.size - 1, + key, &exact_test); + if (exact_test) + *exact = true; + block_id = inner->child_ids[pos]; + block = bps_tree_restore_block(tree, block_id); + } + + struct bps_leaf *leaf = (struct bps_leaf *)block; + bps_tree_pos_t pos; + pos = bps_tree_find_after_ins_point_key(tree, leaf->elems, + leaf->header.size, + key, &exact_test); + if (exact_test) + *exact = true; + if (pos >= leaf->header.size) { + res.block_id = leaf->next_id; + res.pos = 0; + } else { + res.block_id = block_id; + res.pos = pos; + } + return res; +} + +/** + * @brief Get a pointer to the element pointed by iterator. + * If iterator is detected as broken, it is invalidated and NULL returned. + * @param tree - pointer to a tree + * @param itr - pointer to tree iterator + * @return - Pointer to the element. Null for invalid iterator + */ +inline bps_tree_elem_t * +bps_tree_itr_get_elem(const struct bps_tree *tree, + struct bps_tree_iterator *itr) +{ + struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr); + if (!leaf) + return 0; + return leaf->elems + itr->pos; +} + +/** + * @brief Increments an iterator, makes it point to the next element + * If the iterator is to last element, it will be invalidated + * If the iterator is detected as broken, it will be invalidated. + * If the iterator is invalid, then it will be set to first element. 
+ * @param tree - pointer to a tree + * @param itr - pointer to tree iterator + * @return - true on success, false if a resulted iterator is set to invalid + */ +inline bool +bps_tree_itr_next(const struct bps_tree *tree, struct bps_tree_iterator *itr) +{ + if (itr->block_id == (bps_tree_block_id_t)(-1)) { + itr->block_id = tree->first_id; + itr->pos = 0; + return itr->block_id != (bps_tree_block_id_t)(-1); + } + struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr); + if (!leaf) + return false; + itr->pos++; + if (itr->pos >= leaf->header.size) { + itr->block_id = leaf->next_id; + itr->pos = 0; + return itr->block_id != (bps_tree_block_id_t)(-1); + } + return true; +} + +/** + * @brief Decrements an iterator, makes it point to the previous element + * If the iterator is to first element, it will be invalidated + * If the iterator is detected as broken, it will be invalidated. + * If the iterator is invalid, then it will be set to last element. + * @param tree - pointer to a tree + * @param itr - pointer to tree iterator + * @return - true on success, false if a resulted iterator is set to invalid + */ +inline bool +bps_tree_itr_prev(const struct bps_tree *tree, struct bps_tree_iterator *itr) +{ + if (itr->block_id == (bps_tree_block_id_t)(-1)) { + itr->block_id = tree->last_id; + itr->pos = (bps_tree_pos_t)(-1); + return itr->block_id != (bps_tree_block_id_t)(-1); + } + struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr); + if (!leaf) + return false; + if (itr->pos == 0) { + itr->block_id = leaf->prev_id; + itr->pos = (bps_tree_pos_t)(-1); + return itr->block_id != (bps_tree_block_id_t)(-1); + } else { + itr->pos--; + } + return true; +} + +/** + * @brief Find the first element that is equal to the key (comparator returns 0) + * @param tree - pointer to a tree + * @param key - key that will be compared with elements + * @return pointer to the first equal element or NULL if not found + */ +inline bps_tree_elem_t * +bps_tree_find(const struct bps_tree *tree, bps_tree_key_t key) +{ + if (!tree->root) + return 0; + bps_block *block = tree->root; + bool exact = false; + for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) { + struct bps_inner *inner = (struct bps_inner *)block; + bps_tree_pos_t pos; + pos = bps_tree_find_ins_point_key(tree, inner->elems, + inner->header.size - 1, + key, &exact); + block = bps_tree_restore_block(tree, inner->child_ids[pos]); + } + + struct bps_leaf *leaf = (struct bps_leaf *)block; + bps_tree_pos_t pos; + pos = bps_tree_find_ins_point_key(tree, leaf->elems, leaf->header.size, + key, &exact); + if (exact) + return leaf->elems + pos; + else + return 0; +} + +/** + * @brief Add a block to the garbage for future reuse + */ +static inline void +bps_tree_garbage_push(struct bps_tree *tree, bps_block *block, + bps_tree_block_id_t id) +{ + assert(block); + struct bps_garbage *garbage = (struct bps_garbage *)block; + garbage->header.type = BPS_TREE_BT_GARBAGE; + garbage->id = id; + garbage->next = tree->garbage_head; + tree->garbage_head = garbage; + tree->garbage_count++; +} + +/** + * @brief Reclaim a block fomr the garbage for reuse + */ +static inline bps_block * +bps_tree_garbage_pop(struct bps_tree *tree, bps_tree_block_id_t *id) +{ + if (tree->garbage_head) { + *id = tree->garbage_head->id; + bps_block *result = (bps_block *)tree->garbage_head; + tree->garbage_head = tree->garbage_head->next; + tree->garbage_count--; + return result; + } else { + return 0; + } +} + +/** + * @brief Reclaim from garbage of create new block and convert it to leaf + */ 
+static inline struct bps_leaf * +bps_tree_create_leaf(struct bps_tree *tree, bps_tree_block_id_t *id) +{ + struct bps_leaf *res = (struct bps_leaf *) + bps_tree_garbage_pop(tree, id); + if (!res) + res = (struct bps_leaf *)pt3_alloc(&tree->pt_allocator, id); + res->header.type = BPS_TREE_BT_LEAF; + tree->leaf_count++; + return res; +} + +/** + * @brief Reclaim from garbage of create new block and convert it to inner + */ +static inline struct bps_inner * +bps_tree_create_inner(struct bps_tree *tree, bps_tree_block_id_t *id) +{ + struct bps_inner *res = (struct bps_inner *) + bps_tree_garbage_pop(tree, id); + if (!res) + res = (struct bps_inner *)pt3_alloc(&tree->pt_allocator, id); + res->header.type = BPS_TREE_BT_INNER; + tree->inner_count++; + return res; +} + +/** + * @brief Dispose leaf block (to garbage and decrement counter) + */ +static inline void +bps_tree_dispose_leaf(struct bps_tree *tree, struct bps_leaf *leaf, + bps_tree_block_id_t id) +{ + tree->leaf_count--; + bps_tree_garbage_push(tree, (bps_block *)leaf, id); +} + +/** + * @brief Dispose inner block (to garbage and decrement counter) + */ +static inline void +bps_tree_dispose_inner(struct bps_tree *tree, struct bps_inner *inner, + bps_tree_block_id_t id) +{ + tree->inner_count--; + bps_tree_garbage_push(tree, (bps_block *)inner, id); +} + +/** + * @brief Reserve a number of block, return false if failed. + */ +static inline bool +bps_tree_reserve_blocks(struct bps_tree *tree, bps_tree_block_id_t count) +{ + while (tree->garbage_count < count) { + bps_tree_block_id_t id; + bps_block *block = (bps_block *)pt3_alloc(&tree->pt_allocator, + &id); + if (!block) + return false; + bps_tree_garbage_push(tree, block, id); + } + return true; +} + +/** + * @brief Insert first element to and empty tree. 
+ */ +static inline bool +bps_tree_insert_first_elem(struct bps_tree *tree, bps_tree_elem_t new_elem) +{ + assert(tree->depth == 0); + assert(tree->size == 0); + assert(tree->leaf_count == 0); + tree->max_elem = new_elem; + struct bps_leaf *leaf = bps_tree_create_leaf(tree, &tree->root_id); + if (!leaf) + return false; + leaf->header.size = 1; + leaf->elems[0] = new_elem; + tree->root = (bps_block *)leaf; + tree->first_id = tree->root_id; + tree->last_id = tree->root_id; + leaf->prev_id = (bps_tree_block_id_t)(-1); + leaf->next_id = (bps_tree_block_id_t)(-1); + tree->depth = 1; + tree->size = 1; + return true; +} + +/** + * @brief Collect path to an element or to the place where it can be inserted + */ +static inline void +bps_tree_collect_path(struct bps_tree *tree, bps_tree_elem_t new_elem, + bps_inner_path_elem *path, + struct bps_leaf_path_elem *leaf_path_elem, bool *exact) +{ + *exact = false; + + bps_inner_path_elem *prev_ext = 0; + bps_tree_pos_t prev_pos = 0; + bps_block *block = tree->root; + bps_tree_block_id_t block_id = tree->root_id; + bps_tree_elem_t *max_elem_copy = &tree->max_elem; + for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) { + struct bps_inner *inner = (struct bps_inner *)block; + bps_tree_pos_t pos; + if (*exact) + pos = inner->header.size - 1; + else + pos = bps_tree_find_ins_point_elem(tree, inner->elems, + inner->header.size - 1, + new_elem, exact); + + path[i].block = inner; + path[i].block_id = block_id; + path[i].insertion_point = pos; + path[i].pos_in_parent = prev_pos; + path[i].parent = prev_ext; + path[i].max_elem_copy = max_elem_copy; + + if (pos < inner->header.size - 1) + max_elem_copy = inner->elems + pos; + block_id = inner->child_ids[pos]; + block = bps_tree_restore_block(tree, block_id); + prev_pos = pos; + prev_ext = path + i; + } + + struct bps_leaf *leaf = (struct bps_leaf *)block; + bps_tree_pos_t pos; + if (*exact) + pos = leaf->header.size - 1; + else + pos = bps_tree_find_ins_point_elem(tree, leaf->elems, + leaf->header.size, + new_elem, exact); + + leaf_path_elem->block = leaf; + leaf_path_elem->block_id = block_id; + leaf_path_elem->insertion_point = pos; + leaf_path_elem->pos_in_parent = prev_pos; + leaf_path_elem->parent = prev_ext; + leaf_path_elem->max_elem_copy = max_elem_copy; +} + +/** + * @brief Replace element by it's path and fill the *replaced argument + */ +static inline bool +bps_tree_process_replace(struct bps_tree *tree, + struct bps_leaf_path_elem *leaf_path_elem, + bps_tree_elem_t new_elem, bps_tree_elem_t *replaced) +{ + (void)tree; + struct bps_leaf *leaf = leaf_path_elem->block; + assert(leaf_path_elem->insertion_point < leaf->header.size); + + if (replaced) + *replaced = leaf->elems[leaf_path_elem->insertion_point]; + + leaf->elems[leaf_path_elem->insertion_point] = new_elem; + *leaf_path_elem->max_elem_copy = leaf->elems[leaf->header.size - 1]; + return true; +} + +#ifndef NDEBUG +/** + * @brief Debug memmove, checks for overflow + */ +static inline void +bps_tree_debug_memmove(void *dst_arg, void *src_arg, size_t num, + void *dst_block_arg, void *src_block_arg) +{ + char *dst = (char *)dst_arg; + char *src = (char *)src_arg; + bps_block *dst_block = (bps_block *)dst_block_arg; + bps_block *src_block = (bps_block *)src_block_arg; + + assert(dst_block->type == src_block->type); + assert(dst_block->type == BPS_TREE_BT_LEAF || + dst_block->type == BPS_TREE_BT_INNER); + if (dst_block->type == BPS_TREE_BT_LEAF) { + struct bps_leaf *dst_leaf = (struct bps_leaf *)dst_block_arg; + struct bps_leaf *src_leaf = (struct 
bps_leaf *)src_block_arg; + if (num) { + assert(dst >= ((char *)dst_leaf->elems)); + assert(dst < ((char *)dst_leaf->elems) + + BPS_TREE_MAX_COUNT_IN_LEAF * + sizeof(bps_tree_elem_t)); + assert(src >= (char *)src_leaf->elems); + assert(src < ((char *)src_leaf->elems) + + BPS_TREE_MAX_COUNT_IN_LEAF * + sizeof(bps_tree_elem_t)); + } else { + assert(dst >= ((char *)dst_leaf->elems)); + assert(dst <= ((char *)dst_leaf->elems) + + BPS_TREE_MAX_COUNT_IN_LEAF * + sizeof(bps_tree_elem_t)); + assert(src >= (char *)src_leaf->elems); + assert(src <= ((char *)src_leaf->elems) + + BPS_TREE_MAX_COUNT_IN_LEAF * + sizeof(bps_tree_elem_t)); + } + } else { + struct bps_inner *dst_inner = (struct bps_inner *) + dst_block_arg; + struct bps_inner *src_inner = (struct bps_inner *) + src_block_arg; + if (num) { + if (dst >= ((char *)dst_inner->elems) && dst < + ((char *)dst_inner->elems) + + (BPS_TREE_MAX_COUNT_IN_INNER - 1) * + sizeof(bps_tree_elem_t)) { + assert(dst >= ((char *)dst_inner->elems)); + assert(dst < ((char *)dst_inner->elems) + + (BPS_TREE_MAX_COUNT_IN_INNER - 1) * + sizeof(bps_tree_elem_t)); + assert(src >= (char *)src_inner->elems); + assert(src < ((char *)src_inner->elems) + + (BPS_TREE_MAX_COUNT_IN_INNER - 1) * + sizeof(bps_tree_elem_t)); + } else { + assert(dst >= ((char *)dst_inner->child_ids)); + assert(dst < ((char *)dst_inner->child_ids) + + BPS_TREE_MAX_COUNT_IN_INNER * + sizeof(bps_tree_block_id_t)); + assert(src >= (char *)src_inner->child_ids); + assert(src < ((char *)src_inner->child_ids) + + BPS_TREE_MAX_COUNT_IN_INNER * + sizeof(bps_tree_block_id_t)); + } + } else { + if (dst >= ((char *)dst_inner->elems) + && dst <= ((char *)dst_inner->elems) + + (BPS_TREE_MAX_COUNT_IN_INNER - 1) * + sizeof(bps_tree_elem_t) + && src >= (char *)src_inner->elems + && src <= ((char *)src_inner->elems) + + (BPS_TREE_MAX_COUNT_IN_INNER - 1) * + sizeof(bps_tree_elem_t)) { + /* nothing to do due to if condition */ + } else { + assert(dst >= ((char *)dst_inner->child_ids)); + assert(dst <= ((char *)dst_inner->child_ids) + + BPS_TREE_MAX_COUNT_IN_INNER * + sizeof(bps_tree_block_id_t)); + assert(src >= (char *)src_inner->child_ids); + assert(src <= ((char *)src_inner->child_ids) + + BPS_TREE_MAX_COUNT_IN_INNER * + sizeof(bps_tree_block_id_t)); + } + } + } + /* oh, useful work at last */ + memmove(dst, src, num); +} +#endif + +/** + * @breif Insert an element into leaf block. There must be enough space. + */ +static inline void +bps_tree_insert_into_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *leaf_path_elem, + bps_tree_elem_t new_elem) +{ + (void)tree; + struct bps_leaf *leaf = leaf_path_elem->block; + bps_tree_pos_t pos = leaf_path_elem->insertion_point; + + assert(pos >= 0); + assert(pos <= leaf->header.size); + assert(leaf->header.size < BPS_TREE_MAX_COUNT_IN_LEAF); + + BPS_TREE_DATAMOVE(leaf->elems + pos + 1, leaf->elems + pos, + leaf->header.size - pos, leaf, leaf); + leaf->elems[pos] = new_elem; + *leaf_path_elem->max_elem_copy = leaf->elems[leaf->header.size]; + leaf->header.size++; + tree->size++; +} + +/** + * @breif Insert a child into inner block. There must be enough space. 
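+ * When pos equals the block's current size, the new child is appended as
+ * the rightmost one: the old maximum is written into elems[pos - 1] and
+ * the parent's max-element copy is replaced with max_elem.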
+ */ +static inline void +bps_tree_insert_into_inner(struct bps_tree *tree, + bps_inner_path_elem *inner_path_elem, + bps_tree_block_id_t block_id, bps_tree_pos_t pos, + bps_tree_elem_t max_elem) +{ + (void)tree; + struct bps_inner *inner = inner_path_elem->block; + + assert(pos >= 0); + assert(pos <= inner->header.size); + assert(inner->header.size < BPS_TREE_MAX_COUNT_IN_INNER); + + if (pos < inner->header.size) { + BPS_TREE_DATAMOVE(inner->elems + pos + 1, inner->elems + pos, + inner->header.size - pos - 1, inner, inner); + inner->elems[pos] = max_elem; + BPS_TREE_DATAMOVE(inner->child_ids + pos + 1, + inner->child_ids + pos, + inner->header.size - pos, inner, inner); + } else { + inner->elems[pos - 1] = *inner_path_elem->max_elem_copy; + *inner_path_elem->max_elem_copy = max_elem; + } + inner->child_ids[pos] = block_id; + + inner->header.size++; +} + +/** + * @breif Delete element from leaf block. + */ +static inline void +bps_tree_delete_from_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *leaf_path_elem) +{ + (void)tree; + struct bps_leaf *leaf = leaf_path_elem->block; + bps_tree_pos_t pos = leaf_path_elem->insertion_point; + + assert(pos >= 0); + assert(pos < leaf->header.size); + + BPS_TREE_DATAMOVE(leaf->elems + pos, leaf->elems + pos + 1, + leaf->header.size - 1 - pos, leaf, leaf); + + leaf->header.size--; + + if (leaf->header.size > 0) + *leaf_path_elem->max_elem_copy = + leaf->elems[leaf->header.size - 1]; + + tree->size--; +} + +/** + * @breif Delete a child from inner block. + */ +static inline void +bps_tree_delete_from_inner(struct bps_tree *tree, + bps_inner_path_elem *inner_path_elem) +{ + (void)tree; + struct bps_inner *inner = inner_path_elem->block; + bps_tree_pos_t pos = inner_path_elem->insertion_point; + + assert(pos >= 0); + assert(pos < inner->header.size); + + if (pos < inner->header.size - 1) { + BPS_TREE_DATAMOVE(inner->elems + pos, inner->elems + pos + 1, + inner->header.size - 2 - pos, inner, inner); + BPS_TREE_DATAMOVE(inner->child_ids + pos, + inner->child_ids + pos + 1, + inner->header.size - 1 - pos, inner, inner); + } else if (pos > 0) { + *inner_path_elem->max_elem_copy = inner->elems[pos - 1]; + } + + inner->header.size--; +} + +/** + * @breif Move a number of elements from left leaf to right leaf + */ +static inline void +bps_tree_move_elems_to_right_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *a_leaf_path_elem, + struct bps_leaf_path_elem *b_leaf_path_elem, + bps_tree_pos_t num) +{ + (void)tree; + struct bps_leaf *a = a_leaf_path_elem->block; + struct bps_leaf *b = b_leaf_path_elem->block; + bool move_all = a->header.size == num; + + assert(num > 0); + assert(a->header.size >= num); + assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF); + + BPS_TREE_DATAMOVE(b->elems + num, b->elems, b->header.size, b, b); + BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num, num, + b, a); + + a->header.size -= num; + b->header.size += num; + + if (!move_all) + *a_leaf_path_elem->max_elem_copy = + a->elems[a->header.size - 1]; + *b_leaf_path_elem->max_elem_copy = b->elems[b->header.size - 1]; +} + +/** + * @breif Move a number of children from left inner to right inner block + */ +static inline void +bps_tree_move_elems_to_right_inner(struct bps_tree *tree, + bps_inner_path_elem *a_inner_path_elem, + bps_inner_path_elem *b_inner_path_elem, + bps_tree_pos_t num) +{ + (void)tree; + struct bps_inner *a = a_inner_path_elem->block; + struct bps_inner *b = b_inner_path_elem->block; + bool move_to_empty = b->header.size == 0; + bool 
move_all = a->header.size == num; + + assert(num > 0); + assert(a->header.size >= num); + assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER); + + BPS_TREE_DATAMOVE(b->child_ids + num, b->child_ids, + b->header.size, b, b); + BPS_TREE_DATAMOVE(b->child_ids, a->child_ids + a->header.size - num, + num, b, a); + + if (!move_to_empty) + BPS_TREE_DATAMOVE(b->elems + num, b->elems, + b->header.size - 1, b, b); + BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num, + num - 1, b, a); + if (move_to_empty) + *b_inner_path_elem->max_elem_copy = + *a_inner_path_elem->max_elem_copy; + else + b->elems[num - 1] = *a_inner_path_elem->max_elem_copy; + if (!move_all) + *a_inner_path_elem->max_elem_copy = + a->elems[a->header.size - num - 1]; + + a->header.size -= num; + b->header.size += num; +} + +/** + * @breif Move a number of elements from right leaf to left leaf + */ +static inline void +bps_tree_move_elems_to_left_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *a_leaf_path_elem, + struct bps_leaf_path_elem *b_leaf_path_elem, + bps_tree_pos_t num) +{ + (void)tree; + struct bps_leaf *a = a_leaf_path_elem->block; + struct bps_leaf *b = b_leaf_path_elem->block; + + assert(num > 0); + assert(b->header.size >= num); + assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF); + + BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, num, a, b); + BPS_TREE_DATAMOVE(b->elems, b->elems + num, b->header.size - num, + b, b); + + a->header.size += num; + b->header.size -= num; + *a_leaf_path_elem->max_elem_copy = a->elems[a->header.size - 1]; +} + +/** + * @breif Move a number of children from right inner to left inner block + */ +static inline void +bps_tree_move_elems_to_left_inner(struct bps_tree *tree, + bps_inner_path_elem *a_inner_path_elem, + bps_inner_path_elem *b_inner_path_elem, + bps_tree_pos_t num) +{ + (void)tree; + struct bps_inner *a = a_inner_path_elem->block; + struct bps_inner *b = b_inner_path_elem->block; + bool move_to_empty = a->header.size == 0; + bool move_all = b->header.size == num; + + assert(num > 0); + assert(b->header.size >= num); + assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER); + + BPS_TREE_DATAMOVE(a->child_ids + a->header.size, b->child_ids, + num, a, b); + BPS_TREE_DATAMOVE(b->child_ids, b->child_ids + num, + b->header.size - num, b, b); + + if (!move_to_empty) + a->elems[a->header.size - 1] = + *a_inner_path_elem->max_elem_copy; + BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, num - 1, a, b); + if (move_all) { + *a_inner_path_elem->max_elem_copy = + *b_inner_path_elem->max_elem_copy; + } else { + *a_inner_path_elem->max_elem_copy = b->elems[num - 1]; + BPS_TREE_DATAMOVE(b->elems, b->elems + num, + b->header.size - num - 1, b, b); + } + + a->header.size += num; + b->header.size -= num; +} + +/** + * @breif Insert into leaf and move a number of elements to the right + * Works like if bps_tree_insert_into_leaf and + * bps_tree_move_elems_to_right_leaf was consequentially called, + * BUT(!) 
insertion is allowed into full block, so one can consider + * insertion as a virtual insertion into virtual block of greater maximum size + */ +static inline void +bps_tree_insert_and_move_elems_to_right_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *a_leaf_path_elem, + struct bps_leaf_path_elem *b_leaf_path_elem, + bps_tree_pos_t num, bps_tree_elem_t new_elem) +{ + (void)tree; + struct bps_leaf *a = a_leaf_path_elem->block; + struct bps_leaf *b = b_leaf_path_elem->block; + bps_tree_pos_t pos = a_leaf_path_elem->insertion_point; + bool move_to_empty = b->header.size == 0; + bool move_all = a->header.size == num - 1; + + assert(num > 0); + assert(a->header.size >= num - 1); + assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF); + assert(pos <= a->header.size); + assert(pos >= 0); + + BPS_TREE_DATAMOVE(b->elems + num, b->elems, b->header.size, b, b); + + bps_tree_pos_t mid_part_size = a->header.size - pos; + if (mid_part_size >= num) { + /* In fact insert to 'a' block */ + BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num, + num, b, a); + BPS_TREE_DATAMOVE(a->elems + pos + 1, a->elems + pos, + mid_part_size - num, a, a); + a->elems[pos] = new_elem; + } else { + /* In fact insert to 'b' block */ + bps_tree_pos_t new_pos = num - mid_part_size - 1;/* Can be 0 */ + BPS_TREE_DATAMOVE(b->elems, + a->elems + a->header.size - num + 1, + new_pos, b, a); + b->elems[new_pos] = new_elem; + BPS_TREE_DATAMOVE(b->elems + new_pos + 1, a->elems + pos, + mid_part_size, b, a); + } + + a->header.size -= (num - 1); + b->header.size += num; + if (!move_all) + *a_leaf_path_elem->max_elem_copy = + a->elems[a->header.size - 1]; + if (move_to_empty) + *b_leaf_path_elem->max_elem_copy = + b->elems[b->header.size - 1]; + tree->size++; +} + +/** + * @breif Insert into inner and move a number of children to the right + * Works like if bps_tree_insert_into_inner and + * bps_tree_move_elems_to_right_inner was consequentially called, + * BUT(!) 
insertion is allowed into full block, so one can consider + * insertion as a virtual insertion into virtual block of greater maximum size + */ +static inline void +bps_tree_insert_and_move_elems_to_right_inner(struct bps_tree *tree, + bps_inner_path_elem *a_inner_path_elem, + bps_inner_path_elem *b_inner_path_elem, + bps_tree_pos_t num, bps_tree_block_id_t block_id, + bps_tree_pos_t pos, bps_tree_elem_t max_elem) +{ + (void)tree; + struct bps_inner *a = a_inner_path_elem->block; + struct bps_inner *b = b_inner_path_elem->block; + bool move_to_empty = b->header.size == 0; + bool move_all = a->header.size == num - 1; + + assert(num > 0); + assert(a->header.size >= num - 1); + assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER); + assert(pos <= a->header.size); + assert(pos >= 0); + + if (!move_to_empty) { + BPS_TREE_DATAMOVE(b->child_ids + num, b->child_ids, + b->header.size, b, b); + BPS_TREE_DATAMOVE(b->elems + num, b->elems, + b->header.size - 1, b, b); + } + + bps_tree_pos_t mid_part_size = a->header.size - pos; + if (mid_part_size > num) { + /* In fact insert to 'a' block, to the internal position */ + BPS_TREE_DATAMOVE(b->child_ids, + a->child_ids + a->header.size - num, + num, b, a); + BPS_TREE_DATAMOVE(a->child_ids + pos + 1, a->child_ids + pos, + mid_part_size - num, a, a); + a->child_ids[pos] = block_id; + + BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num, + num - 1, b, a); + if (move_to_empty) + *b_inner_path_elem->max_elem_copy = + *a_inner_path_elem->max_elem_copy; + else + b->elems[num - 1] = *a_inner_path_elem->max_elem_copy; + + *a_inner_path_elem->max_elem_copy = + a->elems[a->header.size - num - 1]; + BPS_TREE_DATAMOVE(a->elems + pos + 1, a->elems + pos, + mid_part_size - num - 1, a, a); + a->elems[pos] = max_elem; + } else if (mid_part_size == num) { + /* In fact insert to 'a' block, to the last position */ + BPS_TREE_DATAMOVE(b->child_ids, + a->child_ids + a->header.size - num, + num, b, a); + BPS_TREE_DATAMOVE(a->child_ids + pos + 1, a->child_ids + pos, + mid_part_size - num, a, a); + a->child_ids[pos] = block_id; + + BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num, + num - 1, b, a); + if (move_to_empty) + *b_inner_path_elem->max_elem_copy = + *a_inner_path_elem->max_elem_copy; + else + b->elems[num - 1] = *a_inner_path_elem->max_elem_copy; + + *a_inner_path_elem->max_elem_copy = max_elem; + } else { + /* In fact insert to 'b' block */ + bps_tree_pos_t new_pos = num - mid_part_size - 1;/* Can be 0 */ + BPS_TREE_DATAMOVE(b->child_ids, + a->child_ids + a->header.size - num + 1, + new_pos, b, a); + b->child_ids[new_pos] = block_id; + BPS_TREE_DATAMOVE(b->child_ids + new_pos + 1, + a->child_ids + pos, mid_part_size, b, a); + + if (pos == a->header.size) { + /* +1 */ + if (move_to_empty) + *b_inner_path_elem->max_elem_copy = max_elem; + else + b->elems[num - 1] = max_elem; + if (num > 1) { + /* +(num - 2) */ + BPS_TREE_DATAMOVE(b->elems, + a->elems + a->header.size + - num + 1, num - 2, b, a); + /* +1 */ + b->elems[num - 2] = + *a_inner_path_elem->max_elem_copy; + + if (!move_all) + *a_inner_path_elem->max_elem_copy = + a->elems[a->header.size - num]; + } + } else { + assert(num > 1); + + BPS_TREE_DATAMOVE(b->elems, + a->elems + a->header.size - num + 1, + num - mid_part_size - 1, b, a); + b->elems[new_pos] = max_elem; + BPS_TREE_DATAMOVE(b->elems + new_pos + 1, + a->elems + pos, mid_part_size - 1, b, a); + if (move_to_empty) + *b_inner_path_elem->max_elem_copy = + *a_inner_path_elem->max_elem_copy; + else + b->elems[num - 1] = + 
*a_inner_path_elem->max_elem_copy; + + if (!move_all) + *a_inner_path_elem->max_elem_copy = + a->elems[a->header.size - num]; + } + } + + a->header.size -= (num - 1); + b->header.size += num; +} + +/** + * @breif Insert into leaf and move a number of elements to the left + * Works like if bps_tree_insert_into_leaf and + * bps_tree_move_elems_to_right_left was consequentially called, + * BUT(!) insertion is allowed into full block, so one can consider + * insertion as a virtual insertion into virtual block of greater maximum size + */ +static inline void +bps_tree_insert_and_move_elems_to_left_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *a_leaf_path_elem, + struct bps_leaf_path_elem *b_leaf_path_elem, + bps_tree_pos_t num, bps_tree_elem_t new_elem) +{ + (void)tree; + struct bps_leaf *a = a_leaf_path_elem->block; + struct bps_leaf *b = b_leaf_path_elem->block; + bps_tree_pos_t pos = b_leaf_path_elem->insertion_point; + bool move_all = b->header.size == num - 1; + + assert(num > 0); + assert(b->header.size >= num - 1); + assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF); + assert(pos >= 0); + assert(pos <= b->header.size); + + if (pos >= num) { + /* In fact insert to 'b' block */ + bps_tree_pos_t new_pos = pos - num; /* Can be 0 */ + BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, + num, a, b); + BPS_TREE_DATAMOVE(b->elems, b->elems + num, new_pos, b, b); + b->elems[new_pos] = new_elem; + BPS_TREE_DATAMOVE(b->elems + new_pos + 1, b->elems + pos, + b->header.size - pos, b, b); + + } else { + /* In fact insert to 'a' block */ + bps_tree_pos_t new_pos = a->header.size + pos; /* Can be 0 */ + BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, + pos, a, b); + a->elems[new_pos] = new_elem; + BPS_TREE_DATAMOVE(a->elems + new_pos + 1, b->elems + pos, + num - 1 - pos, a, b); + if (!move_all) + BPS_TREE_DATAMOVE(b->elems, b->elems + num - 1, + b->header.size - num + 1, b, b); + } + + a->header.size += num; + b->header.size -= (num - 1); + *a_leaf_path_elem->max_elem_copy = a->elems[a->header.size - 1]; + if (!move_all) + *b_leaf_path_elem->max_elem_copy = + b->elems[b->header.size - 1]; + tree->size++; +} + +/** + * @breif Insert into inner and move a number of children to the left + * Works like if bps_tree_insert_into_inner and + * bps_tree_move_elems_to_right_inner was consequentially called, + * BUT(!) 
insertion is allowed into full block, so one can consider + * insertion as a virtual insertion into virtual block of greater maximum size + */ +static inline void +bps_tree_insert_and_move_elems_to_left_inner(struct bps_tree *tree, + bps_inner_path_elem *a_inner_path_elem, + bps_inner_path_elem *b_inner_path_elem, bps_tree_pos_t num, + bps_tree_block_id_t block_id, bps_tree_pos_t pos, + bps_tree_elem_t max_elem) +{ + (void)tree; + struct bps_inner *a = a_inner_path_elem->block; + struct bps_inner *b = b_inner_path_elem->block; + bool move_to_empty = a->header.size == 0; + bool move_all = b->header.size == num - 1; + + assert(num > 0); + assert(b->header.size >= num - 1); + assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER); + assert(pos >= 0); + assert(pos <= b->header.size); + + if (pos >= num) { + /* In fact insert to 'b' block */ + bps_tree_pos_t new_pos = pos - num; /* Can be 0 */ + BPS_TREE_DATAMOVE(a->child_ids + a->header.size, b->child_ids, + num, a, b); + BPS_TREE_DATAMOVE(b->child_ids, b->child_ids + num, + new_pos, b, b); + b->child_ids[new_pos] = block_id; + BPS_TREE_DATAMOVE(b->child_ids + new_pos + 1, + b->child_ids + pos, + b->header.size - pos, b, b); + + if (!move_to_empty) + a->elems[a->header.size - 1] = + *a_inner_path_elem->max_elem_copy; + + BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, + num - 1, a, b); + if (num < b->header.size) + *a_inner_path_elem->max_elem_copy = b->elems[num - 1]; + else + *a_inner_path_elem->max_elem_copy = + *b_inner_path_elem->max_elem_copy; + + if (pos == b->header.size) { /* arrow is righter than star */ + if (num < b->header.size) { + BPS_TREE_DATAMOVE(b->elems, b->elems + num, + b->header.size - num - 1, + b, b); + b->elems[b->header.size - num - 1] = + *b_inner_path_elem->max_elem_copy; + } + *b_inner_path_elem->max_elem_copy = max_elem; + } else { /* star is righter than arrow */ + BPS_TREE_DATAMOVE(b->elems, b->elems + num, + new_pos, b, b); + b->elems[new_pos] = max_elem; + BPS_TREE_DATAMOVE(b->elems + new_pos + 1, + b->elems + pos, + b->header.size - pos - 1, b, b); + } + } else { + /* In fact insert to 'a' block */ + bps_tree_pos_t new_pos = a->header.size + pos; /* Can be 0 */ + BPS_TREE_DATAMOVE(a->child_ids + a->header.size, + b->child_ids, pos, a, b); + a->child_ids[new_pos] = block_id; + BPS_TREE_DATAMOVE(a->child_ids + new_pos + 1, + b->child_ids + pos, num - 1 - pos, a, b); + if (!move_all) + BPS_TREE_DATAMOVE(b->child_ids, b->child_ids + num - 1, + b->header.size - num + 1, b, b); + + if (!move_to_empty) + a->elems[a->header.size - 1] = + *a_inner_path_elem->max_elem_copy; + + if (!move_all) { + BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, + pos, a, b); + } else { + if (pos == b->header.size) { + if (pos > 0) { /* why? 
*/ + BPS_TREE_DATAMOVE(a->elems + + a->header.size, + b->elems, pos - 1, + a, b); + a->elems[new_pos - 1] = + *b_inner_path_elem->max_elem_copy; + } + } else { + BPS_TREE_DATAMOVE(a->elems + a->header.size, + b->elems, pos, a, b); + } + } + if (new_pos == a->header.size + num - 1) { + *a_inner_path_elem->max_elem_copy = max_elem; + } else { + a->elems[new_pos] = max_elem; + BPS_TREE_DATAMOVE(a->elems + new_pos + 1, + b->elems + pos, num - 1 - pos - 1, + a, b); + if (move_all) + *a_inner_path_elem->max_elem_copy = + *b_inner_path_elem->max_elem_copy; + else + *a_inner_path_elem->max_elem_copy = + b->elems[num - 2]; + } + if (!move_all) + BPS_TREE_DATAMOVE(b->elems, b->elems + num - 1, + b->header.size - num, b, b); + } + + a->header.size += num; + b->header.size -= (num - 1); +} + +/** + * @brieaf Difference between maximum possible and current size of the leaf + */ +static inline bps_tree_pos_t +bps_tree_leaf_free_size(struct bps_leaf *leaf) +{ + return BPS_TREE_MAX_COUNT_IN_LEAF - leaf->header.size; +} + +/** + * @brieaf Difference between maximum possible and current size of the inner + */ +static inline bps_tree_pos_t +bps_tree_inner_free_size(struct bps_inner *inner) +{ + return BPS_TREE_MAX_COUNT_IN_INNER - inner->header.size; +} + +/** + * @brieaf Difference between current size of the leaf and minumum allowed + */ +static inline bps_tree_pos_t +bps_tree_leaf_overmin_size(struct bps_leaf *leaf) +{ + return leaf->header.size - BPS_TREE_MAX_COUNT_IN_LEAF * 2 / 3; +} +/** + * @brieaf Difference between current size of the inner and minumum allowed + */ + +static inline bps_tree_pos_t +bps_tree_inner_overmin_size(struct bps_inner *inner) +{ + return inner->header.size - BPS_TREE_MAX_COUNT_IN_INNER * 2 / 3; +} + +/** + * @brief Fill path element structure of the left leaf + */ +static inline bool +bps_tree_collect_left_path_elem_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *path_elem, + struct bps_leaf_path_elem *new_path_elem) +{ + bps_inner_path_elem * parent = path_elem->parent; + if (!parent) + return false; + if (path_elem->pos_in_parent == 0) + return false; + + new_path_elem->parent = path_elem->parent; + new_path_elem->pos_in_parent = path_elem->pos_in_parent - 1; + new_path_elem->block_id = + parent->block->child_ids[new_path_elem->pos_in_parent]; + new_path_elem->block = (struct bps_leaf *) + bps_tree_restore_block(tree, new_path_elem->block_id); + new_path_elem->max_elem_copy = + parent->block->elems + new_path_elem->pos_in_parent; + new_path_elem->insertion_point = bps_tree_pos_t(-1); /* unused */ + return true; +} + +/** + * @brief Fill path element structure of the left inner + * almost exact copy of collect_tree_left_ext_leaf + */ +static inline bool +bps_tree_collect_left_path_elem_inner(struct bps_tree *tree, + bps_inner_path_elem *path_elem, + bps_inner_path_elem *new_path_elem) +{ + bps_inner_path_elem * parent = path_elem->parent; + if (!parent) + return false; + if (path_elem->pos_in_parent == 0) + return false; + + new_path_elem->parent = path_elem->parent; + new_path_elem->pos_in_parent = path_elem->pos_in_parent - 1; + new_path_elem->block_id = + parent->block->child_ids[new_path_elem->pos_in_parent]; + new_path_elem->block = (struct bps_inner *) + bps_tree_restore_block(tree, new_path_elem->block_id); + new_path_elem->max_elem_copy = parent->block->elems + + new_path_elem->pos_in_parent; + new_path_elem->insertion_point = bps_tree_pos_t(-1); /* unused */ + return true; +} + +/** + * @brief Fill path element structure of the right leaf + */ +static 
inline bool +bps_tree_collect_right_ext_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *path_elem, + struct bps_leaf_path_elem *new_path_elem) +{ + bps_inner_path_elem *parent = path_elem->parent; + if (!parent) + return false; + if (path_elem->pos_in_parent >= parent->block->header.size - 1) + return false; + + new_path_elem->parent = path_elem->parent; + new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1; + new_path_elem->block_id = + parent->block->child_ids[new_path_elem->pos_in_parent]; + new_path_elem->block = (struct bps_leaf *) + bps_tree_restore_block(tree, new_path_elem->block_id); + if (new_path_elem->pos_in_parent >= parent->block->header.size - 1) + new_path_elem->max_elem_copy = parent->max_elem_copy; + else + new_path_elem->max_elem_copy = parent->block->elems + + new_path_elem->pos_in_parent; + new_path_elem->insertion_point = bps_tree_pos_t(-1); /* unused */ + return true; +} + +/** + * @brief Fill path element structure of the right inner + * almost exact copy of bps_tree_collect_right_ext_leaf + */ +static inline bool +bps_tree_collect_right_ext_inner(struct bps_tree *tree, + bps_inner_path_elem *path_elem, + bps_inner_path_elem *new_path_elem) +{ + bps_inner_path_elem *parent = path_elem->parent; + if (!parent) + return false; + if (path_elem->pos_in_parent >= parent->block->header.size - 1) + return false; + + new_path_elem->parent = path_elem->parent; + new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1; + new_path_elem->block_id = + parent->block->child_ids[new_path_elem->pos_in_parent]; + new_path_elem->block = (struct bps_inner *) + bps_tree_restore_block(tree, new_path_elem->block_id); + if (new_path_elem->pos_in_parent >= parent->block->header.size - 1) + new_path_elem->max_elem_copy = parent->max_elem_copy; + else + new_path_elem->max_elem_copy = parent->block->elems + + new_path_elem->pos_in_parent; + new_path_elem->insertion_point = bps_tree_pos_t(-1); /* unused */ + return true; +} + +/** + * @brief Fill path element structure of the new leaf + */ +static inline void +bps_tree_prepare_new_ext_leaf(struct bps_leaf_path_elem *path_elem, + struct bps_leaf_path_elem *new_path_elem, + struct bps_leaf* new_leaf, + bps_tree_block_id_t new_leaf_id, + bps_tree_elem_t *max_elem_copy) +{ + new_path_elem->parent = path_elem->parent; + new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1; + new_path_elem->block_id = new_leaf_id; + new_path_elem->block = new_leaf; + new_path_elem->max_elem_copy = max_elem_copy; + new_path_elem->insertion_point = bps_tree_pos_t(-1); /* unused */ +} + +/** + * @brief Fill path element structure of the new inner + */ +static inline void +bps_tree_prepare_new_ext_inner(bps_inner_path_elem *path_elem, + bps_inner_path_elem *new_path_elem, + struct bps_inner* new_inner, + bps_tree_block_id_t new_inner_id, + bps_tree_elem_t *max_elem_copy) +{ + new_path_elem->parent = path_elem->parent; + new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1; + new_path_elem->block_id = new_inner_id; + new_path_elem->block = new_inner; + new_path_elem->max_elem_copy = max_elem_copy; + new_path_elem->insertion_point = bps_tree_pos_t(-1); /* unused */ +} + +/** + * bps_tree_process_insert_inner declaration. See definition for details. 
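+ * Declared here because bps_tree_process_insert_leaf below finishes a leaf
+ * split by registering the new block in the parent through this function,
+ * which may in turn split inner blocks all the way up to the root.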
+ */ +static bool +bps_tree_process_insert_inner(struct bps_tree *tree, + bps_inner_path_elem *inner_path_elem, + bps_tree_block_id_t block_id, bps_tree_pos_t pos, + bps_tree_elem_t max_elem); + +/** + * Basic inserted into leaf, dealing with spliting, merging and moving data + * to neighbour blocks if necessary + */ +static inline bool +bps_tree_process_insert_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *leaf_path_elem, + bps_tree_elem_t new_elem) +{ + if (bps_tree_leaf_free_size(leaf_path_elem->block)) { + bps_tree_insert_into_leaf(tree, leaf_path_elem, new_elem); + return true; + } + struct bps_leaf_path_elem left_ext = {0, 0, 0, 0, 0, 0}, + right_ext = {0, 0, 0, 0, 0, 0}, + left_left_ext = {0, 0, 0, 0, 0, 0}, + right_right_ext = {0, 0, 0, 0, 0, 0}; + bool has_left_ext = + bps_tree_collect_left_path_elem_leaf(tree, leaf_path_elem, + &left_ext); + bool has_right_ext = + bps_tree_collect_right_ext_leaf(tree, leaf_path_elem, + &right_ext); + bool has_left_left_ext = false; + bool has_right_right_ext = false; + if (has_left_ext && has_right_ext) { + if (bps_tree_leaf_free_size(left_ext.block) > + bps_tree_leaf_free_size(right_ext.block)) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_free_size(left_ext.block) / 2; + bps_tree_insert_and_move_elems_to_left_leaf(tree, + &left_ext, leaf_path_elem, + move_count, new_elem); + return true; + } else if (bps_tree_leaf_free_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_free_size(right_ext.block) / 2; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &right_ext, + move_count, new_elem); + return true; + } + } else if (has_left_ext) { + if (bps_tree_leaf_free_size(left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_free_size(left_ext.block) / 2; + bps_tree_insert_and_move_elems_to_left_leaf(tree, + &left_ext, leaf_path_elem, + move_count, new_elem); + return true; + } + has_left_left_ext = bps_tree_collect_left_path_elem_leaf(tree, + &left_ext, &left_left_ext); + if (has_left_left_ext && + bps_tree_leaf_free_size(left_left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_leaf_free_size(left_left_ext.block) + - 1) / 3; + bps_tree_move_elems_to_left_leaf(tree, + &left_left_ext, &left_ext, move_count); + move_count = 1 + move_count / 2; + bps_tree_insert_and_move_elems_to_left_leaf(tree, + &left_ext, leaf_path_elem, + move_count, new_elem); + return true; + } + } else if (has_right_ext) { + if (bps_tree_leaf_free_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_free_size(right_ext.block) / 2; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &right_ext, + move_count, new_elem); + return true; + } + has_right_right_ext = bps_tree_collect_right_ext_leaf(tree, + &right_ext, &right_right_ext); + if (has_right_right_ext && + bps_tree_leaf_free_size(right_right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_leaf_free_size(right_right_ext.block) + - 1) / 3; + bps_tree_move_elems_to_right_leaf(tree, &right_ext, + &right_right_ext, move_count); + move_count = 1 + move_count / 2; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &right_ext, + move_count, new_elem); + return true; + } + } + bps_tree_block_id_t new_block_id = (bps_tree_block_id_t)(-1); + struct bps_leaf *new_leaf = bps_tree_create_leaf(tree, &new_block_id); + + if (!bps_tree_reserve_blocks(tree, tree->depth + 1)) + return false; + + if (leaf_path_elem->block->next_id != (bps_tree_block_id_t)(-1)) { + 
struct bps_leaf *next_leaf = (struct bps_leaf *) + bps_tree_restore_block(tree, + leaf_path_elem->block->next_id); + assert(next_leaf->prev_id == leaf_path_elem->block_id); + next_leaf->prev_id = new_block_id; + } else { + tree->last_id = new_block_id; + } + new_leaf->next_id = leaf_path_elem->block->next_id; + leaf_path_elem->block->next_id = new_block_id; + new_leaf->prev_id = leaf_path_elem->block_id; + + new_leaf->header.size = 0; + struct bps_leaf_path_elem new_path_elem; + bps_tree_elem_t new_max_elem; + bps_tree_prepare_new_ext_leaf(leaf_path_elem, &new_path_elem, new_leaf, + new_block_id, &new_max_elem); + if (has_left_ext && has_right_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_LEAF / 4; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &new_path_elem, + move_count * 2, new_elem); + bps_tree_move_elems_to_left_leaf(tree, &new_path_elem, + &right_ext, move_count); + bps_tree_move_elems_to_right_leaf(tree, &left_ext, + leaf_path_elem, move_count); + } else if (has_left_ext && has_left_left_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_LEAF / 4; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &new_path_elem, + move_count * 3, new_elem); + bps_tree_move_elems_to_right_leaf(tree, &left_ext, + leaf_path_elem, move_count * 2); + bps_tree_move_elems_to_right_leaf(tree, &left_left_ext, + &left_ext, move_count); + } else if (has_right_ext && has_right_right_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_LEAF / 4; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &new_path_elem, + move_count, new_elem); + bps_tree_move_elems_to_left_leaf(tree, + &new_path_elem, &right_ext, move_count * 2); + bps_tree_move_elems_to_left_leaf(tree, + &right_ext, &right_right_ext, move_count); + } else if (has_left_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_LEAF / 3; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &new_path_elem, + move_count * 2, new_elem); + bps_tree_move_elems_to_right_leaf(tree, &left_ext, + leaf_path_elem, move_count); + } else if (has_right_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_LEAF / 3; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &new_path_elem, + move_count, new_elem); + bps_tree_move_elems_to_left_leaf(tree, &new_path_elem, + &right_ext, move_count); + } else { + assert(!leaf_path_elem->parent); + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_LEAF / 2; + bps_tree_insert_and_move_elems_to_right_leaf(tree, + leaf_path_elem, &new_path_elem, + move_count, new_elem); + + bps_tree_block_id_t new_root_id = (bps_tree_block_id_t)(-1); + struct bps_inner *new_root = bps_tree_create_inner(tree, + &new_root_id); + new_root->header.size = 2; + new_root->child_ids[0] = tree->root_id; + new_root->child_ids[1] = new_block_id; + new_root->elems[0] = tree->max_elem; + tree->root = (bps_block *)new_root; + tree->root_id = new_root_id; + tree->max_elem = new_max_elem; + tree->depth++; + return true; + } + assert(leaf_path_elem->parent); + return bps_tree_process_insert_inner(tree, leaf_path_elem->parent, + new_block_id, new_path_elem.pos_in_parent, + new_max_elem); +} + +/** + * Basic inserted into inner, dealing with spliting, merging and moving data + * to neighbour blocks if necessary + */ +static inline bool +bps_tree_process_insert_inner(struct bps_tree *tree, + bps_inner_path_elem *inner_path_elem, + bps_tree_block_id_t block_id, + bps_tree_pos_t pos, bps_tree_elem_t max_elem) +{ + if 
(bps_tree_inner_free_size(inner_path_elem->block)) { + bps_tree_insert_into_inner(tree, inner_path_elem, + block_id, pos, max_elem); + return true; + } + bps_inner_path_elem left_ext = {0, 0, 0, 0, 0, 0}, + right_ext = {0, 0, 0, 0, 0, 0}, + left_left_ext = {0, 0, 0, 0, 0, 0}, + right_right_ext = {0, 0, 0, 0, 0, 0}; + bool has_left_ext = + bps_tree_collect_left_path_elem_inner(tree, inner_path_elem, + &left_ext); + bool has_right_ext = + bps_tree_collect_right_ext_inner(tree, inner_path_elem, + &right_ext); + bool has_left_left_ext = false; + bool has_right_right_ext = false; + if (has_left_ext && has_right_ext) { + if (bps_tree_inner_free_size(left_ext.block) > + bps_tree_inner_free_size(right_ext.block)) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_free_size(left_ext.block) / 2; + bps_tree_insert_and_move_elems_to_left_inner(tree, + &left_ext, inner_path_elem, move_count, + block_id, pos, max_elem); + return true; + } else if (bps_tree_inner_free_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_free_size(right_ext.block) / 2; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &right_ext, + move_count, block_id, pos, max_elem); + return true; + } + } else if (has_left_ext) { + if (bps_tree_inner_free_size(left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_free_size(left_ext.block) / 2; + bps_tree_insert_and_move_elems_to_left_inner(tree, + &left_ext, inner_path_elem, + move_count, block_id, pos, max_elem); + return true; + } + has_left_left_ext = bps_tree_collect_left_path_elem_inner(tree, + &left_ext, &left_left_ext); + if (has_left_left_ext && + bps_tree_inner_free_size(left_left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_inner_free_size(left_left_ext.block) + - 1) / 3; + bps_tree_move_elems_to_left_inner(tree, &left_left_ext, + &left_ext, move_count); + move_count = 1 + move_count / 2; + bps_tree_insert_and_move_elems_to_left_inner(tree, + &left_ext, inner_path_elem, move_count, + block_id, pos, max_elem); + return true; + } + } else if (has_right_ext) { + if (bps_tree_inner_free_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_free_size(right_ext.block) / 2; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &right_ext, + move_count, block_id, pos, max_elem); + return true; + } + has_right_right_ext = bps_tree_collect_right_ext_inner(tree, + &right_ext, &right_right_ext); + if (has_right_right_ext && + bps_tree_inner_free_size(right_right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_inner_free_size(right_right_ext.block) + - 1) / 3; + bps_tree_move_elems_to_right_inner(tree, &right_ext, + &right_right_ext, move_count); + move_count = 1 + move_count / 2; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &right_ext, + move_count, block_id, pos, max_elem); + return true; + } + } + bps_tree_block_id_t new_block_id = (bps_tree_block_id_t)(-1); + struct bps_inner *new_inner = bps_tree_create_inner(tree, + &new_block_id); + + new_inner->header.size = 0; + bps_inner_path_elem new_path_elem; + bps_tree_elem_t new_max_elem; + bps_tree_prepare_new_ext_inner(inner_path_elem, &new_path_elem, + new_inner, new_block_id, &new_max_elem); + if (has_left_ext && has_right_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_INNER / 4; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &new_path_elem, + move_count * 2, block_id, pos, max_elem); + 
bps_tree_move_elems_to_left_inner(tree, &new_path_elem, + &right_ext, move_count); + bps_tree_move_elems_to_right_inner(tree, &left_ext, + inner_path_elem, move_count); + } else if (has_left_ext && has_left_left_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_INNER / 4; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &new_path_elem, + move_count * 3, block_id, pos, max_elem); + bps_tree_move_elems_to_right_inner(tree, + &left_ext, inner_path_elem, move_count * 2); + bps_tree_move_elems_to_right_inner(tree, &left_left_ext, + &left_ext, move_count); + } else if (has_right_ext && has_right_right_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_INNER / 4; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &new_path_elem, + move_count, block_id, pos, max_elem); + bps_tree_move_elems_to_left_inner(tree, &new_path_elem, + &right_ext, move_count * 2); + bps_tree_move_elems_to_left_inner(tree, &right_ext, + &right_right_ext, move_count); + } else if (has_left_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_INNER / 3; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &new_path_elem, + move_count * 2, block_id, pos, max_elem); + bps_tree_move_elems_to_right_inner(tree, &left_ext, + inner_path_elem, move_count); + } else if (has_right_ext) { + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_INNER / 3; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &new_path_elem, + move_count, block_id, pos, max_elem); + bps_tree_move_elems_to_left_inner(tree, &new_path_elem, + &right_ext, move_count); + } else { + assert(!inner_path_elem->parent); + bps_tree_pos_t move_count = BPS_TREE_MAX_COUNT_IN_INNER / 2; + bps_tree_insert_and_move_elems_to_right_inner(tree, + inner_path_elem, &new_path_elem, + move_count, block_id, pos, max_elem); + + bps_tree_block_id_t new_root_id = (bps_tree_block_id_t)(-1); + struct bps_inner *new_root = + bps_tree_create_inner(tree, &new_root_id); + new_root->header.size = 2; + new_root->child_ids[0] = tree->root_id; + new_root->child_ids[1] = new_block_id; + new_root->elems[0] = tree->max_elem; + tree->root = (bps_block *)new_root; + tree->root_id = new_root_id; + tree->max_elem = new_max_elem; + tree->depth++; + return true; + } + assert(inner_path_elem->parent); + return bps_tree_process_insert_inner(tree, inner_path_elem->parent, + new_block_id, new_path_elem.pos_in_parent, + new_max_elem); +} + +/** + * bps_tree_process_delete_inner declaration. See definition for details. 
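+ * Declared here because bps_tree_process_delete_leaf below, after disposing
+ * of an emptied leaf, removes the corresponding child from the parent
+ * through this function, which may cascade upwards and reduce the tree depth.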
+ */ +static inline void +bps_tree_process_delete_inner(struct bps_tree *tree, + bps_inner_path_elem *inner_path_elem); + +/** + * Basic deleting from leaf, dealing with spliting, merging and moving data + * to neighbour blocks if necessary + */ +static inline void +bps_tree_process_delete_leaf(struct bps_tree *tree, + struct bps_leaf_path_elem *leaf_path_elem) +{ + bps_tree_delete_from_leaf(tree, leaf_path_elem); + + if (leaf_path_elem->block->header.size >= + BPS_TREE_MAX_COUNT_IN_LEAF * 2 / 3) + return; + + struct bps_leaf_path_elem left_ext = {0, 0, 0, 0, 0, 0}, + right_ext = {0, 0, 0, 0, 0, 0}, + left_left_ext = {0, 0, 0, 0, 0, 0}, + right_right_ext = {0, 0, 0, 0, 0, 0}; + bool has_left_ext = + bps_tree_collect_left_path_elem_leaf(tree, leaf_path_elem, + &left_ext); + bool has_right_ext = + bps_tree_collect_right_ext_leaf(tree, leaf_path_elem, + &right_ext); + bool has_left_left_ext = false; + bool has_right_right_ext = false; + if (has_left_ext && has_right_ext) { + if (bps_tree_leaf_overmin_size(left_ext.block) > + bps_tree_leaf_overmin_size(right_ext.block)) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_overmin_size(left_ext.block) / 2; + bps_tree_move_elems_to_right_leaf(tree, &left_ext, + leaf_path_elem, move_count); + return; + } else if (bps_tree_leaf_overmin_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_overmin_size(right_ext.block) / 2; + bps_tree_move_elems_to_left_leaf(tree, leaf_path_elem, + &right_ext, move_count); + return; + } + } else if (has_left_ext) { + if (bps_tree_leaf_overmin_size(left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_overmin_size(left_ext.block) / 2; + bps_tree_move_elems_to_right_leaf(tree, &left_ext, + leaf_path_elem, move_count); + return; + } + has_left_left_ext = bps_tree_collect_left_path_elem_leaf(tree, + &left_ext, &left_left_ext); + if (has_left_left_ext && + bps_tree_leaf_overmin_size(left_left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_leaf_overmin_size(left_left_ext.block) + - 1) / 3; + bps_tree_move_elems_to_right_leaf(tree, &left_ext, + leaf_path_elem, move_count); + move_count = 1 + move_count / 2; + bps_tree_move_elems_to_right_leaf(tree, &left_left_ext, + &left_ext, move_count); + return; + } + } else if (has_right_ext) { + if (bps_tree_leaf_overmin_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_leaf_overmin_size(right_ext.block) + / 2; + bps_tree_move_elems_to_left_leaf(tree, leaf_path_elem, + &right_ext, move_count); + return; + } + has_right_right_ext = bps_tree_collect_right_ext_leaf(tree, + &right_ext, &right_right_ext); + if (has_right_right_ext && + bps_tree_leaf_overmin_size(right_right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_leaf_overmin_size(right_right_ext.block) + - 1)/ 3; + bps_tree_move_elems_to_left_leaf(tree, leaf_path_elem, + &right_ext, move_count); + move_count = 1 + move_count / 2; + bps_tree_move_elems_to_left_leaf(tree, &right_ext, + &right_right_ext, move_count); + return; + } + } + + if (has_left_ext && has_right_ext) { + bps_tree_pos_t move_count = + (leaf_path_elem->block->header.size + 1) / 2; + bps_tree_move_elems_to_right_leaf(tree, leaf_path_elem, + &right_ext, move_count); + move_count = leaf_path_elem->block->header.size; + bps_tree_move_elems_to_left_leaf(tree, &left_ext, + leaf_path_elem, move_count); + } else if (has_left_ext && has_left_left_ext) { + bps_tree_pos_t move_count = + (leaf_path_elem->block->header.size + 1) / 2; + 
bps_tree_move_elems_to_left_leaf(tree, &left_left_ext, + &left_ext, move_count); + move_count = leaf_path_elem->block->header.size; + bps_tree_move_elems_to_left_leaf(tree, &left_ext, + leaf_path_elem, move_count); + } else if (has_right_ext && has_right_right_ext) { + bps_tree_pos_t move_count = + (leaf_path_elem->block->header.size + 1) / 2; + bps_tree_move_elems_to_right_leaf(tree, &right_ext, + &right_right_ext, move_count); + move_count = leaf_path_elem->block->header.size; + bps_tree_move_elems_to_right_leaf(tree, leaf_path_elem, + &right_ext, move_count); + } else if (has_left_ext) { + if (leaf_path_elem->block->header.size + + left_ext.block->header.size > BPS_TREE_MAX_COUNT_IN_LEAF) + return; + bps_tree_pos_t move_count = leaf_path_elem->block->header.size; + bps_tree_move_elems_to_left_leaf(tree, &left_ext, + leaf_path_elem, move_count); + } else if (has_right_ext) { + if (leaf_path_elem->block->header.size + + right_ext.block->header.size > BPS_TREE_MAX_COUNT_IN_LEAF) + return; + bps_tree_pos_t move_count = leaf_path_elem->block->header.size; + bps_tree_move_elems_to_right_leaf(tree, leaf_path_elem, + &right_ext, move_count); + } else { + if (leaf_path_elem->block->header.size > 0) + return; + assert(leaf_path_elem->parent == 0); + assert(tree->depth == 1); + assert(tree->size == 0); + tree->root = 0; + tree->depth = 0; + tree->first_id = (bps_tree_block_id_t)(-1); + tree->last_id = (bps_tree_block_id_t)(-1); + bps_tree_dispose_leaf(tree, leaf_path_elem->block, + leaf_path_elem->block_id); + return; + } + + assert(leaf_path_elem->block->header.size == 0); + + struct bps_leaf *leaf = (struct bps_leaf*)leaf_path_elem->block; + if (leaf->prev_id == (bps_tree_block_id_t)(-1)) { + tree->first_id = leaf->next_id; + } else { + struct bps_leaf *prev_block = (struct bps_leaf *) + bps_tree_restore_block(tree, leaf->prev_id); + prev_block->next_id = leaf->next_id; + } + if (leaf->next_id == (bps_tree_block_id_t)(-1)) { + tree->last_id = leaf->prev_id; + } else { + struct bps_leaf *next_block = (struct bps_leaf *) + bps_tree_restore_block(tree, leaf->next_id); + next_block->prev_id = leaf->prev_id; + } + + bps_tree_dispose_leaf(tree, leaf_path_elem->block, + leaf_path_elem->block_id); + assert(leaf_path_elem->parent); + bps_tree_process_delete_inner(tree, leaf_path_elem->parent); +} + +/** + * Basic deletion from a leaf, deals with possible splitting, + * merging and moving of elements data to neighbouring blocks. 
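+ * This is the inner-block counterpart of bps_tree_process_delete_leaf above:
+ * it removes a child reference from an inner block and rebalances, merges
+ * or disposes inner blocks, recursing towards the root.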
+ */ +static inline void +bps_tree_process_delete_inner(struct bps_tree *tree, + bps_inner_path_elem *inner_path_elem) +{ + bps_tree_delete_from_inner(tree, inner_path_elem); + + if (inner_path_elem->block->header.size >= + BPS_TREE_MAX_COUNT_IN_INNER * 2 / 3) + return; + + bps_inner_path_elem left_ext = {0, 0, 0, 0, 0, 0}, + right_ext = {0, 0, 0, 0, 0, 0}, + left_left_ext = {0, 0, 0, 0, 0, 0}, + right_right_ext = {0, 0, 0, 0, 0, 0}; + bool has_left_ext = + bps_tree_collect_left_path_elem_inner(tree, inner_path_elem, + &left_ext); + bool has_right_ext = + bps_tree_collect_right_ext_inner(tree, inner_path_elem, + &right_ext); + bool has_left_left_ext = false; + bool has_right_right_ext = false; + if (has_left_ext && has_right_ext) { + if (bps_tree_inner_overmin_size(left_ext.block) > + bps_tree_inner_overmin_size(right_ext.block)) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_overmin_size(left_ext.block) + / 2; + bps_tree_move_elems_to_right_inner(tree, &left_ext, + inner_path_elem, move_count); + return; + } else if (bps_tree_inner_overmin_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_overmin_size(right_ext.block) + / 2; + bps_tree_move_elems_to_left_inner(tree, + inner_path_elem, &right_ext, + move_count); + return; + } + } else if (has_left_ext) { + if (bps_tree_inner_overmin_size(left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_overmin_size(left_ext.block) + / 2; + bps_tree_move_elems_to_right_inner(tree, &left_ext, + inner_path_elem, move_count); + return; + } + has_left_left_ext = + bps_tree_collect_left_path_elem_inner(tree, &left_ext, + &left_left_ext); + if (has_left_left_ext && + bps_tree_inner_overmin_size(left_left_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_inner_overmin_size(left_left_ext.block) + - 1) / 3; + bps_tree_move_elems_to_right_inner(tree, &left_ext, + inner_path_elem, move_count); + move_count = 1 + move_count / 2; + bps_tree_move_elems_to_right_inner(tree, + &left_left_ext, &left_ext, move_count); + return; + } + } else if (has_right_ext) { + if (bps_tree_inner_overmin_size(right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + + bps_tree_inner_overmin_size(right_ext.block) + / 2; + bps_tree_move_elems_to_left_inner(tree, + inner_path_elem, &right_ext, + move_count); + return; + } + has_right_right_ext = + bps_tree_collect_right_ext_inner(tree, &right_ext, + &right_right_ext); + if (has_right_right_ext && + bps_tree_inner_overmin_size(right_right_ext.block) > 0) { + bps_tree_pos_t move_count = 1 + (2 * + bps_tree_inner_overmin_size(right_right_ext.block) + - 1) / 3; + bps_tree_move_elems_to_left_inner(tree, inner_path_elem, + &right_ext, move_count); + move_count = 1 + move_count / 2; + bps_tree_move_elems_to_left_inner(tree, &right_ext, + &right_right_ext, move_count); + return; + } + } + + if (has_left_ext && has_right_ext) { + bps_tree_pos_t move_count = + (inner_path_elem->block->header.size + 1) / 2; + bps_tree_move_elems_to_right_inner(tree, inner_path_elem, + &right_ext, move_count); + move_count = inner_path_elem->block->header.size; + bps_tree_move_elems_to_left_inner(tree, &left_ext, + inner_path_elem, move_count); + } else if (has_left_ext && has_left_left_ext) { + bps_tree_pos_t move_count = + (inner_path_elem->block->header.size + 1) / 2; + bps_tree_move_elems_to_left_inner(tree, &left_left_ext, + &left_ext, move_count); + move_count = inner_path_elem->block->header.size; + bps_tree_move_elems_to_left_inner(tree, &left_ext, + inner_path_elem, move_count); + } 
else if (has_right_ext && has_right_right_ext) { + bps_tree_pos_t move_count = + (inner_path_elem->block->header.size + 1) / 2; + bps_tree_move_elems_to_right_inner(tree, &right_ext, + &right_right_ext, move_count); + move_count = inner_path_elem->block->header.size; + bps_tree_move_elems_to_right_inner(tree, inner_path_elem, + &right_ext, move_count); + } else if (has_left_ext) { + if (inner_path_elem->block->header.size + + left_ext.block->header.size > BPS_TREE_MAX_COUNT_IN_INNER) + return; + bps_tree_pos_t move_count = inner_path_elem->block->header.size; + bps_tree_move_elems_to_left_inner(tree, &left_ext, + inner_path_elem, move_count); + } else if (has_right_ext) { + if (inner_path_elem->block->header.size + + right_ext.block->header.size > BPS_TREE_MAX_COUNT_IN_INNER) + return; + bps_tree_pos_t move_count = inner_path_elem->block->header.size; + bps_tree_move_elems_to_right_inner(tree, inner_path_elem, + &right_ext, move_count); + } else { + if (inner_path_elem->block->header.size > 1) + return; + assert(tree->depth > 1); + assert(inner_path_elem->parent == 0); + tree->depth--; + tree->root_id = inner_path_elem->block->child_ids[0]; + tree->root = bps_tree_restore_block(tree, tree->root_id); + bps_tree_dispose_inner(tree, inner_path_elem->block, + inner_path_elem->block_id); + return; + } + assert(inner_path_elem->block->header.size == 0); + + bps_tree_dispose_inner(tree, inner_path_elem->block, + inner_path_elem->block_id); + assert(inner_path_elem->parent); + bps_tree_process_delete_inner(tree, inner_path_elem->parent); +} + + +/** + * @brief Insert an element to the tree or replace an element in the tree + * In case of replacing, if 'replaced' argument is not null, it'll + * be filled with replaced element. In case of inserting it's left + * intact. + * Thus one can distinguish a real insert or replace by passing to + * the function a pointer to some value; and if it was changed + * during the function call, then the replace has happened. + * Otherwise, it was an insert. + * @param tree - pointer to a tree + * @param new_elem - inserting or replacing element + * @replaced - optional pointer for a replaces element + * @return - true on success or false if memory allocation failed for insert + */ +inline bool +bps_tree_insert_or_replace(struct bps_tree *tree, bps_tree_elem_t new_elem, + bps_tree_elem_t *replaced) +{ + if (!tree->root) + return bps_tree_insert_first_elem(tree, new_elem); + + bps_inner_path_elem path[tree->depth - 1]; + struct bps_leaf_path_elem leaf_path_elem; + bool exact; + bps_tree_collect_path(tree, new_elem, path, &leaf_path_elem, &exact); + if (exact) { + bps_tree_process_replace(tree, &leaf_path_elem, new_elem, + replaced); + return true; + } else { + return bps_tree_process_insert_leaf(tree, &leaf_path_elem, + new_elem); + } +} + +/** + * @brief Delete an element from a tree. + * @param tree - pointer to a tree + * @param elem - the element tot delete + * @return - true on success or false if the element was not found in tree + */ +inline bool +bps_tree_delete(struct bps_tree *tree, bps_tree_elem_t elem) +{ + if (!tree->root) + return false; + bps_inner_path_elem path[tree->depth - 1]; + struct bps_leaf_path_elem leaf_path_elem; + bool exact; + bps_tree_collect_path(tree, elem, path, &leaf_path_elem, &exact); + + if (!exact) + return false; + + bps_tree_process_delete_leaf(tree, &leaf_path_elem); + return true; +} + + +/** + * @brief Recursively find a maximum element in subtree. 
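+ * Descends through the rightmost child of every inner block and returns
+ * the last element of the rightmost leaf.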
+ * Used only for debug purposes + */ +static inline bps_tree_elem_t +bps_tree_debug_find_max_elem(const struct bps_tree *tree, bps_block *block) +{ + assert(block->size); + if (block->type == BPS_TREE_BT_LEAF) { + struct bps_leaf *leaf = (struct bps_leaf *)block; + return leaf->elems[block->size - 1]; + } else { + assert(block->type == BPS_TREE_BT_INNER); + struct bps_inner *inner = (struct bps_inner *)block; + bps_tree_block_id_t next_block_id = + inner->child_ids[block->size - 1]; + bps_block *next_block = bps_tree_restore_block(tree, + next_block_id); + return bps_tree_debug_find_max_elem(tree, next_block); + } +} + +/** + * @brief Recursively checks the block and the corresponding subtree + * Used by bps_tree_debug_check + */ +static inline int +bps_tree_debug_check_block(const struct bps_tree *tree, bps_block *block, + bps_tree_block_id_t id, int level, + size_t *calc_count, + bps_tree_block_id_t *expected_prev_id, + bps_tree_block_id_t *expected_this_id) +{ + if (block->type != BPS_TREE_BT_LEAF && block->type != BPS_TREE_BT_INNER) + return 0x10; + if (block->type == BPS_TREE_BT_LEAF) { + struct bps_leaf *leaf = (struct bps_leaf *)(block); + int result = 0; + *calc_count += block->size; + if (id != *expected_this_id) + result |= 0x10000; + if (leaf->prev_id != *expected_prev_id) + result |= 0x20000; + *expected_prev_id = id; + *expected_this_id = leaf->next_id; + + if (level != 1) + result |= 0x100; + if (block->size == 0) + result |= 0x200; + if (block->size > BPS_TREE_MAX_COUNT_IN_LEAF) + result |= 0x200; + for (bps_tree_pos_t i = 1; i < block->size; i++) + if (BPS_TREE_COMPARE(leaf->elems[i - 1], + leaf->elems[i], tree->arg) >= 0) + result |= 0x400; + return result; + } else { + struct bps_inner *inner = (struct bps_inner *)(block); + int result = 0; + if (block->size == 0) + result |= 0x1000; + if (block->size > BPS_TREE_MAX_COUNT_IN_INNER) + result |= 0x1000; + for (bps_tree_pos_t i = 1; i < block->size - 1; i++) + if (BPS_TREE_COMPARE(inner->elems[i - 1], + inner->elems[i], tree->arg) >= 0) + result |= 0x2000; + for (bps_tree_pos_t i = 0; i < block->size - 1; i++) { + struct bps_block *block = + bps_tree_restore_block(tree, + inner->child_ids[i]); + bps_tree_elem_t calc_max_elem = + bps_tree_debug_find_max_elem(tree, block); + if (inner->elems[i] != calc_max_elem) + result |= 0x4000; + } + if (block->size > 1) { + bps_tree_elem_t calc_max_elem = + bps_tree_debug_find_max_elem(tree, block); + if (BPS_TREE_COMPARE(inner->elems[block->size - 2], + calc_max_elem, tree->arg) >= 0) + result |= 0x8000; + } + for (bps_tree_pos_t i = 0; i < block->size; i++) + result |= bps_tree_debug_check_block(tree, + bps_tree_restore_block(tree, + inner->child_ids[i]), + inner->child_ids[i], level - 1, calc_count, + expected_prev_id, expected_this_id); + return result; + } +} + +/** + * @brief A debug self-check. + * Returns a bitmask of found errors (0 on success). + * I hope you will not need it. 
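+ * A minimal usage sketch for a unit test; how the tree and the element are
+ * created is left out here (it depends on the including code), only the
+ * calls shown below are defined in this file:
+ *
+ *   assert(bps_tree_debug_check(&tree) == 0);
+ *   bps_tree_insert_or_replace(&tree, elem, 0);
+ *   assert(bps_tree_debug_check(&tree) == 0);
+ *   bps_tree_delete(&tree, elem);
+ *   assert(bps_tree_debug_check(&tree) == 0);
+ *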
+ * @param tree - pointer to a tree + * @return - Bitwise-OR of all errors found + */ +inline int +bps_tree_debug_check(const struct bps_tree *tree) +{ + int result = 0; + if (!tree->root) { + if (tree->depth != 0) + result |= 0x1; + if (tree->size != 0) + result |= 0x1; + if (tree->leaf_count != 0 || tree->inner_count != 0) + result |= 0x1; + return result; + } + if (tree->max_elem != bps_tree_debug_find_max_elem(tree, tree->root)) + result |= 0x8; + if (bps_tree_restore_block(tree, tree->root_id) != tree->root) + result |= 0x2; + size_t calc_count = 0; + bps_tree_block_id_t expected_prev_id = (bps_tree_block_id_t)(-1); + bps_tree_block_id_t expected_this_id = tree->first_id; + result |= bps_tree_debug_check_block(tree, tree->root, tree->root_id, + tree->depth, &calc_count, + &expected_prev_id, + &expected_this_id); + if (expected_this_id != (bps_tree_block_id_t)(-1)) + result |= 0x40000; + if (expected_prev_id != tree->last_id) + result |= 0x80000; + if (tree->size != calc_count) + result |= 0x4; + return result; +} +/* }}} */ + +/* {{{ Unused now, but could be essential */ +#if 0 +// Debug printing to output stream +public: + template <class CStream> + void Print(CStream &stream) const + { + if (!tree->root) { + stream << "Empty\n"; + return; + } + print(stream, root); + } + +private: + template <class CStream> + static void printIndent(CStream &stream, int indent) + { + for (int i = 0; i < indent; i++) + stream << " "; + } + + template <class CStream> + void print(CStream &stream, const struct bps_leaf* block, int indent) const + { + printIndent(stream, indent); + stream << "[(" << block->header.size << ")"; + for (bps_tree_pos_t i = 0; i < block->header.size; i++) + stream << " " << block->elems[i]; + stream << "]\n"; + } + + template <class CStream> + void print(CStream &stream, const struct bps_inner* block, int indent) const + { + bps_block *next = bps_tree_restore_block(tree, block->child_ids[0]); + print(stream, next, indent+1); + for (bps_tree_pos_t i = 0; i < block->header.size - 1; i++) { + printIndent(stream, indent); + stream << block->elems[i] << "\n"; + next = bps_tree_restore_block(tree, block->child_ids[i + 1]); + print(stream, next, indent+1); + } + } + + template <class CStream> + void print(CStream &stream, const bps_block* block, int indent = 0) const + { + if (block->header.type == BPS_TREE_BT_INNER) + print(stream, static_cast<const struct bps_inner *>(block), indent); + else + print(stream, static_cast<const struct bps_leaf *>(block), indent); + } + + +}; +#endif + +#if 0 + +// Debug utilities for testing base operation on blocks: inserting, deleting, moving to left and right blocks, and (inserting and moving) +public: + static int InternalMechanismCheck(bool assertme) + { + This_t test; + return test.internalMechanismCheck(assertme); + } + +private: + static void debugSetElem(bps_tree_elem_t &elem, unsigned char c) + { + memset(&elem, 0, sizeof(bps_tree_elem_t)); + *(unsigned char *)&elem = c; + } + + static unsigned char debugGetElem(const bps_tree_elem_t &elem) + { + return *(unsigned char *)&elem; + } + + static void debugSetElem(bps_inner_path_elem &path_elem, bps_tree_pos_t pos, unsigned char c) + { + assert(pos >= 0); + assert(pos < path_elem.block->header.size); + if (pos < path_elem.block->header.size - 1) + debugSetElem(path_elem.block->elems[pos], c); + else + debugSetElem(*path_elem.max_elem_copy, c); + } + + static unsigned char debugGetElem(const bps_inner_path_elem &path_elem, bps_tree_pos_t pos) + { + assert(pos >= 0); + assert(pos < 
path_elem.block->header.size); + if (pos < path_elem.block->header.size - 1) + return *(unsigned char *)(path_elem.block->elems + pos); + else + return *(unsigned char *)(path_elem.max_elem_copy); + } + + int internalMechanismCheckInsertLeaf(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i < struct bps_leaf::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= i; j++) { + size = 0; + struct bps_leaf block; + block.header.size = i; + for (unsigned int k = 0; k < struct bps_leaf::ElemMaxCount; k++) + if (k < j) + debugSetElem(block.elems[k], k); + else + debugSetElem(block.elems[k], k + 1); + struct bps_leaf_path_elem path_elem; + bps_tree_elem_t max, ins; + debugSetElem(max, i + 1); + debugSetElem(ins, j); + path_elem.block = █ + path_elem.insertion_point = j; + path_elem.max_elem_copy = &max; + if (!simpleInsert(path_elem, ins)) { + result |= (1 << 0); + assert(!assertme); + } + + if (block.header.size != bps_tree_pos_t(i + 1) || size != bps_tree_pos_t(1)) { + result |= (1 << 0); + assert(!assertme); + } + if (debugGetElem(max) != debugGetElem(block.LastElem())) { + result |= (1 << 1); + assert(!assertme); + } + for (unsigned int k = 0; k <= i; k++) { + if (debugGetElem(block.elems[k]) != (unsigned char)k) { + result |= (1 << 1); + assert(!assertme); + } + } + } + } + return result; + } + + int internalMechanismCheckDeleteLeaf(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 1; i <= struct bps_leaf::ElemMaxCount; i++) { + for (unsigned int j = 0; j < i; j++) { + size = 1; + struct bps_leaf block; + block.header.size = i; + for (unsigned int k = 0; k < struct bps_leaf::ElemMaxCount; k++) + debugSetElem(block.elems[k], k); + struct bps_leaf_path_elem path_elem; + bps_tree_elem_t max; + debugSetElem(max, j == i - 1 ? i - 2 : i - 1); + path_elem.block = █ + path_elem.insertion_point = j; + path_elem.max_elem_copy = &max; + if (!simpleDelete(path_elem)) { + result |= (1 << 2); + assert(!assertme); + } + + if (block.header.size != bps_tree_pos_t(i - 1) || size != bps_tree_pos_t(0)) { + result |= (1 << 2); + assert(!assertme); + } + if (i > 1 && debugGetElem(max) != debugGetElem(block.LastElem())) { + result |= (1 << 3); + assert(!assertme); + } + for (unsigned int k = 0; k < i - 1; k++) { + if (debugGetElem(block.elems[k]) != (unsigned char)( k < j ? k : k + 1)) { + result |= (1 << 3); + assert(!assertme); + } + } + } + } + return result; + } + + int internalMechanismCheckMoveRightLeaf(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i <= struct bps_leaf::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_leaf::ElemMaxCount; j++) { + unsigned int maxMove = i < struct bps_leaf::ElemMaxCount - j ? 
i : struct bps_leaf::ElemMaxCount - j; + for (unsigned int k = 1; k <= maxMove; k++) { + struct bps_leaf a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + a.header.size = i; + b.header.size = j; + unsigned char c = 0; + for (unsigned int u = 0; u < i; u++) + debugSetElem(a.elems[u], c++); + for (unsigned int u = 0; u < j; u++) + debugSetElem(b.elems[u], c++); + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + if (i) + ma = a.LastElem(); + if (j) + mb = b.LastElem(); + + struct bps_leaf_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + + if (j) { + const bool move_to_empty = false; + if (k < i) { + const bool move_all = false; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } else { + const bool move_to_empty = true; + if (k < i) { + const bool move_all = false; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } + + if (a.header.size != (bps_tree_pos_t)(i - k)) { + result |= (1 << 4); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j + k)) { + result |= (1 << 4); + assert(!assertme); + } + + if (i - k) + if (ma != a.LastElem()) { + result |= (1 << 5); + assert(!assertme); + } + if (j + k) + if (mb != b.LastElem()) { + result |= (1 << 5); + assert(!assertme); + } + + c = 0; + for (unsigned int u = 0; u < (unsigned int)a.header.size; u++) + if (debugGetElem(a.elems[u]) != c++) { + result |= (1 << 5); + assert(!assertme); + } + for (unsigned int u = 0; u < (unsigned int)b.header.size; u++) + if (debugGetElem(b.elems[u]) != c++) { + result |= (1 << 5); + assert(!assertme); + } + } + } + } + return result; + } + + int internalMechanismCheckMoveLeftLeaf(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i <= struct bps_leaf::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_leaf::ElemMaxCount; j++) { + unsigned int maxMove = j < struct bps_leaf::ElemMaxCount - i ? 
j : struct bps_leaf::ElemMaxCount - i; + for (unsigned int k = 1; k <= maxMove; k++) { + struct bps_leaf a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + a.header.size = i; + b.header.size = j; + unsigned char c = 0; + for (unsigned int u = 0; u < i; u++) + debugSetElem(a.elems[u], c++); + for (unsigned int u = 0; u < j; u++) + debugSetElem(b.elems[u], c++); + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + if (i) + ma = a.LastElem(); + if (j) + mb = b.LastElem(); + + struct bps_leaf_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + + if (i) { + const bool move_to_empty = false; + if (k < j) { + const bool move_all = false; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } else { + const bool move_to_empty = true; + if (k < j) { + const bool move_all = false; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } + + if (a.header.size != (bps_tree_pos_t)(i + k)) { + result |= (1 << 6); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j - k)) { + result |= (1 << 6); + assert(!assertme); + } + + if (i + k) + if (ma != a.LastElem()) { + result |= (1 << 7); + assert(!assertme); + } + if (j - k) + if (mb != b.LastElem()) { + result |= (1 << 7); + assert(!assertme); + } + + c = 0; + for (unsigned int u = 0; u < (unsigned int)a.header.size; u++) + if (debugGetElem(a.elems[u]) != c++) { + result |= (1 << 7); + assert(!assertme); + } + for (unsigned int u = 0; u < (unsigned int)b.header.size; u++) + if (debugGetElem(b.elems[u]) != c++) { + result |= (1 << 7); + assert(!assertme); + } + } + } + } + return result; + } + + int internalMechanismCheckMoveRightInsertLeaf(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i <= struct bps_leaf::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_leaf::ElemMaxCount; j++) { + unsigned int maxMove = i + 1 < struct bps_leaf::ElemMaxCount - j ? 
i + 1 : struct bps_leaf::ElemMaxCount - j; + for (unsigned int k = 0; k <= i; k++) { + for (unsigned int u = 1; u <= maxMove; u++) { + struct bps_leaf a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + a.header.size = i; + b.header.size = j; + unsigned char c = 0; + unsigned char ic = i + j; + for (unsigned int v = 0; v < i; v++) { + if (v == k) + ic = c++; + debugSetElem(a.elems[v], c++); + } + if (k == i) + ic = c++; + for (unsigned int v = 0; v < j; v++) + debugSetElem(b.elems[v], c++); + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + if (i) + ma = a.LastElem(); + if (j) + mb = b.LastElem(); + + struct bps_leaf_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + aExt.insertion_point = k; + bps_tree_elem_t ins; + debugSetElem(ins, ic); + + if (j) { + const bool move_to_empty = false; + if (u < i + 1) { + const bool move_all = false; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } else { + const bool move_all = true; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } + } else { + const bool move_to_empty = true; + if (u < i + 1) { + const bool move_all = false; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } else { + const bool move_all = true; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } + } + + if (a.header.size != (bps_tree_pos_t)(i - u + 1)) { + result |= (1 << 8); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j + u)) { + result |= (1 << 8); + assert(!assertme); + } + + if (i - u + 1) + if (ma != a.LastElem()) { + result |= (1 << 9); + assert(!assertme); + } + if (j + u) + if (mb != b.LastElem()) { + result |= (1 << 9); + assert(!assertme); + } + + c = 0; + for (unsigned int v = 0; v < (unsigned int)a.header.size; v++) + if (debugGetElem(a.elems[v]) != c++) { + result |= (1 << 9); + assert(!assertme); + } + for (unsigned int v = 0; v < (unsigned int)b.header.size; v++) + if (debugGetElem(b.elems[v]) != c++) { + result |= (1 << 9); + assert(!assertme); + } + + + } + } + } + } + return result; + } + + int internalMechanismCheckMoveLeftInsertLeaf(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i <= struct bps_leaf::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_leaf::ElemMaxCount; j++) { + unsigned int maxMove = j + 1 < struct bps_leaf::ElemMaxCount - i ? 
j + 1 : struct bps_leaf::ElemMaxCount - i; + for (unsigned int k = 0; k <= j; k++) { + for (unsigned int u = 1; u <= maxMove; u++) { + struct bps_leaf a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + a.header.size = i; + b.header.size = j; + unsigned char c = 0; + unsigned char ic = i + j; + for (unsigned int v = 0; v < i; v++) + debugSetElem(a.elems[v], c++); + for (unsigned int v = 0; v < j; v++) { + if (v == k) + ic = c++; + debugSetElem(b.elems[v], c++); + } + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + if (i) + ma = a.LastElem(); + if (j) + mb = b.LastElem(); + + struct bps_leaf_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + bExt.insertion_point = k; + bps_tree_elem_t ins; + debugSetElem(ins, ic); + + if (i) { + const bool move_to_empty = false; + if (u < j + 1) { + const bool move_all = false; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } else { + const bool move_all = true; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } + } else { + const bool move_to_empty = true; + if (u < j + 1) { + const bool move_all = false; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } else { + const bool move_all = true; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, ins); + } + } + + if (a.header.size != (bps_tree_pos_t)(i + u)) { + result |= (1 << 10); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j - u + 1)) { + result |= (1 << 10); + assert(!assertme); + } + + if (i + u) + if (ma != a.LastElem()) { + result |= (1 << 11); + assert(!assertme); + } + if (j - u + 1) + if (mb != b.LastElem()) { + result |= (1 << 11); + assert(!assertme); + } + + c = 0; + for (unsigned int v = 0; v < (unsigned int)a.header.size; v++) + if (debugGetElem(a.elems[v]) != c++) { + result |= (1 << 11); + assert(!assertme); + } + for (unsigned int v = 0; v < (unsigned int)b.header.size; v++) + if (debugGetElem(b.elems[v]) != c++) { + result |= (1 << 11); + assert(!assertme); + } + + + } + } + } + } + return result; + } + + int internalMechanismCheckInsertInner(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i < struct bps_inner::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= i; j++) { + size = 0; + + struct bps_inner block; + memset(block.elems, 0xFF, sizeof(block.elems)); + memset(block.child_ids, 0xFF, sizeof(block.child_ids)); + block.header.size = i; + + bps_tree_elem_t max, ins; + debugSetElem(ins, j); + + bps_inner_path_elem path_elem; + path_elem.block = █ + path_elem.max_elem_copy = &max; + + for (unsigned int k = 0; k < i; k++) { + if (k < j) + debugSetElem(path_elem, k, k); + else + debugSetElem(path_elem, k, k + 1); + } + for (unsigned int k = 0; k < i; k++) + if (k < j) + block.child_ids[k] = (bps_tree_block_id_t)k; + else + block.child_ids[k] = (bps_tree_block_id_t)(k + 1); + + bps_inner_path_elem newExt; + newExt.max_elem_copy = &ins; + newExt.block_id = j; + newExt.posInParent = j; + + if (!simpleInsert(path_elem, newExt)) { + result |= (1 << 12); + assert(!assertme); + } + + for (unsigned int k = 0; k <= i; k++) { + if (debugGetElem(path_elem, k) != (unsigned char)k) { + result |= (1 << 13); + assert(!assertme); + } + } + for (unsigned int k = 0; k <= i; k++) { + if (block.child_ids[k] != k) { + result |= (1 << 13); + assert(!assertme); + } + } + } + } + return result; + } + + int 
internalMechanismCheckDeleteInner(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 1; i <= struct bps_inner::ElemMaxCount; i++) { + for (unsigned int j = 0; j < i; j++) { + struct bps_inner block; + block.header.size = i; + for (unsigned int k = 0; k < struct bps_inner::ElemMaxCount - 1; k++) + debugSetElem(block.elems[k], k); + for (unsigned int k = 0; k < struct bps_inner::ElemMaxCount; k++) + block.child_ids[k] = k; + bps_inner_path_elem path_elem; + bps_tree_elem_t max; + debugSetElem(max, i - 1); + path_elem.block = █ + path_elem.insertion_point = j; + path_elem.max_elem_copy = &max; + if (!simpleDelete(path_elem)) { + result |= (1 << 14); + assert(!assertme); + } + + unsigned char c = 0; + bps_tree_block_id_t kk = 0; + for (unsigned int k = 0; k < i - 1; k++) { + if (k == j) { + c++; + kk++; + } + if (debugGetElem(path_elem, k) != c++) { + result |= (1 << 15); + assert(!assertme); + } + if (block.child_ids[k] != kk++) { + result |= (1 << 15); + assert(!assertme); + } + } + } + } + return result; + } + + int internalMechanismCheckMoveRightInner(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i <= struct bps_inner::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_inner::ElemMaxCount; j++) { + unsigned int maxMove = i < struct bps_inner::ElemMaxCount - j ? i : struct bps_inner::ElemMaxCount - j; + for (unsigned int k = 1; k <= maxMove; k++) { + struct bps_inner a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + memset(a.child_ids, 0xFF, sizeof(a.child_ids)); + memset(b.child_ids, 0xFF, sizeof(b.child_ids)); + a.header.size = i; + b.header.size = j; + + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + bps_inner_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + + unsigned char c = 0; + bps_tree_block_id_t kk = 0; + for (unsigned int u = 0; u < i; u++) { + debugSetElem(aExt, u, c++); + a.child_ids[u] = kk++; + } + for (unsigned int u = 0; u < j; u++) { + debugSetElem(bExt, u, c++); + b.child_ids[u] = kk++; + } + + if (j) { + const bool move_to_empty = false; + if (k < i) { + const bool move_all = false; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } else { + const bool move_to_empty = true; + if (k < i) { + const bool move_all = false; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveRight<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } + + if (a.header.size != (bps_tree_pos_t)(i - k)) { + result |= (1 << 16); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j + k)) { + result |= (1 << 16); + assert(!assertme); + } + + c = 0; + kk = 0; + for (unsigned int u = 0; u < (unsigned int)a.header.size; u++) { + if (debugGetElem(aExt, u) != c++) { + result |= (1 << 17); + assert(!assertme); + } + if (a.child_ids[u] != kk++) { + result |= (1 << 17); + assert(!assertme); + } + } + for (unsigned int u = 0; u < (unsigned int)b.header.size; u++) { + if (debugGetElem(bExt, u) != c++) { + result |= (1 << 17); + assert(!assertme); + } + if (b.child_ids[u] != kk++) { + result |= (1 << 17); + assert(!assertme); + } + } + } + } + } + return result; + } + + int internalMechanismCheckMoveLeftInner(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 
0; i <= struct bps_inner::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_inner::ElemMaxCount; j++) { + unsigned int maxMove = j < struct bps_inner::ElemMaxCount - i ? j : struct bps_inner::ElemMaxCount - i; + for (unsigned int k = 1; k <= maxMove; k++) { + struct bps_inner a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + memset(a.child_ids, 0xFF, sizeof(a.child_ids)); + memset(b.child_ids, 0xFF, sizeof(b.child_ids)); + a.header.size = i; + b.header.size = j; + + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + bps_inner_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + + unsigned char c = 0; + bps_tree_block_id_t kk = 0; + for (unsigned int u = 0; u < i; u++) { + debugSetElem(aExt, u, c++); + a.child_ids[u] = kk++; + } + for (unsigned int u = 0; u < j; u++) { + debugSetElem(bExt, u, c++); + b.child_ids[u] = kk++; + } + + if (i) { + const bool move_to_empty = false; + if (k < j) { + const bool move_all = false; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } else { + const bool move_to_empty = true; + if (k < j) { + const bool move_all = false; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } else { + const bool move_all = true; + moveLeft<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)k); + } + } + + if (a.header.size != (bps_tree_pos_t)(i + k)) { + result |= (1 << 18); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j - k)) { + result |= (1 << 18); + assert(!assertme); + } + + c = 0; + kk = 0; + for (unsigned int u = 0; u < (unsigned int)a.header.size; u++) { + if (debugGetElem(aExt, u) != c++) { + result |= (1 << 19); + assert(!assertme); + } + if (a.child_ids[u] != kk++) { + result |= (1 << 19); + assert(!assertme); + } + } + for (unsigned int u = 0; u < (unsigned int)b.header.size; u++) { + if (debugGetElem(bExt, u) != c++) { + result |= (1 << 19); + assert(!assertme); + } + if (b.child_ids[u] != kk++) { + result |= (1 << 19); + assert(!assertme); + } + } + } + } + } + return result; + } + + int internalMechanismCheckMoveRightInsertInner(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i <= struct bps_inner::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_inner::ElemMaxCount; j++) { + unsigned int maxMove = i + 1 < struct bps_inner::ElemMaxCount - j ? 
i + 1 : struct bps_inner::ElemMaxCount - j; + for (unsigned int k = 0; k <= i; k++) { + for (unsigned int u = 1; u <= maxMove; u++) { + struct bps_inner a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + memset(a.child_ids, 0xFF, sizeof(a.child_ids)); + memset(b.child_ids, 0xFF, sizeof(b.child_ids)); + a.header.size = i; + b.header.size = j; + + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + bps_inner_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + + unsigned char c = 0; + bps_tree_block_id_t kk = 0; + unsigned char ic = i + j; + bps_tree_block_id_t ikk = (bps_tree_block_id_t)(i + j); + + for (unsigned int v = 0; v < i; v++) { + if (v == k) { + ic = c++; + ikk = kk++; + } + debugSetElem(aExt, v, c++); + a.child_ids[v] = kk++; + } + if (k == i) { + ic = c++; + ikk = kk++; + } + for (unsigned int v = 0; v < j; v++) { + debugSetElem(bExt, v, c++); + b.child_ids[v] = kk++; + } + + aExt.insertion_point = -1; + bps_tree_elem_t ins; + debugSetElem(ins, ic); + bps_inner_path_elem insExt; + insExt.posInParent = k; + insExt.max_elem_copy = &ins; + insExt.block_id = ikk; + + if (j) { + const bool move_to_empty = false; + if (u < i + 1) { + const bool move_all = false; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } else { + const bool move_all = true; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } + } else { + const bool move_to_empty = true; + if (u < i + 1) { + const bool move_all = false; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } else { + const bool move_all = true; + moveRightInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } + } + + if (a.header.size != (bps_tree_pos_t)(i - u + 1)) { + result |= (1 << 20); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j + u)) { + result |= (1 << 20); + assert(!assertme); + } + + c = 0; + kk = 0; + for (unsigned int v = 0; v < (unsigned int)a.header.size; v++) { + if (debugGetElem(aExt, v) != c++) { + result |= (1 << 21); + assert(!assertme); + } + if (a.child_ids[v] != kk++) { + result |= (1 << 21); + assert(!assertme); + } + } + for (unsigned int v = 0; v < (unsigned int)b.header.size; v++) { + if (debugGetElem(bExt, v) != c++) { + result |= (1 << 21); + assert(!assertme); + } + if (b.child_ids[v] != kk++) { + result |= (1 << 21); + assert(!assertme); + } + } + + } + } + } + } + return result; + } + + int internalMechanismCheckMoveLeftInsertInner(bool assertme) + { + (void)assertme; + int result = 0; + for (unsigned int i = 0; i <= struct bps_inner::ElemMaxCount; i++) { + for (unsigned int j = 0; j <= struct bps_inner::ElemMaxCount; j++) { + unsigned int maxMove = j + 1 < struct bps_inner::ElemMaxCount - i ? 
j + 1 : struct bps_inner::ElemMaxCount - i; + for (unsigned int k = 0; k <= j; k++) { + for (unsigned int u = 1; u <= maxMove; u++) { + struct bps_inner a, b; + memset(a.elems, 0xFF, sizeof(a.elems)); + memset(b.elems, 0xFF, sizeof(b.elems)); + memset(a.child_ids, 0xFF, sizeof(a.child_ids)); + memset(b.child_ids, 0xFF, sizeof(b.child_ids)); + a.header.size = i; + b.header.size = j; + + bps_tree_elem_t ma = bps_tree_elem_t(), mb = bps_tree_elem_t(); + bps_inner_path_elem aExt, bExt; + aExt.block = &a; + aExt.max_elem_copy = &ma; + bExt.block = &b; + bExt.max_elem_copy = &mb; + + unsigned char c = 0; + bps_tree_block_id_t kk = 0; + unsigned char ic = i + j; + bps_tree_block_id_t ikk = (bps_tree_block_id_t)(i + j); + for (unsigned int v = 0; v < i; v++) { + debugSetElem(aExt, v, c++); + a.child_ids[v] = kk++; + } + for (unsigned int v = 0; v < j; v++) { + if (v == k) { + ic = c++; + ikk = kk++; + } + debugSetElem(bExt, v, c++); + b.child_ids[v] = kk++; + } + + bExt.insertion_point = -1; + bps_tree_elem_t ins; + debugSetElem(ins, ic); + bps_inner_path_elem insExt; + insExt.posInParent = k; + insExt.max_elem_copy = &ins; + insExt.block_id = ikk; + + if (i) { + const bool move_to_empty = false; + if (u < j + 1) { + const bool move_all = false; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } else { + const bool move_all = true; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } + } else { + const bool move_to_empty = true; + if (u < j + 1) { + const bool move_all = false; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } else { + const bool move_all = true; + moveLeftInsert<move_to_empty, move_all>(aExt, bExt, (bps_tree_pos_t)u, insExt); + } + } + + if (a.header.size != (bps_tree_pos_t)(i + u)) { + result |= (1 << 22); + assert(!assertme); + } + if (b.header.size != (bps_tree_pos_t)(j - u + 1)) { + result |= (1 << 22); + assert(!assertme); + } + + c = 0; + kk = 0; + for (unsigned int v = 0; v < (unsigned int)a.header.size; v++) { + if (debugGetElem(aExt, v) != c++) { + result |= (1 << 23); + assert(!assertme); + } + if (a.child_ids[v] != kk++) { + result |= (1 << 23); + assert(!assertme); + } + } + for (unsigned int v = 0; v < (unsigned int)b.header.size; v++) { + if (debugGetElem(bExt, v) != c++) { + result |= (1 << 23); + assert(!assertme); + } + if (b.child_ids[v] != kk++) { + result |= (1 << 23); + assert(!assertme); + } + } + + } + } + } + } + return result; + } + + int internalMechanismCheck(bool assertme) + { + int result = 0; + result |= internalMechanismCheckInsertLeaf(assertme); + result |= internalMechanismCheckDeleteLeaf(assertme); + result |= internalMechanismCheckMoveRightLeaf(assertme); + result |= internalMechanismCheckMoveLeftLeaf(assertme); + result |= internalMechanismCheckMoveRightInsertLeaf(assertme); + result |= internalMechanismCheckMoveLeftInsertLeaf(assertme); + + result |= internalMechanismCheckInsertInner(assertme); + result |= internalMechanismCheckDeleteInner(assertme); + result |= internalMechanismCheckMoveRightInner(assertme); + result |= internalMechanismCheckMoveLeftInner(assertme); + result |= internalMechanismCheckMoveRightInsertInner(assertme); + result |= internalMechanismCheckMoveLeftInsertInner(assertme); + return result; + } + + +#endif +/* }}} */ + +#undef BPS_TREE_MEMMOVE +#undef BPS_TREE_DATAMOVE + +#undef _bps +#undef _bps_tree +#undef _BPS +#undef _BPS_TREE diff --git a/src/lib/small/pt_alloc.c b/src/lib/small/pt_alloc.c index 
155ef37579cd0403854962e1b21fd574b3689197..cd1107bb2e43895fb26abadae3d928283593cda2 100644 --- a/src/lib/small/pt_alloc.c +++ b/src/lib/small/pt_alloc.c @@ -86,9 +86,6 @@ pt2_construct(pt2 *p, PT_ID_T extent_size, PT_ID_T block_size, p->free_func = free_func; } -/* - * Destruction - */ void pt3_destroy(pt3 *p) { @@ -156,9 +153,6 @@ pt2_destroy(pt2 *p) } -/* - * Allocation - */ void * pt3_alloc(pt3 *p, PT_ID_T *result_id) { @@ -256,11 +250,8 @@ pt2_alloc(pt2 *p, PT_ID_T *result_id) return (void*)((char*)extent2 + index2 * p->block_size); } -/* - * Restoration - */ void * -pt3_get(pt3 *p, PT_ID_T id) +pt3_get(const pt3 *p, PT_ID_T id) { pt_assert(id < p->created); @@ -274,7 +265,7 @@ pt3_get(pt3 *p, PT_ID_T id) } void * -pt2_get(pt2 *p, PT_ID_T id) +pt2_get(const pt2 *p, PT_ID_T id) { pt_assert(id < p->created); @@ -285,11 +276,11 @@ pt2_get(pt2 *p, PT_ID_T id) return (((char**)p->extent)[index1] + index2 * p->block_size); } -/* - * Getting number of allocated chunks (of size p->chunk_size each) +/** + * Return the number of allocated extents (of size p->extent_size each) */ PT_ID_T -pt3_extents_count(pt3 *p) +pt3_extents_count(const pt3 *p) { PT_ID_T c = (p->created + (p->extent_size / p->block_size - 1)) / (p->extent_size / p->block_size); @@ -301,8 +292,9 @@ pt3_extents_count(pt3 *p) } return res; } + PT_ID_T -pt2_extents_count(pt2 *p) +pt2_extents_count(const pt2 *p) { PT_ID_T c = (p->created + (p->extent_size / p->block_size - 1)) / (p->extent_size / p->block_size); diff --git a/src/lib/small/pt_alloc.h b/src/lib/small/pt_alloc.h index 4ba650da15eca5bf99bc3cee422ccfd7e926e66c..ba33ce4535b7c972f56bfbc17c733eca773eb467 100644 --- a/src/lib/small/pt_alloc.h +++ b/src/lib/small/pt_alloc.h @@ -1,25 +1,82 @@ -#pragma once +#ifndef INCLUDES_TARANTOOL_SMALL_PTALLOC_H +#define INCLUDES_TARANTOOL_SMALL_PTALLOC_H +/* + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF + * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ /* * pt_alloc: pointer translation allocator - * pt_alloc is as an allocator, that provides blocks of specified size (N), - * and provides an integer incrementally-growning ID for each provided block. - * Block size must be power of 2 (checked by assert in debug build). - * pt_alloc can restore pointer to block by it's ID, so - * one can store such ID instead of storing pointer to block. 
- * Since block IDs are generated incrementally from 0 and pt instance stores - * the number of provided blocks, one can simply iterate all provided blocks. - * pt_alloc in it's turn allocates extents of memory with given allocator, - * and strictly with specified size (M). - * Extent size must be power of 2. - * No block freeing provided, but destoying pt_alloc instance frees all blocks. - * There is an impotant compile-time setting - recurse level (L). - * Actually pt_alloc defined several times, with different L as suffix of "pt" - * For example pt3_alloc is a pt_alloc with L = 3 - * Briefly, with a given N, M and L (see above) the pt_alloc instance: - * 1) can provide not more than POW(M / sizeof(void*), L - 1) * (M / N) blocks - * 2) costs L memory reading for providing new block and restoring block ptr - * 3) has approximate memory overhead of size (L * M) - * Of course, ID integer type limit also limits maximum capability of pt_alloc. + * pt_alloc is an allocator that provides blocks of a specified + * size (N), and provides an integer incrementally-growing ID for + * each returned block. + * + * The block size must be a power of 2 (checked by assert in + * a debug build). pt_alloc can restore the pointer to a block + * by its block ID, so one can store such an ID instead of storing + * a pointer to the block. + * + * Since block IDs grow incrementally from 0 and a pt_alloc + * instance stores the number of provided blocks, there is a + * simple way to iterate over all provided blocks. + * pt_alloc in its turn allocates extents of memory by means of + * the supplied allocator, each extent having the same size (M). + * M must be a power of 2. + * There is no way to free a single block, but destroying a + * pt_alloc instance frees all allocated extents. + * + * There is an important compile-time setting - recursion level + * (L). + * Imagine the block id is a uint32 and the recursion level is 3. + * This means that the block id value consists of 3 parts: + * - first N1 bits - level 0 extent id - stores the address of + * level 1 extent + * - second N2 bits - level 1 extent id - stores the address of + * the extent which contains actual blocks + * - remaining bits - block number in level 1 extent + * (Actual values of N1 and N2 are determined by the extent size M). + * + * Actually, pt_alloc is re-defined twice, with different + * recursion levels as suffixes of "pt" function names: + * pt3_alloc is a pt_alloc with L = 3 + * pt2_alloc is a pt_alloc with L = 2 + * + * To sum up, with a given N, M and L (see above) the pt_alloc + * instance: + * + * 1) can provide not more than + * pow(M / sizeof(void*), L - 1) * (M / N) + * blocks + * 2) costs L random memory accesses to provide a new block or + * restore a block pointer from block id + * 3) has an approximate memory overhead of size (L * M) + * + * Of course, the integer type used for block id (PT_ID_T, usually + * a typedef to uint32_t) also limits the maximum number of + * objects that can be created by an instance of pt_alloc. */ #include <stdint.h> @@ -29,11 +86,16 @@ extern "C" { #endif /* defined(__cplusplus) */ typedef uint32_t PT_ID_T; +/* + * Type of the extent allocator (the allocator + * for extents of size M). + */ typedef void *(*prov_alloc_func)(); typedef void (*prov_free_func)(void *); /* - * pt_alloc struct definition + * pt_alloc - memory allocator of blocks of equal + * size with support for address translation.
*/ typedef struct tag_pt3 { void *extent; @@ -63,48 +125,53 @@ typedef struct tag_pt2 { * pt_alloc API declaration */ -/* - * Construction +/** + * Initialize an empty instantce of pointer translating + * block allocator. Does not allocate memory. */ void pt3_construct(pt3 *p, PT_ID_T extent_size, PT_ID_T block_size, - prov_alloc_func alloc_func, prov_free_func free_func); + prov_alloc_func alloc_func, prov_free_func free_func); void pt2_construct(pt2 *p, PT_ID_T extent_size, PT_ID_T block_size, - prov_alloc_func alloc_func, prov_free_func free_func); + prov_alloc_func alloc_func, prov_free_func free_func); -/* - * Destruction +/** + * Free all memory used by an instance of pt_alloc. */ void pt3_destroy(pt3 *p); void pt2_destroy(pt2 *p); -/* - * Allocation +/** + * Allocate a new block. Return both, block pointer and block + * id. + * + * @retval NULL failed to allocate memory */ void * pt3_alloc(pt3 *p, PT_ID_T *id); void * pt2_alloc(pt2 *p, PT_ID_T *id); -/* - * Restoration +/** + * Convert block id into block address. */ void * -pt3_get(pt3 *p, PT_ID_T id); +pt3_get(const pt3 *p, PT_ID_T id); void * -pt2_get(pt2 *p, PT_ID_T id); +pt2_get(const pt2 *p, PT_ID_T id); /* * Getting number of allocated extents (of size extent_size each) */ PT_ID_T -pt3_extents_count(pt3 *p); +pt3_extents_count(const pt3 *p); PT_ID_T -pt2_extents_count(pt2 *p); +pt2_extents_count(const pt2 *p); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ +#endif /* INCLUDES_TARANTOOL_SMALL_PTALLOC_H */ diff --git a/test/big/tree_pk.result b/test/big/tree_pk.result index 303b1e24c28693f9a31e6edefae6c9a5284376b2..cada6c128975ce34cc2569bc58ea7b4f431a9523 100644 --- a/test/big/tree_pk.result +++ b/test/big/tree_pk.result @@ -546,195 +546,3 @@ s0:drop() s0 = nil --- ... ------------------------ --- Iterator corruption during data modification 1)skipping during deleteion -s = box.schema.create_space('test') ---- -... -s:create_index('primary', { type = 'tree', parts = {0, 'num'}, unique = true }) ---- -... -ind = s.index['primary'] ---- -... -s:insert{4} ---- -- [4] -... -s:insert{2} ---- -- [2] -... -s:insert{6} ---- -- [6] -... -s:insert{1} ---- -- [1] -... -s:insert{3} ---- -- [3] -... -s:insert{5} ---- -- [5] -... -s:insert{7} ---- -- [7] -... -s:insert{8} ---- -- [8] -... --- it seems that all elements will be deleted: -for state, t in ind:pairs() do s:delete{t[0]} end ---- -... --- but (oops) some elements are left in space: -iterate('test', 'primary', 0, 1) ---- -- - $ 5$ - - $ 6$ - - $ 7$ - - $ 8$ -... --- cleanup -s:drop() ---- -... -s = nil ---- -... ------------------------ --- Iterator corruption during data modification 2)skipping during insertion -s = box.schema.create_space('test') ---- -... -s:create_index('primary', { type = 'tree', parts = {0, 'num'}, unique = true }) ---- -... -ind = s.index['primary'] ---- -... -s:insert{3} ---- -- [3] -... -s:insert{2} ---- -- [2] -... -s:insert{4} -- now you see me ---- -- [4] -... -s:insert{1} ---- -- [1] -... -gen, param, state = ind:pairs() ---- -... -state, val = gen(param, state) ---- -... -val -- 1 ---- -- [1] -... -state, val = gen(param, state) ---- -... -val -- 2 ---- -- [2] -... -for i = 5,100 do s:insert{i} end ---- -... -state, val = gen(param, state) ---- -... -val -- 3 ---- -- [3] -... -state, val = gen(param, state) ---- -... -val -- now you don't ---- -- null -... --- cleanup -s:drop() ---- -... -s = nil ---- -... 
------------------------ --- Iterator corruption during data modification 3)repeating during delete/insert -s = box.schema.create_space('test') ---- -... -s:create_index('primary', { type = 'tree', parts = {0, 'num'}, unique = true }) ---- -... -ind = s.index['primary'] ---- -... -s:insert{2} ---- -- [2] -... -s:insert{1} ---- -- [1] -... -s:insert{3} ---- -- [3] -... -gen, param, state = ind:pairs() ---- -... -state, val = gen(param, state) ---- -... -val -- 1 ---- -- [1] -... -s:delete{2} ---- -- [2] -... -s:insert{0} ---- -- [0] -... -state, val = gen(param, state) ---- -... -val -- 1 again ---- -- [1] -... -state, val = gen(param, state) ---- -... -val -- null ---- -- null -... --- cleanup -s:drop() ---- -... -s = nil ---- -... diff --git a/test/big/tree_pk.test.lua b/test/big/tree_pk.test.lua index 04ffddee1ab79c0071c08834eb35adb8590e69a4..51557a96590e665bed7949426945768c8a9d5e96 100644 --- a/test/big/tree_pk.test.lua +++ b/test/big/tree_pk.test.lua @@ -195,79 +195,3 @@ box.sort(s0.index['i2']:select(0)) s0:drop() s0 = nil - ------------------------ --- Iterator corruption during data modification 1)skipping during deleteion -s = box.schema.create_space('test') -s:create_index('primary', { type = 'tree', parts = {0, 'num'}, unique = true }) -ind = s.index['primary'] - -s:insert{4} -s:insert{2} -s:insert{6} -s:insert{1} -s:insert{3} -s:insert{5} -s:insert{7} -s:insert{8} - --- it seems that all elements will be deleted: -for state, t in ind:pairs() do s:delete{t[0]} end - --- but (oops) some elements are left in space: -iterate('test', 'primary', 0, 1) - --- cleanup -s:drop() -s = nil - ------------------------ --- Iterator corruption during data modification 2)skipping during insertion -s = box.schema.create_space('test') -s:create_index('primary', { type = 'tree', parts = {0, 'num'}, unique = true }) -ind = s.index['primary'] - -s:insert{3} -s:insert{2} -s:insert{4} -- now you see me -s:insert{1} - -gen, param, state = ind:pairs() -state, val = gen(param, state) -val -- 1 -state, val = gen(param, state) -val -- 2 -for i = 5,100 do s:insert{i} end -state, val = gen(param, state) -val -- 3 -state, val = gen(param, state) -val -- now you don't - --- cleanup -s:drop() -s = nil - - ------------------------ --- Iterator corruption during data modification 3)repeating during delete/insert -s = box.schema.create_space('test') -s:create_index('primary', { type = 'tree', parts = {0, 'num'}, unique = true }) -ind = s.index['primary'] - -s:insert{2} -s:insert{1} -s:insert{3} - -gen, param, state = ind:pairs() -state, val = gen(param, state) -val -- 1 -s:delete{2} -s:insert{0} -state, val = gen(param, state) -val -- 1 again -state, val = gen(param, state) -val -- null - --- cleanup -s:drop() -s = nil diff --git a/test/box/errinj_index.result b/test/box/errinj_index.result index feb59b77a8af5e0abc5053141aab1e0b40363a65..cdf852d27c003d4537f46f7e1803e396572b2604 100644 --- a/test/box/errinj_index.result +++ b/test/box/errinj_index.result @@ -69,36 +69,32 @@ res - [9, 9, 'test9'] - [10, 10, 'test10'] ... -for i = 501,1000 do s:insert{i, i} end ---- -- error: Failed to allocate 1024 bytes in TreeIndex for replace -... -s:delete{1} ---- -- [1, 1, 'test1'] -... res = {} --- ... for _, t in s.index[0]:pairs() do table.insert(res, t) end --- -- error: Failed to allocate 196 bytes in TreeIndex for init iterator ... res --- -- [] -... --- reserve memory for iterator in index. 
last insert may increase tree depth -box.errinj.set("ERRINJ_TREE_ALLOC", false) ---- -- ok +- - [1, 1, 'test1'] + - [2, 2, 'test2'] + - [3, 3, 'test3'] + - [4, 4, 'test4'] + - [5, 5, 'test5'] + - [6, 6, 'test6'] + - [7, 7, 'test7'] + - [8, 8, 'test8'] + - [9, 9, 'test9'] + - [10, 10, 'test10'] ... -s:get{1} +for i = 501,2500 do s:insert{i, i} end --- +- error: Failed to allocate 16384 bytes in TreeIndex for replace ... -box.errinj.set("ERRINJ_TREE_ALLOC", true) +s:delete{1} --- -- ok +- [1, 1, 'test1'] ... res = {} --- @@ -118,28 +114,52 @@ res - [9, 9, 'test9'] - [10, 10, 'test10'] ... -for i = 1001,1500 do s:insert{i, i} end +res = {} --- -- error: Failed to allocate 1024 bytes in TreeIndex for replace ... -s:delete{2} +for i = 501,510 do table.insert(res, (s:get{i})) end --- -- [2, 2, 'test2'] ... -s.index[0]:pairs() +res --- -- error: Failed to allocate 200 bytes in TreeIndex for init iterator +- - [501, 501] + - [502, 502] + - [503, 503] + - [504, 504] + - [505, 505] + - [506, 506] + - [507, 507] + - [508, 508] + - [509, 509] + - [510, 510] ... --- reserve memory for iterator in index. last insert may increase tree depth --- (if rebalance was not initiated) -box.errinj.set("ERRINJ_TREE_ALLOC", false) +res = {} +--- +... +for i = 2001,2010 do table.insert(res, (s:get{i})) end +--- +... +res +--- +- [] +... +--count must be greater that 1000 but less than 2000 +function check_iter_and_size() local count = 0 for _, t in s.index[0]:pairs() do count = count + 1 end return count <= 1000 and "fail 1" or count >= 2000 and "fail 2" or "ok" end +--- +... +check_iter_and_size() --- - ok ... -s:get{1} +for i = 2501,3500 do s:insert{i, i} end --- +- error: Failed to allocate 16384 bytes in TreeIndex for replace ... -box.errinj.set("ERRINJ_TREE_ALLOC", true) +s:delete{2} +--- +- [2, 2, 'test2'] +... +check_iter_and_size() --- - ok ... @@ -160,23 +180,23 @@ res - [9, 9, 'test9'] - [10, 10, 'test10'] ... -for i = 1501,2000 do s:insert{i, i} end +for i = 3501,4500 do s:insert{i, i} end --- -- error: Failed to allocate 1024 bytes in TreeIndex for replace +- error: Failed to allocate 16384 bytes in TreeIndex for replace ... s:delete{3} --- - [3, 3, 'test3'] ... -s.index[0]:pairs() +check_iter_and_size() --- -- error: Failed to allocate 200 bytes in TreeIndex for init iterator +- ok ... box.errinj.set("ERRINJ_TREE_ALLOC", false) --- - ok ... -for i = 2001,2500 do s:insert{i, i} end +for i = 4501,5500 do s:insert{i, i} end --- ... res = {} @@ -217,21 +237,21 @@ res res = {} --- ... -for i = 2001,2010 do table.insert(res, (s:get{i})) end +for i = 5001,5010 do table.insert(res, (s:get{i})) end --- ... res --- -- - [2001, 2001] - - [2002, 2002] - - [2003, 2003] - - [2004, 2004] - - [2005, 2005] - - [2006, 2006] - - [2007, 2007] - - [2008, 2008] - - [2009, 2009] - - [2010, 2010] +- - [5001, 5001] + - [5002, 5002] + - [5003, 5003] + - [5004, 5004] + - [5005, 5005] + - [5006, 5006] + - [5007, 5007] + - [5008, 5008] + - [5009, 5009] + - [5010, 5010] ... s:drop() --- diff --git a/test/box/errinj_index.test.lua b/test/box/errinj_index.test.lua index 05e65e060af7eb5630fff5f381bbfea05def1f2b..cd543a6143d830782fedba5076b3f2bf583eaf81 100644 --- a/test/box/errinj_index.test.lua +++ b/test/box/errinj_index.test.lua @@ -16,41 +16,41 @@ box.errinj.set("ERRINJ_TREE_ALLOC", true) res = {} for i = 1,10 do table.insert(res, s:get{i}) end res -for i = 501,1000 do s:insert{i, i} end -s:delete{1} res = {} for _, t in s.index[0]:pairs() do table.insert(res, t) end res --- reserve memory for iterator in index. 
last insert may increase tree depth -box.errinj.set("ERRINJ_TREE_ALLOC", false) -s:get{1} -box.errinj.set("ERRINJ_TREE_ALLOC", true) +for i = 501,2500 do s:insert{i, i} end +s:delete{1} res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res +res = {} +for i = 501,510 do table.insert(res, (s:get{i})) end +res +res = {} +for i = 2001,2010 do table.insert(res, (s:get{i})) end +res -for i = 1001,1500 do s:insert{i, i} end -s:delete{2} -s.index[0]:pairs() - --- reserve memory for iterator in index. last insert may increase tree depth --- (if rebalance was not initiated) -box.errinj.set("ERRINJ_TREE_ALLOC", false) -s:get{1} -box.errinj.set("ERRINJ_TREE_ALLOC", true) +--count must be greater that 1000 but less than 2000 +function check_iter_and_size() local count = 0 for _, t in s.index[0]:pairs() do count = count + 1 end return count <= 1000 and "fail 1" or count >= 2000 and "fail 2" or "ok" end +check_iter_and_size() +for i = 2501,3500 do s:insert{i, i} end +s:delete{2} +check_iter_and_size() res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res -for i = 1501,2000 do s:insert{i, i} end + +for i = 3501,4500 do s:insert{i, i} end s:delete{3} -s.index[0]:pairs() +check_iter_and_size() box.errinj.set("ERRINJ_TREE_ALLOC", false) -for i = 2001,2500 do s:insert{i, i} end +for i = 4501,5500 do s:insert{i, i} end res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res @@ -59,7 +59,7 @@ res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res res = {} -for i = 2001,2010 do table.insert(res, (s:get{i})) end +for i = 5001,5010 do table.insert(res, (s:get{i})) end res s:drop() diff --git a/test/lib/tarantool-python b/test/lib/tarantool-python index 3beaebe341b278804960021eea03e24a89300bce..0f9d99ecb770586ca011800a6e3921c3353c7459 160000 --- a/test/lib/tarantool-python +++ b/test/lib/tarantool-python @@ -1 +1 @@ -Subproject commit 3beaebe341b278804960021eea03e24a89300bce +Subproject commit 0f9d99ecb770586ca011800a6e3921c3353c7459 diff --git a/test/unit/CMakeLists.txt b/test/unit/CMakeLists.txt index 7a5a5593c9f8993418b4fdd0702d0e4991ef5393..96fe1b38b3161563b781e5841be06c4a5561fae5 100644 --- a/test/unit/CMakeLists.txt +++ b/test/unit/CMakeLists.txt @@ -5,6 +5,7 @@ set_source_files_compile_flags(${all_sources}) include_directories(${PROJECT_SOURCE_DIR}/src) include_directories(${PROJECT_BINARY_DIR}/src) include_directories(${PROJECT_SOURCE_DIR}/src/lib) +include_directories(${CMAKE_SOURCE_DIR}/third_party) add_executable(rlist.test rlist.c test.c ${CMAKE_SOURCE_DIR}/src/lib/salad/rlist.c) add_executable(fiob.test test.c fiob.c ${CMAKE_SOURCE_DIR}/src/fiob.c) add_executable(queue.test queue.c) @@ -35,6 +36,10 @@ add_executable(slab_arena.test slab_arena.c) target_link_libraries(slab_arena.test small) add_executable(arena_mt.test arena_mt.c) target_link_libraries(arena_mt.test small pthread) +add_executable(bps_tree.test bps_tree.cc ${CMAKE_SOURCE_DIR}/third_party/qsort_arg.c) +target_link_libraries(bps_tree.test small) +add_executable(bps_tree_itr.test bps_tree_itr.cc ${CMAKE_SOURCE_DIR}/third_party/qsort_arg.c) +target_link_libraries(bps_tree_itr.test small) add_executable(pt_alloc.test pt_alloc.cc) target_link_libraries(pt_alloc.test small) add_executable(log_dir.test log_dir.cc test.c) diff --git a/test/unit/bps_tree.cc b/test/unit/bps_tree.cc new file mode 100644 index 0000000000000000000000000000000000000000..4fdd881b2af0753ffec2f0657c2581d51ab1a4b3 --- /dev/null +++ b/test/unit/bps_tree.cc @@ -0,0 +1,205 @@ +#include <stdlib.h> +#include <stdint.h> +#include <stdio.h> 
+#include <stdbool.h> + +#include "unit.h" +#include "sptree.h" +#include "qsort_arg.h" + +#ifndef MAX +#define MAX(a,b) (((a)>(b))?(a):(b)) +#endif //#ifndef MAX + +SPTREE_DEF(test, realloc, qsort_arg); + +typedef long type_t; + +static int +compare(type_t a, type_t b); + +#define BPS_TREE_NAME _test +#define BPS_TREE_BLOCK_SIZE 64 /* value is to low specially for tests */ +#define BPS_TREE_EXTENT_SIZE 1024 /* value is to low specially for tests */ +#define BPS_TREE_COMPARE(a, b, arg) compare(a, b) +#define BPS_TREE_COMPARE_KEY(a, b, arg) compare(a, b) +#define bps_tree_elem_t type_t +#define bps_tree_key_t type_t +#define bps_tree_arg_t int +#include "salad/bps_tree.h" + +static int +node_comp(const void *p1, const void *p2, void* unused) +{ + (void)unused; + return *((const type_t *)p1) < *((const type_t *)p2) ? -1 : *((const type_t *)p2) < *((const type_t *)p1) ? 1 : 0; +} + +static int +compare(type_t a, type_t b) +{ + return a < b ? -1 : a > b ? 1 : 0; +} +static void * +extent_alloc() +{ + return malloc(BPS_TREE_EXTENT_SIZE); +} + +static void +extent_free(void *extent) +{ + free(extent); +} + +static void +simple_check() +{ + header(); + + const int rounds = 1000; + bps_tree tree; + bps_tree_create(&tree, 0, extent_alloc, extent_free); + + printf("Insert 1..X, remove 1..X\n"); + for (int i = 0; i < rounds; i++) { + long v = i; + if (bps_tree_find(&tree, v) != NULL) + fail("element already in tree (1)", "true"); + bps_tree_insert_or_replace(&tree, v, 0); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != rounds) + fail("Tree count mismatch (1)", "true"); + + for (int i = 0; i < rounds; i++) { + long v = i; + if (bps_tree_find(&tree, v) == NULL) + fail("element in tree (1)", "false"); + bps_tree_delete(&tree, v); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != 0) + fail("Tree count mismatch (2)", "true"); + + printf("Insert 1..X, remove X..1\n"); + for (int i = 0; i < rounds; i++) { + long v = i; + if (bps_tree_find(&tree, v) != NULL) + fail("element already in tree (2)", "true"); + bps_tree_insert_or_replace(&tree, v, 0); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != rounds) + fail("Tree count mismatch (3)", "true"); + + for (int i = 0; i < rounds; i++) { + long v = rounds - 1 - i; + if (bps_tree_find(&tree, v) == NULL) + fail("element in tree (2)", "false"); + bps_tree_delete(&tree, v); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != 0) + fail("Tree count mismatch (4)", "true"); + + printf("Insert X..1, remove 1..X\n"); + for (int i = 0; i < rounds; i++) { + long v = rounds - 1 - i; + if (bps_tree_find(&tree, v) != NULL) + fail("element already in tree (3)", "true"); + bps_tree_insert_or_replace(&tree, v, 0); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != rounds) + fail("Tree count mismatch (5)", "true"); + + for (int i = 0; i < rounds; i++) { + long v = i; + if (bps_tree_find(&tree, v) == NULL) + fail("element in tree (3)", "false"); + bps_tree_delete(&tree, v); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != 0) + fail("Tree count mismatch (6)", "true"); + + printf("Insert X..1, remove X..1\n"); + for (int i = 0; i < rounds; i++) { + long v = rounds - 1 - i; + if (bps_tree_find(&tree, v) != NULL) + fail("element already in tree 
(4)", "true"); + bps_tree_insert_or_replace(&tree, v, 0); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != rounds) + fail("Tree count mismatch (7)", "true"); + + for (int i = 0; i < rounds; i++) { + long v = rounds - 1 - i; + if (bps_tree_find(&tree, v) == NULL) + fail("element in tree (4)", "false"); + bps_tree_delete(&tree, v); + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + if (bps_tree_size(&tree) != 0) + fail("Tree count mismatch (8)", "true"); + + bps_tree_destroy(&tree); + + footer(); +} + +static void +compare_with_sptree_check() +{ + header(); + + sptree_test spt_test; + sptree_test_init(&spt_test, sizeof(type_t), 0, 0, 0, &node_comp, 0, 0); + + bps_tree tree; + bps_tree_create(&tree, 0, extent_alloc, extent_free); + + const int rounds = 2 * 1024; + const int elem_limit = 32 * 1024; + + for (int i = 0; i < rounds; i++) { + long rnd = rand() % elem_limit; + int find_res1 = sptree_test_find(&spt_test, &rnd) ? 1 : 0; + int find_res2 = bps_tree_find(&tree, rnd) ? 1 : 0; + if (find_res1 ^ find_res2) + fail("trees identity", "false"); + + if (find_res1 == 0) { + sptree_test_replace(&spt_test, &rnd, NULL); + bps_tree_insert_or_replace(&tree, rnd, 0); + } else { + sptree_test_delete(&spt_test, &rnd); + bps_tree_delete(&tree, rnd); + } + + if (bps_tree_debug_check(&tree)) + fail("debug check nonzero", "true"); + } + sptree_test_destroy(&spt_test); + + bps_tree_destroy(&tree); + + footer(); +} + +int +main(void) +{ + simple_check(); + compare_with_sptree_check(); +} diff --git a/test/unit/bps_tree.result b/test/unit/bps_tree.result new file mode 100644 index 0000000000000000000000000000000000000000..73568323e2d95e98b3dd60909fc6da7b595ede35 --- /dev/null +++ b/test/unit/bps_tree.result @@ -0,0 +1,9 @@ + *** simple_check *** +Insert 1..X, remove 1..X +Insert 1..X, remove X..1 +Insert X..1, remove 1..X +Insert X..1, remove X..1 + *** simple_check: done *** + *** compare_with_sptree_check *** + *** compare_with_sptree_check: done *** + \ No newline at end of file diff --git a/test/unit/bps_tree_itr.cc b/test/unit/bps_tree_itr.cc new file mode 100644 index 0000000000000000000000000000000000000000..7797f8e93e18f5e531e7a3e1bf53eab7e8f9c9dd --- /dev/null +++ b/test/unit/bps_tree_itr.cc @@ -0,0 +1,221 @@ +#include <stdlib.h> +#include <stdint.h> +#include <stdio.h> +#include <stdbool.h> + +#include "unit.h" + +struct elem_t { + long first; + long second; + bool operator!= (const struct elem_t& another) const + { + return first == another.first && second == another.second; + } +}; + +static int compare(const elem_t &a, const elem_t &b); +static int compare_key(const elem_t &a, long b); + +#define BPS_TREE_NAME _test +#define BPS_TREE_BLOCK_SIZE 128 /* value is to low specially for tests */ +#define BPS_TREE_EXTENT_SIZE 1024 /* value is to low specially for tests */ +#define BPS_TREE_COMPARE(a, b, arg) compare(a, b) +#define BPS_TREE_COMPARE_KEY(a, b, arg) compare_key(a, b) +#define bps_tree_elem_t struct elem_t +#define bps_tree_key_t long +#define bps_tree_arg_t int +#include "salad/bps_tree.h" + +static int compare(const elem_t &a, const elem_t &b) +{ + return a.first < b.first ? -1 : a.first > b.first ? 1 : + a.second < b.second ? -1 : a.second > b.second ? 1 : 0; +} + +static int compare_key(const elem_t &a, long b) +{ + return a.first < b ? -1 : a.first > b ? 
+}
+
+static void *
+extent_alloc()
+{
+	return malloc(BPS_TREE_EXTENT_SIZE);
+}
+
+static void
+extent_free(void *extent)
+{
+	free(extent);
+}
+
+static void
+itr_check()
+{
+	header();
+
+	bps_tree tree;
+	bps_tree_create(&tree, 0, extent_alloc, extent_free);
+
+	/* Trivial checks on invalid iterators */
+	{
+		bps_tree_iterator tmp1, tmp2;
+		tmp1 = bps_tree_invalid_iterator();
+		tmp2 = bps_tree_invalid_iterator();
+		if (!bps_tree_itr_is_invalid(&tmp1))
+			fail("invalid iterator is not invalid", "true");
+		if (!bps_tree_itr_are_equal(&tree, &tmp1, &tmp2))
+			fail("invalid iterators are not equal", "true");
+	}
+
+	/* Fill the tree */
+	const long count1 = 10000;
+	const long count2 = 5;
+	for (long i = 0; i < count1; i++) {
+		struct elem_t e;
+		e.first = i * 2; /* note: only even 'first' values are inserted */
+		for (long j = 0; j < count2; j++) {
+			e.second = j;
+			bps_tree_insert_or_replace(&tree, e, 0);
+		}
+	}
+	printf("Test tree size: %d\n", (int)bps_tree_size(&tree));
+
+	/* Check that the tree was filled correctly */
+	for (long i = 0; i < count1; i++) {
+		for (long j = 0; j < count2; j++) {
+			if (bps_tree_find(&tree, i * 2) == 0)
+				fail("Integrity check failed (1)", "true");
+			if (bps_tree_find(&tree, i * 2 + 1) != 0)
+				fail("Integrity check failed (2)", "true");
+		}
+	}
+
+	/* Print first 7 elems */
+	{
+		printf("--> ");
+		bps_tree_iterator itr = bps_tree_itr_first(&tree);
+		for (int i = 0; i < 7; i++) {
+			elem_t *elem = bps_tree_itr_get_elem(&tree, &itr);
+			printf("(%ld,%ld) ", elem->first, elem->second);
+			bps_tree_itr_next(&tree, &itr);
+		}
+		printf("\n");
+	}
+	/* Print last 7 elems */
+	{
+		printf("<-- ");
+		bps_tree_iterator itr = bps_tree_itr_last(&tree);
+		for (int i = 0; i < 7; i++) {
+			elem_t *elem = bps_tree_itr_get_elem(&tree, &itr);
+			printf("(%ld,%ld) ", elem->first, elem->second);
+			bps_tree_itr_prev(&tree, &itr);
+		}
+		printf("\n");
+	}
+
+	/* Iterate forward through all elements 5 times */
+	{
+		bps_tree_iterator itr = bps_tree_itr_first(&tree);
+		for (long i = 0; i < count1 * count2 * 5; i++) {
+			elem_t *elem = bps_tree_itr_get_elem(&tree, &itr);
+			if (elem->first != ((i % (count1 * count2)) / count2) * 2)
+				fail("iterate all failed (1)", "true");
+			if (elem->second != i % count2)
+				fail("iterate all failed (2)", "true");
+			bool itr_res = bps_tree_itr_next(&tree, &itr);
+			if (!!itr_res == !!bps_tree_itr_is_invalid(&itr))
+				fail("iterate all failed (3)", "true");
+			if (!itr_res) {
+				itr_res = bps_tree_itr_next(&tree, &itr);
+				if (!itr_res || bps_tree_itr_is_invalid(&itr))
+					fail("iterate all failed (4)", "true");
+			}
+		}
+	}
+
+	/* Iterate backward through all elements 5 times */
+	{
+		bps_tree_iterator itr = bps_tree_itr_last(&tree);
+		for (long i = 0; i < count1 * count2 * 5; i++) {
+			elem_t *elem = bps_tree_itr_get_elem(&tree, &itr);
+			long j = count1 * count2 - 1 - (i % (count1 * count2));
+			if (elem->first != (j / count2) * 2)
+				fail("iterate all failed (5)", "true");
+			if (elem->second != j % count2)
+				fail("iterate all failed (6)", "true");
+			bool itr_res = bps_tree_itr_prev(&tree, &itr);
+			if (!!itr_res == !!bps_tree_itr_is_invalid(&itr))
+				fail("iterate all failed (7)", "true");
+			if (!itr_res) {
+				itr_res = bps_tree_itr_prev(&tree, &itr);
+				if (!itr_res || bps_tree_itr_is_invalid(&itr))
+					fail("iterate all failed (8)", "true");
+			}
+		}
+	}
+
+	/* Check iterating in range from lower bound to upper bound */
+	/* Several probes */
+	const long keys[] = {-1, 0, 10, 15, count1 * 2 - 2, count1 * 2};
+	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
+		const long key = keys[i];
+		bool has_this_key1;
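+		/*
+		 * Expected semantics, as the checks below exercise them:
+		 * lower_bound positions an iterator on the first element
+		 * not less than the key, upper_bound just past the last
+		 * element equal to it, so [begin, end) spans exactly the
+		 * matching elements; both calls report through the bool
+		 * out-parameter whether an exact match exists, and the
+		 * two flags must agree.
+		 */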
+		bps_tree_iterator begin = bps_tree_lower_bound(&tree, key, &has_this_key1);
+		bool has_this_key2;
+		bps_tree_iterator end = bps_tree_upper_bound(&tree, key, &has_this_key2);
+		if (has_this_key1 != has_this_key2)
+			fail("Exact flag is broken", "true");
+		printf("Key %ld, %s range [%s, %s): ", key,
+		       has_this_key1 ? "not empty" : "empty",
+		       bps_tree_itr_is_invalid(&begin) ? "eof" : "ptr",
+		       bps_tree_itr_is_invalid(&end) ? "eof" : "ptr");
+		bps_tree_iterator runner = begin;
+		while (!bps_tree_itr_are_equal(&tree, &runner, &end)) {
+			elem_t *elem = bps_tree_itr_get_elem(&tree, &runner);
+			printf("(%ld,%ld) ", elem->first, elem->second);
+			bps_tree_itr_next(&tree, &runner);
+		}
+		printf(" <-> ");
+		runner = end;
+		while (!bps_tree_itr_are_equal(&tree, &runner, &begin)) {
+			bps_tree_itr_prev(&tree, &runner);
+			elem_t *elem = bps_tree_itr_get_elem(&tree, &runner);
+			printf("(%ld,%ld) ", elem->first, elem->second);
+		}
+		printf("\n");
+	}
+
+	/* Check iterating in range from lower bound to upper bound */
+	/* Automated */
+	for (long i = -1; i <= count1 + 1; i++) {
+		bps_tree_iterator begin = bps_tree_lower_bound(&tree, i, 0);
+		bps_tree_iterator end = bps_tree_upper_bound(&tree, i, 0);
+		long real_count = 0;
+		while (!bps_tree_itr_are_equal(&tree, &begin, &end)) {
+			elem_t *elem = bps_tree_itr_get_elem(&tree, &begin);
+			if (elem->first != i)
+				fail("range itr failed (1)", "true");
+			if (elem->second != real_count)
+				fail("range itr failed (2)", "true");
+			real_count++;
+			bps_tree_itr_next(&tree, &begin);
+		}
+		long must_be_count = 0;
+		if (i >= 0 && i / 2 <= count1 - 1 && (i & 1) == 0)
+			must_be_count = count2;
+		if (real_count != must_be_count)
+			fail("range itr failed (3)", "true");
+	}
+
+	bps_tree_destroy(&tree);
+
+	footer();
+}
+
+int
+main(void)
+{
+	itr_check();
+}
diff --git a/test/unit/bps_tree_itr.result b/test/unit/bps_tree_itr.result
new file mode 100644
index 0000000000000000000000000000000000000000..7a548bdf4e7fce644ab3dd781f36e138eb4fa6b4
--- /dev/null
+++ b/test/unit/bps_tree_itr.result
@@ -0,0 +1,12 @@
+ *** itr_check ***
+Test tree size: 50000
+--> (0,0) (0,1) (0,2) (0,3) (0,4) (2,0) (2,1)
+<-- (19998,4) (19998,3) (19998,2) (19998,1) (19998,0) (19996,4) (19996,3)
+Key -1, empty range [ptr, ptr): <->
+Key 0, not empty range [ptr, ptr): (0,0) (0,1) (0,2) (0,3) (0,4) <-> (0,4) (0,3) (0,2) (0,1) (0,0)
+Key 10, not empty range [ptr, ptr): (10,0) (10,1) (10,2) (10,3) (10,4) <-> (10,4) (10,3) (10,2) (10,1) (10,0)
+Key 15, empty range [ptr, ptr): <->
+Key 19998, not empty range [ptr, eof): (19998,0) (19998,1) (19998,2) (19998,3) (19998,4) <-> (19998,4) (19998,3) (19998,2) (19998,1) (19998,0)
+Key 20000, empty range [eof, eof): <->
+ *** itr_check: done ***
+
\ No newline at end of file
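For reference, a minimal sketch of how a bps_tree specialization like the ones instantiated in these tests can be used outside the unit-test harness. It relies only on preprocessor parameters and calls that already appear in the tests above (bps_tree_create, bps_tree_insert_or_replace, bps_tree_find, bps_tree_lower_bound, bps_tree_itr_get_elem, bps_tree_itr_next, bps_tree_itr_are_equal, bps_tree_destroy); the BPS_TREE_NAME _example specialization, the helper names, and the program structure are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <stdlib.h>

static int
elem_compare(long a, long b)
{
	return a < b ? -1 : a > b ? 1 : 0;
}

/* Same parameter set as the bps_tree.cc test, with 'long' elements and keys. */
#define BPS_TREE_NAME _example
#define BPS_TREE_BLOCK_SIZE 128
#define BPS_TREE_EXTENT_SIZE 1024
#define BPS_TREE_COMPARE(a, b, arg) elem_compare(a, b)
#define BPS_TREE_COMPARE_KEY(a, b, arg) elem_compare(a, b)
#define bps_tree_elem_t long
#define bps_tree_key_t long
#define bps_tree_arg_t int
#include "salad/bps_tree.h"

/* Extent callbacks, mirroring the test helpers. */
static void *
example_extent_alloc()
{
	return malloc(BPS_TREE_EXTENT_SIZE);
}

static void
example_extent_free(void *extent)
{
	free(extent);
}

int
main(void)
{
	bps_tree tree;
	bps_tree_create(&tree, 0, example_extent_alloc, example_extent_free);

	/* Insert even keys 0, 2, ..., 98. */
	for (long v = 0; v < 100; v += 2)
		bps_tree_insert_or_replace(&tree, v, 0);

	/* Point lookup: returns a pointer to the element or NULL. */
	if (bps_tree_find(&tree, 42) != NULL)
		printf("42 is in the tree\n");

	/* Scan the half-open key range [10, 20). */
	bps_tree_iterator itr = bps_tree_lower_bound(&tree, 10, 0);
	bps_tree_iterator end = bps_tree_lower_bound(&tree, 20, 0);
	while (!bps_tree_itr_are_equal(&tree, &itr, &end)) {
		long *elem = bps_tree_itr_get_elem(&tree, &itr);
		printf("%ld ", *elem);
		bps_tree_itr_next(&tree, &itr);
	}
	printf("\n");

	bps_tree_destroy(&tree);
	return 0;
}

The range scan uses lower_bound for both ends of an exclusive key range, mirroring the [begin, end) iteration convention exercised by itr_check() above.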