From 73a8301725cc5fd247272e51e7fdbbf583796614 Mon Sep 17 00:00:00 2001
From: Konstantin Osipov <kostja@tarantool.org>
Date: Wed, 29 Oct 2014 21:07:40 +0300
Subject: [PATCH] Quotas: review fixes.

---
 src/box/lua/slab.cc         |   3 +-
 src/box/tree_index.cc       |  18 ++++--
 src/box/tuple.cc            |  22 +++----
 src/box/tuple.h             |   5 +-
 src/lib/small/quota.h       | 121 +++++++++++++++++++-----------------
 src/lib/small/slab_arena.c  |   4 +-
 src/memory.cc               |  13 ++--
 src/memory.h                |   1 -
 test/unit/mempool.c         |   2 +-
 test/unit/quota.cc          |  39 ++++++------
 test/unit/region.c          |   2 +-
 test/unit/slab_arena.c      |   6 +-
 test/unit/slab_arena.result |   4 +-
 test/unit/small_alloc.c     |   2 +-
 14 files changed, 130 insertions(+), 112 deletions(-)

diff --git a/src/box/lua/slab.cc b/src/box/lua/slab.cc
index 4f2385ac5c..6babee055a 100644
--- a/src/box/lua/slab.cc
+++ b/src/box/lua/slab.cc
@@ -37,6 +37,7 @@ extern "C" {
 #include "box/tuple.h"
 #include "small/small.h"
+#include "small/quota.h"
 #include "memory.h"

 /** A callback passed into salloc_stat() and invoked for every slab class. */
@@ -130,7 +131,7 @@ lbox_runtime_info(struct lua_State *L)
         lua_settable(L, -3);

         lua_pushstring(L, "maxalloc");
-        luaL_pushnumber64(L, quota_get_total(runtime.quota));
+        luaL_pushnumber64(L, quota_get(runtime.quota));
         lua_settable(L, -3);

         return 1;
diff --git a/src/box/tree_index.cc b/src/box/tree_index.cc
index cc45bdf8ba..d877a09e46 100644
--- a/src/box/tree_index.cc
+++ b/src/box/tree_index.cc
@@ -36,9 +36,12 @@
 #include <third_party/qsort_arg.h>

 /** For all memory used by all tree indexes. */
+extern struct quota memtx_quota;
+static struct slab_arena index_arena;
+static struct slab_cache index_arena_slab_cache;
 static struct mempool tree_extent_pool;
 /** Number of allocated extents. */
-static int tree_extent_pool_initialized = 0;
+static bool index_arena_initialized = false;

 /* {{{ Utilities. *************************************************/
@@ -203,10 +206,17 @@ TreeIndex::TreeIndex(struct key_def *key_def_arg)
         : Index(key_def_arg),
         build_array(0), build_array_size(0), build_array_alloc_size(0)
 {
-        if (tree_extent_pool_initialized == 0) {
-                mempool_create(&tree_extent_pool, &cord()->slabc,
+        if (index_arena_initialized == false) {
+                const uint32_t SLAB_SIZE = 4 * 1024 * 1024;
+                if (slab_arena_create(&index_arena, &memtx_quota,
+                                      0, SLAB_SIZE, MAP_PRIVATE)) {
+                        panic_syserror("failed to initialize index arena");
+                }
+                slab_cache_create(&index_arena_slab_cache, &index_arena,
+                                  SLAB_SIZE);
+                mempool_create(&tree_extent_pool, &index_arena_slab_cache,
                                BPS_TREE_EXTENT_SIZE);
-                tree_extent_pool_initialized = 1;
+                index_arena_initialized = true;
         }
         bps_tree_index_create(&tree, key_def, extent_alloc, extent_free);
 }
diff --git a/src/box/tuple.cc b/src/box/tuple.cc
index 441303e4e5..bafdf31ff3 100644
--- a/src/box/tuple.cc
+++ b/src/box/tuple.cc
@@ -46,12 +46,12 @@ static uint32_t formats_size, formats_capacity;
 uint32_t snapshot_version;

+struct quota memtx_quota;
+
 struct slab_arena memtx_arena;
-struct slab_cache memtx_slab_cache;
+static struct slab_cache memtx_slab_cache;
 struct small_alloc memtx_alloc;

-extern struct quota memory_quota;
-
 /** Extract all available type info from keys.
  */
 void
 field_type_create(enum field_type *types, uint32_t field_count,
@@ -520,18 +520,16 @@ tuple_compare_with_key(const struct tuple *tuple, const char *key,
 }

 void
-tuple_init(float alloc_arena_max_size, uint32_t objsize_min,
+tuple_init(float tuple_arena_max_size, uint32_t objsize_min,
            float alloc_factor)
 {
         tuple_format_ber = tuple_format_new(&rlist_nil);
         /* Make sure this one stays around. */
         tuple_format_ref(tuple_format_ber, 1);

-        uint32_t slab_size = 4 * 1024 * 1024;
-        size_t max_size = alloc_arena_max_size * 1024 * 1024 * 1024;
-        if (quota_set(&memory_quota, max_size)) {
-                panic_syserror("Memory quota set failed!");
-        }
+        const uint32_t SLAB_SIZE = 4 * 1024 * 1024;
+        size_t max_size = tuple_arena_max_size * 1024 * 1024 * 1024;
+        quota_init(&memtx_quota, max_size);

         int flags;
         if (access("/proc/user_beancounters", F_OK) == 0) {
@@ -544,13 +542,13 @@ tuple_init(float alloc_arena_max_size, uint32_t objsize_min,
                 flags = MAP_SHARED;
         }

-        if (slab_arena_create(&memtx_arena, &memory_quota,
-                              max_size, slab_size, flags)) {
+        if (slab_arena_create(&memtx_arena, &memtx_quota,
+                              max_size, SLAB_SIZE, flags)) {
                 panic_syserror("failed to preallocate %zu bytes",
                                max_size);
         }
         slab_cache_create(&memtx_slab_cache, &memtx_arena,
-                          slab_size);
+                          SLAB_SIZE);
         small_alloc_create(&memtx_alloc, &memtx_slab_cache,
                            objsize_min, alloc_factor);
 }
diff --git a/src/box/tuple.h b/src/box/tuple.h
index 4cf6311ab2..5c973d512e 100644
--- a/src/box/tuple.h
+++ b/src/box/tuple.h
@@ -36,8 +36,11 @@ enum { FORMAT_REF_MAX = INT32_MAX, TUPLE_REF_MAX = UINT16_MAX };

 struct tbuf;

+/** Common quota for tuples and indexes */
+extern struct quota memtx_quota;
+/** Tuple allocator */
 extern struct small_alloc memtx_alloc;
-extern struct slab_cache memtx_slab_cache;
+/** Tuple slab arena */
 extern struct slab_arena memtx_arena;

 /**
diff --git a/src/lib/small/quota.h b/src/lib/small/quota.h
index cf9faf2c64..96312f3305 100644
--- a/src/lib/small/quota.h
+++ b/src/lib/small/quota.h
@@ -28,7 +28,6 @@
  * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-
 #include <stddef.h>
 #include <stdlib.h>
 #include <stdint.h>
@@ -38,28 +37,30 @@
 extern "C" {
 #endif /* defined(__cplusplus) */

-enum {
-        QUOTA_MAX_ALLOC = 0xFFFFFFFFull * 1024u,
-        QUOTA_GRANULARITY = 1024
-};
+#define QUOTA_UNIT_SIZE 1024ULL
+
+static const uint64_t QUOTA_MAX = QUOTA_UNIT_SIZE * UINT32_MAX;

 /** A basic limit on memory usage */
 struct quota {
-        /* high order dword is total available memory and low order dword
-         * - currently used memory both in QUOTA_GRANULARITY units
+        /**
+         * High order dword is the total available memory
+         * and the low order dword is the currently used amount.
+         * Both values are represented in units of size
+         * QUOTA_UNIT_SIZE.
          */
         uint64_t value;
 };

 /**
- * Initialize quota with gived memory limit
+ * Initialize quota with a given memory limit
 */
 static inline void
 quota_init(struct quota *quota, size_t total)
 {
-        uint64_t new_total_in_granul = (total + (QUOTA_GRANULARITY - 1))
-                                       / QUOTA_GRANULARITY;
-        quota->value = new_total_in_granul << 32;
+        uint64_t new_total = (total + (QUOTA_UNIT_SIZE - 1)) /
+                             QUOTA_UNIT_SIZE;
+        quota->value = new_total << 32;
 }

 /**
@@ -74,108 +75,112 @@ quota_init(struct quota *quota, size_t total)
  * Get current quota limit
  */
 static inline size_t
-quota_get_total(const struct quota *quota)
+quota_get(const struct quota *quota)
 {
-        return (quota->value >> 32) * QUOTA_GRANULARITY;
+        return (quota->value >> 32) * QUOTA_UNIT_SIZE;
 }

 /**
  * Get current quota usage
  */
 static inline size_t
-quota_get_used(const struct quota *quota)
+quota_used(const struct quota *quota)
 {
-        return (quota->value & 0xFFFFFFFFu) * QUOTA_GRANULARITY;
+        return (quota->value & UINT32_MAX) * QUOTA_UNIT_SIZE;
 }

 static inline void
 quota_get_total_and_used(struct quota *quota, size_t *total, size_t *used)
 {
         uint64_t value = quota->value;
-        *total = (value >> 32) * QUOTA_GRANULARITY;
-        *used = (value & 0xFFFFFFFFu) * QUOTA_GRANULARITY;
+        *total = (value >> 32) * QUOTA_UNIT_SIZE;
+        *used = (value & UINT32_MAX) * QUOTA_UNIT_SIZE;
 }

 /**
  * Set quota memory limit.
- * returns 0 on success
- * returns -1 on error - if it's not possible to decrease limit
- * due to greater current usage
+ * @retval > 0 aligned size set on success
+ * @retval -1 error, i.e. when it is not possible to decrease
+ * limit due to greater current usage
 */
-static inline int
+static inline ssize_t
 quota_set(struct quota *quota, size_t new_total)
 {
-        uint32_t new_total_in_granul = (new_total + (QUOTA_GRANULARITY - 1))
-                                       / QUOTA_GRANULARITY;
+        assert(new_total <= QUOTA_MAX);
+        /* Align the new total */
+        uint32_t new_total_in_units = (new_total + (QUOTA_UNIT_SIZE - 1)) /
+                                      QUOTA_UNIT_SIZE;
         while (1) {
-                uint64_t old_value = quota->value;
-                /* uint32_t cur_total = old_value >> 32; */
-                uint32_t cur_used = old_value & 0xFFFFFFFFu;
-                if (new_total_in_granul < cur_used)
+                uint64_t value = quota->value;
+                uint32_t used_in_units = value & UINT32_MAX;
+                if (new_total_in_units < used_in_units)
                         return -1;
-                uint64_t new_value = (((uint64_t)new_total_in_granul) << 32)
-                                     | cur_used;
-                if (atomic_cas(&quota->value, old_value, new_value) == old_value)
+                uint64_t new_value =
+                        ((uint64_t) new_total_in_units << 32) | used_in_units;
+                if (atomic_cas(&quota->value, value, new_value) == value)
                         break;
         }
-        return 0;
+        return new_total_in_units * QUOTA_UNIT_SIZE;
 }

 /**
  * Use up a quota
- * returns 0 on success
- * returns -1 on error - if quota limit reached
+ * @retval > 0 aligned value on success
+ * @retval -1 on error - if quota limit reached
 */
-static inline int
+static inline ssize_t
 quota_use(struct quota *quota, size_t size)
 {
-        uint32_t size_in_granul = (size + (QUOTA_GRANULARITY - 1))
-                                  / QUOTA_GRANULARITY;
-        assert(size_in_granul);
+        assert(size < QUOTA_MAX);
+        uint32_t size_in_units = (size + (QUOTA_UNIT_SIZE - 1))
+                                 / QUOTA_UNIT_SIZE;
+        assert(size_in_units);
         while (1) {
-                uint64_t old_value = quota->value;
-                uint32_t cur_total = old_value >> 32;
-                uint32_t old_used = old_value & 0xFFFFFFFFu;
+                uint64_t value = quota->value;
+                uint32_t total_in_units = value >> 32;
+                uint32_t used_in_units = value & UINT32_MAX;

-                uint32_t new_used = old_used + size_in_granul;
-                assert(new_used > old_used);
+                uint32_t new_used_in_units = used_in_units + size_in_units;
+                assert(new_used_in_units > used_in_units);

-                if (new_used > cur_total)
+                if (new_used_in_units > total_in_units)
                         return -1;

-                uint64_t new_value = (((uint64_t)cur_total) << 32)
-                                     | new_used;
+                uint64_t new_value =
+                        ((uint64_t) total_in_units << 32) | new_used_in_units;

-                if (atomic_cas(&quota->value, old_value, new_value) == old_value)
+                if (atomic_cas(&quota->value, value, new_value) == value)
                         break;
         }
-        return 0;
+        return size_in_units * QUOTA_UNIT_SIZE;
 }

 /** Release used memory */
 static inline void
 quota_release(struct quota *quota, size_t size)
 {
-        uint32_t size_in_granul = (size + (QUOTA_GRANULARITY - 1))
-                                  / QUOTA_GRANULARITY;
-        assert(size_in_granul);
+        assert(size < QUOTA_MAX);
+        uint32_t size_in_units = (size + (QUOTA_UNIT_SIZE - 1))
+                                 / QUOTA_UNIT_SIZE;
+        assert(size_in_units);
         while (1) {
-                uint64_t old_value = quota->value;
-                uint32_t cur_total = old_value >> 32;
-                uint32_t old_used = old_value & 0xFFFFFFFFu;
+                uint64_t value = quota->value;
+                uint32_t total_in_units = value >> 32;
+                uint32_t used_in_units = value & UINT32_MAX;

-                assert(size_in_granul <= old_used);
-                uint32_t new_used = old_used - size_in_granul;
+                assert(size_in_units <= used_in_units);
+                uint32_t new_used_in_units = used_in_units - size_in_units;

-                uint64_t new_value = (((uint64_t)cur_total) << 32)
-                                     | new_used;
+                uint64_t new_value =
+                        ((uint64_t) total_in_units << 32) | new_used_in_units;

-                if (atomic_cas(&quota->value, old_value, new_value) == old_value)
+                if (atomic_cas(&quota->value, value, new_value) == value)
                         break;
         }
 }

 #undef atomic_cas
+#undef QUOTA_UNIT_SIZE

 #if defined(__cplusplus)
 } /* extern "C" { */
diff --git a/src/lib/small/slab_arena.c b/src/lib/small/slab_arena.c
index b0d942875f..12e2ff2743 100644
--- a/src/lib/small/slab_arena.c
+++ b/src/lib/small/slab_arena.c
@@ -120,7 +120,7 @@ slab_arena_create(struct slab_arena *arena, struct quota *quota,
         arena->quota = quota;
         /** Prealloc can not be greater than the quota */
-        prealloc = MIN(prealloc, quota_get_total(quota));
+        prealloc = MIN(prealloc, quota_get(quota));
         /** Extremely large sizes can not be aligned properly */
         prealloc = MIN(prealloc, SIZE_MAX - arena->slab_size);
         /* Align prealloc around a fixed number of slabs. */
@@ -165,7 +165,7 @@ slab_map(struct slab_arena *arena)
         if ((ptr = lf_lifo_pop(&arena->cache)))
                 return ptr;

-        if (quota_use(arena->quota, arena->slab_size))
+        if (quota_use(arena->quota, arena->slab_size) < 0)
                 return NULL;

         /** Need to allocate a new slab. */
diff --git a/src/memory.cc b/src/memory.cc
index ccec92f402..95e9f97675 100644
--- a/src/memory.cc
+++ b/src/memory.cc
@@ -27,22 +27,21 @@
  * SUCH DAMAGE.
  */
 #include "memory.h"
+#include "small/quota.h"

 struct slab_arena runtime;
-struct quota memory_quota;
-
-static const size_t RUNTIME_SLAB_SIZE = 4 * 1024 * 1024;
-static const size_t DEFAULT_MEM_QUOTA_SIZE = QUOTA_MAX_ALLOC;

 void
 memory_init()
 {
+        static struct quota runtime_quota;
+        static const size_t SLAB_SIZE = 4 * 1024 * 1024;
         /* default quota initialization */
-        quota_init(&memory_quota, DEFAULT_MEM_QUOTA_SIZE);
+        quota_init(&runtime_quota, QUOTA_MAX);

         /* No limit on the runtime memory.
          */
-        slab_arena_create(&runtime, &memory_quota, 0,
-                          RUNTIME_SLAB_SIZE, MAP_PRIVATE);
+        slab_arena_create(&runtime, &runtime_quota, 0,
+                          SLAB_SIZE, MAP_PRIVATE);
 }

 void
diff --git a/src/memory.h b/src/memory.h
index 1c52ef3eaf..fff192f03e 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -30,7 +30,6 @@
  */
 #include "small/region.h"
 #include "small/small.h"
-#include "small/quota.h"
 /**
  * Define the global components of Tarantool memory subsystem:
  * slab caches, allocators, arenas.
diff --git a/test/unit/mempool.c b/test/unit/mempool.c
index c0cad2bef2..90c72d5f53 100644
--- a/test/unit/mempool.c
+++ b/test/unit/mempool.c
@@ -105,7 +105,7 @@ int main()
         quota_init(&quota, UINT_MAX);
-        slab_arena_create(&arena, &quota, UINT_MAX,
+        slab_arena_create(&arena, &quota, 0,
                           4000000, MAP_PRIVATE);
         slab_cache_create(&cache, &arena, 0);
diff --git a/test/unit/quota.cc b/test/unit/quota.cc
index 301d2b5c77..0f66a391bf 100644
--- a/test/unit/quota.cc
+++ b/test/unit/quota.cc
@@ -9,8 +9,8 @@ const size_t THREAD_CNT = 10;
 const size_t RUN_CNT = 128 * 1024;

 struct thread_data {
-        long use_change;
-        long last_lim_set;
+        size_t use_change;
+        size_t last_lim_set;
         long use_change_success;
         long lim_change_success;
 };
@@ -22,8 +22,7 @@ void *thread_routine(void *vparam)
 {
         struct thread_data *data = (struct thread_data *)vparam;
         size_t check_fail_count = 0;
-        bool allocated = false;
-        size_t allocated_size = 0;
+        ssize_t allocated_size = 0;
         for (size_t i = 0; i < RUN_CNT; i++) {
                 {
                         size_t total, used;
@@ -31,24 +30,27 @@ void *thread_routine(void *vparam)
                         if (used > total)
                                 check_fail_count++;
                 }
-                long rnd = ((rand() & 0xFFFFFFF) + 1) * QUOTA_GRANULARITY;
-                int succ = quota_set(&quota, (size_t)rnd);
-                if (succ == 0) {
-                        data->last_lim_set = rnd;
+                ssize_t max = rand() % QUOTA_MAX;
+                max = quota_set(&quota, max);
+                pthread_yield();
+                if (max > 0) {
+                        data->last_lim_set = max;
                         data->lim_change_success++;
                 }
-                if (allocated) {
+                if (allocated_size > 0) {
                         quota_release(&quota, allocated_size);
-                        allocated = false;
+                        allocated_size = -1;
                         data->use_change = 0;
                         data->use_change_success++;
+                        pthread_yield();
                 } else {
-                        allocated_size = (((long)rand()) & 0xFFFFFFl) + 1;
-                        if (quota_use(&quota, allocated_size) == 0) {
-                                allocated = true;
+                        allocated_size = rand() % max + 1;
+                        allocated_size = quota_use(&quota, allocated_size);
+                        if (allocated_size > 0) {
                                 data->use_change = allocated_size;
                                 data->use_change_success++;
                         }
+                        pthread_yield();
                 }
         }
         return (void *)check_fail_count;
@@ -60,6 +62,7 @@ main(int n, char **a)
         (void)n;
         (void)a;
         quota_init(&quota, 0);
+        srand(time(0));

         plan(5);
@@ -78,18 +81,18 @@
         long set_success_count = 0;
         long use_success_count = 0;
         for (size_t i = 0; i < THREAD_CNT; i++) {
-                if (datum[i].last_lim_set == quota_get_total(&quota))
+                if (datum[i].last_lim_set == quota_get(&quota))
                         one_set_successed = true;
-                total_alloc += ((datum[i].use_change + QUOTA_GRANULARITY - 1)
-                                / QUOTA_GRANULARITY) * QUOTA_GRANULARITY;
+                total_alloc += datum[i].use_change;
                 use_success_count += datum[i].use_change_success;
                 set_success_count += datum[i].lim_change_success;
         }
         ok(check_fail_count == 0, "no fails detected");
         ok(one_set_successed, "one of thread limit set is final");
-        ok(total_alloc == quota_get_used(&quota), "total alloc match");
-        ok(use_success_count > THREAD_CNT * RUN_CNT * .9, "uses are mosly successful");
-        ok(set_success_count > THREAD_CNT * RUN_CNT * .9, "sets are mosly successful");
+        ok(total_alloc == quota_used(&quota), "total alloc match");
+        ok(use_success_count > THREAD_CNT * RUN_CNT * .1, "uses are mosly successful");
+        ok(set_success_count > THREAD_CNT * RUN_CNT * .1, "sets are mosly successful");

         return check_plan();
 }
diff --git a/test/unit/region.c b/test/unit/region.c
index 76175309b5..b1fbd34238 100644
--- a/test/unit/region.c
+++ b/test/unit/region.c
@@ -78,7 +78,7 @@ region_test_truncate()
 int main()
 {
         quota_init(&quota, UINT_MAX);
-        slab_arena_create(&arena, &quota, UINT_MAX,
+        slab_arena_create(&arena, &quota, 0,
                           4000000, MAP_PRIVATE);
         slab_cache_create(&cache, &arena, 0);
diff --git a/test/unit/slab_arena.c b/test/unit/slab_arena.c
index 5fdce47b21..fe71bc7134 100644
--- a/test/unit/slab_arena.c
+++ b/test/unit/slab_arena.c
@@ -11,7 +11,7 @@ slab_arena_print(struct slab_arena *arena)
 {
         printf("arena->prealloc = %zu\narena->maxalloc = %zu\n"
                "arena->used = %zu\narena->slab_size = %u\n",
-               arena->prealloc, quota_get_total(arena->quota),
+               arena->prealloc, quota_get(arena->quota),
                arena->used, arena->slab_size);
 }

@@ -38,8 +38,8 @@ int main()
         slab_arena_print(&arena);
         slab_arena_destroy(&arena);

-        quota_init(&quota, 3000 * QUOTA_GRANULARITY);
-        slab_arena_create(&arena, &quota, 2000 * QUOTA_GRANULARITY, 1, MAP_PRIVATE);
+        quota_init(&quota, 2000000);
+        slab_arena_create(&arena, &quota, 3000000, 1, MAP_PRIVATE);
         slab_arena_print(&arena);
         slab_arena_destroy(&arena);
 }
diff --git a/test/unit/slab_arena.result b/test/unit/slab_arena.result
index 0492ec6eea..fb73025acb 100644
--- a/test/unit/slab_arena.result
+++ b/test/unit/slab_arena.result
@@ -19,7 +19,7 @@ arena->prealloc = 65536
 arena->maxalloc = 65536
 arena->used = 65536
 arena->slab_size = 65536
-arena->prealloc = 2097152
-arena->maxalloc = 3072000
+arena->prealloc = 2031616
+arena->maxalloc = 2000896
 arena->used = 0
 arena->slab_size = 65536
diff --git a/test/unit/small_alloc.c b/test/unit/small_alloc.c
index 3683b2535f..96126ac3ad 100644
--- a/test/unit/small_alloc.c
+++ b/test/unit/small_alloc.c
@@ -96,7 +96,7 @@ int main()
 {
         quota_init(&quota, UINT_MAX);
-        slab_arena_create(&arena, &quota, UINT_MAX, 4000000,
+        slab_arena_create(&arena, &quota, 0, 4000000,
                           MAP_PRIVATE);
         slab_cache_create(&cache, &arena, 0);
--
GitLab
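
For reviewers who want to try the quota scheme above outside the tree, here is a minimal, self-contained sketch of the same idea used in src/lib/small/quota.h: the limit and the current usage live in one 64-bit word, in 1 KB units, and are updated with a single compare-and-swap, which is what keeps quota_use() and quota_release() lock-free. The sketch is not part of the patch; it uses C11 atomics instead of the library's atomic_cas macro, and the demo_* names and the main() driver are illustrative only.

/*
 * Standalone illustration (not the library code) of the quota scheme
 * from src/lib/small/quota.h: limit in the high dword, usage in the
 * low dword, both in 1 KB units, updated with compare-and-swap.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

enum { DEMO_UNIT = 1024 };

struct demo_quota {
        _Atomic uint64_t value; /* total in the high dword, used in the low one */
};

static void
demo_quota_init(struct demo_quota *q, size_t total)
{
        uint64_t total_in_units = (total + DEMO_UNIT - 1) / DEMO_UNIT;
        atomic_store(&q->value, total_in_units << 32);
}

/* Round the size up to DEMO_UNIT and charge it; -1 if the limit is hit. */
static ssize_t
demo_quota_use(struct demo_quota *q, size_t size)
{
        uint32_t size_in_units = (size + DEMO_UNIT - 1) / DEMO_UNIT;
        uint64_t old = atomic_load(&q->value);
        while (1) {
                uint32_t total = old >> 32;
                uint32_t used = old & UINT32_MAX;
                if ((uint64_t) used + size_in_units > total)
                        return -1;
                uint64_t new = ((uint64_t) total << 32) | (used + size_in_units);
                /* On failure the current value is reloaded into old. */
                if (atomic_compare_exchange_weak(&q->value, &old, new))
                        return (ssize_t) size_in_units * DEMO_UNIT;
        }
}

static void
demo_quota_release(struct demo_quota *q, size_t size)
{
        uint32_t size_in_units = (size + DEMO_UNIT - 1) / DEMO_UNIT;
        uint64_t old = atomic_load(&q->value);
        while (1) {
                uint32_t total = old >> 32;
                uint32_t used = old & UINT32_MAX;
                assert(size_in_units <= used);
                uint64_t new = ((uint64_t) total << 32) | (used - size_in_units);
                if (atomic_compare_exchange_weak(&q->value, &old, new))
                        return;
        }
}

int
main(void)
{
        struct demo_quota q;
        demo_quota_init(&q, 4 * 1024 * 1024);    /* 4 MB limit */
        ssize_t got = demo_quota_use(&q, 3000);  /* charged as 3072 bytes */
        printf("charged %zd bytes\n", got);
        if (got > 0)
                demo_quota_release(&q, (size_t) got);
        return 0;
}

Rounding sizes up to 1 KB units is what lets both the limit and the usage fit in 32 bits each (up to 4 TiB of quota), so the pair can always be read and updated atomically as one 64-bit value.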