Commit 73a83017 authored by Konstantin Osipov

Quotas: review fixes.

parent e4150145
......@@ -37,6 +37,7 @@ extern "C" {
#include "box/tuple.h"
#include "small/small.h"
#include "small/quota.h"
#include "memory.h"
/** A callback passed into salloc_stat() and invoked for every slab class. */
......@@ -130,7 +131,7 @@ lbox_runtime_info(struct lua_State *L)
lua_settable(L, -3);
lua_pushstring(L, "maxalloc");
luaL_pushnumber64(L, quota_get_total(runtime.quota));
luaL_pushnumber64(L, quota_get(runtime.quota));
lua_settable(L, -3);
return 1;
......
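For context, the hunk above makes box.runtime.info().maxalloc report the value of the renamed accessor. A standalone C sketch of the two getters, not part of the commit (assumes the post-commit small/quota.h):

#include <stdio.h>
#include "small/quota.h"	/* post-commit header assumed */

int main(void)
{
	struct quota q;
	quota_init(&q, 256 * 1024 * 1024);	/* 256 MB limit */
	/* quota_get() is the renamed quota_get_total(): the configured
	 * limit; quota_used() (ex quota_get_used()) is the current usage. */
	printf("maxalloc = %zu, used = %zu\n", quota_get(&q), quota_used(&q));
	return 0;
}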
......@@ -36,9 +36,12 @@
#include <third_party/qsort_arg.h>
/** For all memory used by all tree indexes. */
extern struct quota memtx_quota;
static struct slab_arena index_arena;
static struct slab_cache index_arena_slab_cache;
static struct mempool tree_extent_pool;
/** Number of allocated extents. */
static int tree_extent_pool_initialized = 0;
static bool index_arena_initialized = false;
/* {{{ Utilities. *************************************************/
......@@ -203,10 +206,17 @@ TreeIndex::TreeIndex(struct key_def *key_def_arg)
: Index(key_def_arg), build_array(0), build_array_size(0),
build_array_alloc_size(0)
{
if (tree_extent_pool_initialized == 0) {
mempool_create(&tree_extent_pool, &cord()->slabc,
if (index_arena_initialized == false) {
const uint32_t SLAB_SIZE = 4 * 1024 * 1024;
if (slab_arena_create(&index_arena, &memtx_quota,
0, SLAB_SIZE, MAP_PRIVATE)) {
panic_syserror("failed to initialize index arena");
}
slab_cache_create(&index_arena_slab_cache, &index_arena,
SLAB_SIZE);
mempool_create(&tree_extent_pool, &index_arena_slab_cache,
BPS_TREE_EXTENT_SIZE);
tree_extent_pool_initialized = 1;
index_arena_initialized = true;
}
bps_tree_index_create(&tree, key_def, extent_alloc, extent_free);
}
......
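The extent hooks handed to bps_tree_index_create() presumably just forward to the new pool, so every tree extent is served from index_arena and therefore counted against memtx_quota. A hypothetical sketch, not taken from the commit: only the hook names appear in the call above, their signatures and bodies are an assumption, as is the small/mempool.h path:

#include "small/mempool.h"	/* header path is an assumption */

static struct mempool tree_extent_pool;	/* created lazily, as in the diff above */

/* Hypothetical hook: one BPS-tree extent per mempool object, charged
 * to memtx_quota through index_arena. */
static void *
extent_alloc(void)
{
	return mempool_alloc(&tree_extent_pool);
}

/* Hypothetical hook: give the extent back to the pool. */
static void
extent_free(void *extent)
{
	mempool_free(&tree_extent_pool, extent);
}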
......@@ -46,12 +46,12 @@ static uint32_t formats_size, formats_capacity;
uint32_t snapshot_version;
struct quota memtx_quota;
struct slab_arena memtx_arena;
struct slab_cache memtx_slab_cache;
static struct slab_cache memtx_slab_cache;
struct small_alloc memtx_alloc;
extern struct quota memory_quota;
/** Extract all available type info from keys. */
void
field_type_create(enum field_type *types, uint32_t field_count,
......@@ -520,18 +520,16 @@ tuple_compare_with_key(const struct tuple *tuple, const char *key,
}
void
tuple_init(float alloc_arena_max_size, uint32_t objsize_min,
tuple_init(float tuple_arena_max_size, uint32_t objsize_min,
float alloc_factor)
{
tuple_format_ber = tuple_format_new(&rlist_nil);
/* Make sure this one stays around. */
tuple_format_ref(tuple_format_ber, 1);
uint32_t slab_size = 4 * 1024 * 1024;
size_t max_size = alloc_arena_max_size * 1024 * 1024 * 1024;
if (quota_set(&memory_quota, max_size)) {
panic_syserror("Memory quota set failed!");
}
const uint32_t SLAB_SIZE = 4 * 1024 * 1024;
size_t max_size = tuple_arena_max_size * 1024 * 1024 * 1024;
quota_init(&memtx_quota, max_size);
int flags;
if (access("/proc/user_beancounters", F_OK) == 0) {
......@@ -544,13 +542,13 @@ tuple_init(float alloc_arena_max_size, uint32_t objsize_min,
flags = MAP_SHARED;
}
if (slab_arena_create(&memtx_arena, &memory_quota,
max_size, slab_size, flags)) {
if (slab_arena_create(&memtx_arena, &memtx_quota,
max_size, SLAB_SIZE, flags)) {
panic_syserror("failed to preallocate %zu bytes",
max_size);
}
slab_cache_create(&memtx_slab_cache, &memtx_arena,
slab_size);
SLAB_SIZE);
small_alloc_create(&memtx_alloc, &memtx_slab_cache,
objsize_min, alloc_factor);
}
......
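A quick standalone check of the size arithmetic above, not part of the commit: tuple_arena_max_size is given in gigabytes, and quota_init() rounds the resulting byte count up to 1 KB units (assumes the post-commit small/quota.h):

#include <assert.h>
#include "small/quota.h"

int main(void)
{
	struct quota q;
	float tuple_arena_max_size = 1.5f;	/* gigabytes */
	size_t max_size = tuple_arena_max_size * 1024 * 1024 * 1024;
	quota_init(&q, max_size);		/* 1610612736 bytes */
	assert(quota_get(&q) == max_size);	/* already a multiple of 1 KB */
	assert(quota_used(&q) == 0);
	return 0;
}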
......@@ -36,8 +36,11 @@ enum { FORMAT_REF_MAX = INT32_MAX, TUPLE_REF_MAX = UINT16_MAX };
struct tbuf;
/** Common quota for tuples and indexes */
extern struct quota memtx_quota;
/** Tuple allocator */
extern struct small_alloc memtx_alloc;
extern struct slab_cache memtx_slab_cache;
/** Tuple slab arena */
extern struct slab_arena memtx_arena;
/**
......
......@@ -28,7 +28,6 @@
* THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stddef.h>
#include <stdlib.h>
#include <stdint.h>
......@@ -38,28 +37,30 @@
extern "C" {
#endif /* defined(__cplusplus) */
enum {
QUOTA_MAX_ALLOC = 0xFFFFFFFFull * 1024u,
QUOTA_GRANULARITY = 1024
};
#define QUOTA_UNIT_SIZE 1024ULL
static const uint64_t QUOTA_MAX = QUOTA_UNIT_SIZE * UINT32_MAX;
/** A basic limit on memory usage */
struct quota {
/* high order dword is total available memory and low order dword
* - currently used memory both in QUOTA_GRANULARITY units
/**
* High order dword is the total available memory
* and the low order dword is the currently used amount.
* Both values are represented in units of size
* QUOTA_UNIT_SIZE.
*/
uint64_t value;
};
/**
* Initialize quota with gived memory limit
* Initialize quota with a given memory limit
*/
static inline void
quota_init(struct quota *quota, size_t total)
{
uint64_t new_total_in_granul = (total + (QUOTA_GRANULARITY - 1))
/ QUOTA_GRANULARITY;
quota->value = new_total_in_granul << 32;
uint64_t new_total = (total + (QUOTA_UNIT_SIZE - 1)) /
QUOTA_UNIT_SIZE;
quota->value = new_total << 32;
}
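For instance, a limit that is not a multiple of the unit size is rounded up before being packed into the high dword, while usage starts at zero in the low dword. A throwaway check, not part of the commit (assumes the post-commit header):

#include <assert.h>
#include <stdint.h>
#include "small/quota.h"

int main(void)
{
	struct quota q;
	quota_init(&q, 1000000);
	/* 1000000 bytes rounds up to 977 units of 1024 bytes each. */
	assert((q.value >> 32) == 977);		/* total, in units */
	assert((q.value & UINT32_MAX) == 0);	/* used, in units  */
	return 0;
}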
/**
......@@ -74,108 +75,112 @@ quota_init(struct quota *quota, size_t total)
* Get current quota limit
*/
static inline size_t
quota_get_total(const struct quota *quota)
quota_get(const struct quota *quota)
{
return (quota->value >> 32) * QUOTA_GRANULARITY;
return (quota->value >> 32) * QUOTA_UNIT_SIZE;
}
/**
* Get current quota usage
*/
static inline size_t
quota_get_used(const struct quota *quota)
quota_used(const struct quota *quota)
{
return (quota->value & 0xFFFFFFFFu) * QUOTA_GRANULARITY;
return (quota->value & UINT32_MAX) * QUOTA_UNIT_SIZE;
}
static inline void
quota_get_total_and_used(struct quota *quota, size_t *total, size_t *used)
{
uint64_t value = quota->value;
*total = (value >> 32) * QUOTA_GRANULARITY;
*used = (value & 0xFFFFFFFFu) * QUOTA_GRANULARITY;
*total = (value >> 32) * QUOTA_UNIT_SIZE;
*used = (value & UINT32_MAX) * QUOTA_UNIT_SIZE;
}
/**
* Set quota memory limit.
* returns 0 on success
* returns -1 on error - if it's not possible to decrease limit
* due to greater current usage
* @retval > 0 aligned size set on success
* @retval -1 error, i.e. when it is not possible to decrease
* limit due to greater current usage
*/
static inline int
static inline ssize_t
quota_set(struct quota *quota, size_t new_total)
{
uint32_t new_total_in_granul = (new_total + (QUOTA_GRANULARITY - 1))
/ QUOTA_GRANULARITY;
assert(new_total <= QUOTA_MAX);
/* Align the new total */
uint32_t new_total_in_units = (new_total + (QUOTA_UNIT_SIZE - 1)) /
QUOTA_UNIT_SIZE;
while (1) {
uint64_t old_value = quota->value;
/* uint32_t cur_total = old_value >> 32; */
uint32_t cur_used = old_value & 0xFFFFFFFFu;
if (new_total_in_granul < cur_used)
uint64_t value = quota->value;
uint32_t used_in_units = value & UINT32_MAX;
if (new_total_in_units < used_in_units)
return -1;
uint64_t new_value = (((uint64_t)new_total_in_granul) << 32)
| cur_used;
if (atomic_cas(&quota->value, old_value, new_value) == old_value)
uint64_t new_value =
((uint64_t) new_total_in_units << 32) | used_in_units;
if (atomic_cas(&quota->value, value, new_value) == value)
break;
}
return 0;
return new_total_in_units * QUOTA_UNIT_SIZE;
}
/**
* Use up a quota
* returns 0 on success
* returns -1 on error - if quota limit reached
* @retval > 0 aligned value on success
* @retval -1 on error - if quota limit reached
*/
static inline int
static inline ssize_t
quota_use(struct quota *quota, size_t size)
{
uint32_t size_in_granul = (size + (QUOTA_GRANULARITY - 1))
/ QUOTA_GRANULARITY;
assert(size_in_granul);
assert(size < QUOTA_MAX);
uint32_t size_in_units = (size + (QUOTA_UNIT_SIZE - 1))
/ QUOTA_UNIT_SIZE;
assert(size_in_units);
while (1) {
uint64_t old_value = quota->value;
uint32_t cur_total = old_value >> 32;
uint32_t old_used = old_value & 0xFFFFFFFFu;
uint64_t value = quota->value;
uint32_t total_in_units = value >> 32;
uint32_t used_in_units = value & UINT32_MAX;
uint32_t new_used = old_used + size_in_granul;
assert(new_used > old_used);
uint32_t new_used_in_units = used_in_units + size_in_units;
assert(new_used_in_units > used_in_units);
if (new_used > cur_total)
if (new_used_in_units > total_in_units)
return -1;
uint64_t new_value = (((uint64_t)cur_total) << 32)
| new_used;
uint64_t new_value =
((uint64_t) total_in_units << 32) | new_used_in_units;
if (atomic_cas(&quota->value, old_value, new_value) == old_value)
if (atomic_cas(&quota->value, value, new_value) == value)
break;
}
return 0;
return size_in_units * QUOTA_UNIT_SIZE;
}
/** Release used memory */
static inline void
quota_release(struct quota *quota, size_t size)
{
uint32_t size_in_granul = (size + (QUOTA_GRANULARITY - 1))
/ QUOTA_GRANULARITY;
assert(size_in_granul);
assert(size < QUOTA_MAX);
uint32_t size_in_units = (size + (QUOTA_UNIT_SIZE - 1))
/ QUOTA_UNIT_SIZE;
assert(size_in_units);
while (1) {
uint64_t old_value = quota->value;
uint32_t cur_total = old_value >> 32;
uint32_t old_used = old_value & 0xFFFFFFFFu;
uint64_t value = quota->value;
uint32_t total_in_units = value >> 32;
uint32_t used_in_units = value & UINT32_MAX;
assert(size_in_granul <= old_used);
uint32_t new_used = old_used - size_in_granul;
assert(size_in_units <= used_in_units);
uint32_t new_used_in_units = used_in_units - size_in_units;
uint64_t new_value = (((uint64_t)cur_total) << 32)
| new_used;
uint64_t new_value =
((uint64_t) total_in_units << 32) | new_used_in_units;
if (atomic_cas(&quota->value, old_value, new_value) == old_value)
if (atomic_cas(&quota->value, value, new_value) == value)
break;
}
}
#undef atomic_cas
#undef QUOTA_UNIT_SIZE
#if defined(__cplusplus)
} /* extern "C" { */
......
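The switch from int to ssize_t changes the calling convention: quota_set() and quota_use() now return the amount actually accounted, rounded up to the 1 KB unit, and -1 on failure, so callers should test for < 0 and later release exactly the value they got back. A standalone usage sketch against the new API, not part of the commit:

#include <stdio.h>
#include <sys/types.h>
#include "small/quota.h"

int main(void)
{
	struct quota q;
	quota_init(&q, 1024 * 1024);		/* 1 MB limit */

	/* The 100-byte request is rounded up to one 1 KB unit;
	 * the rounded amount is what gets charged and returned. */
	ssize_t charged = quota_use(&q, 100);
	if (charged < 0)
		return 1;			/* limit reached */
	printf("charged %zd, used %zu of %zu\n",
	       charged, quota_used(&q), quota_get(&q));

	/* Shrinking the limit below current usage is refused... */
	if (quota_set(&q, 0) < 0)
		printf("cannot set the limit below current usage\n");

	/* ...so release what was charged first. */
	quota_release(&q, charged);
	return 0;
}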
......@@ -120,7 +120,7 @@ slab_arena_create(struct slab_arena *arena, struct quota *quota,
arena->quota = quota;
/** Prealloc can not be greater than the quota */
prealloc = MIN(prealloc, quota_get_total(quota));
prealloc = MIN(prealloc, quota_get(quota));
/** Extremely large sizes can not be aligned properly */
prealloc = MIN(prealloc, SIZE_MAX - arena->slab_size);
/* Align prealloc around a fixed number of slabs. */
......@@ -165,7 +165,7 @@ slab_map(struct slab_arena *arena)
if ((ptr = lf_lifo_pop(&arena->cache)))
return ptr;
if (quota_use(arena->quota, arena->slab_size))
if (quota_use(arena->quota, arena->slab_size) < 0)
return NULL;
/** Need to allocate a new slab. */
......
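Since quota_use() now returns the positive charged size on success, testing its result for truth would invert the meaning, hence the explicit < 0 comparison. The surrounding allocation pattern is roughly the following; a simplified sketch, not the actual slab_map() body, and the anonymous-mmap fallback is an assumption:

#include <sys/mman.h>
#include "small/quota.h"

/* Hypothetical helper: charge the quota first, then map a slab;
 * on mmap failure give the charged amount back. */
static void *
map_slab(struct quota *q, size_t slab_size)
{
	if (quota_use(q, slab_size) < 0)
		return NULL;			/* quota limit reached */
	void *ptr = mmap(NULL, slab_size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ptr == MAP_FAILED) {
		quota_release(q, slab_size);
		return NULL;
	}
	return ptr;
}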
......@@ -27,22 +27,21 @@
* SUCH DAMAGE.
*/
#include "memory.h"
#include "small/quota.h"
struct slab_arena runtime;
struct quota memory_quota;
static const size_t RUNTIME_SLAB_SIZE = 4 * 1024 * 1024;
static const size_t DEFAULT_MEM_QUOTA_SIZE = QUOTA_MAX_ALLOC;
void
memory_init()
{
static struct quota runtime_quota;
static const size_t SLAB_SIZE = 4 * 1024 * 1024;
/* default quota initialization */
quota_init(&memory_quota, DEFAULT_MEM_QUOTA_SIZE);
quota_init(&runtime_quota, QUOTA_MAX);
/* No limit on the runtime memory. */
slab_arena_create(&runtime, &memory_quota, 0,
RUNTIME_SLAB_SIZE, MAP_PRIVATE);
slab_arena_create(&runtime, &runtime_quota, 0,
SLAB_SIZE, MAP_PRIVATE);
}
void
......
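QUOTA_MAX is the largest limit the packed representation can express, UINT32_MAX units of 1 KB (just under 4 TB), so seeding the runtime quota with it effectively leaves runtime allocations unlimited. A throwaway check, not part of the commit (64-bit build and the post-commit header assumed):

#include <assert.h>
#include "small/quota.h"

int main(void)
{
	struct quota q;
	quota_init(&q, QUOTA_MAX);
	/* QUOTA_MAX = 1024 * UINT32_MAX bytes; the limit survives the
	 * round trip through the 32-bit units field unchanged. */
	assert(quota_get(&q) == QUOTA_MAX);
	return 0;
}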
......@@ -30,7 +30,6 @@
*/
#include "small/region.h"
#include "small/small.h"
#include "small/quota.h"
/**
* Define the global components of Tarantool memory subsystem:
* slab caches, allocators, arenas.
......
......@@ -105,7 +105,7 @@ int main()
quota_init(&quota, UINT_MAX);
slab_arena_create(&arena, &quota, UINT_MAX,
slab_arena_create(&arena, &quota, 0,
4000000, MAP_PRIVATE);
slab_cache_create(&cache, &arena, 0);
......
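With prealloc dropped to 0 the test arena no longer tries to reserve UINT_MAX bytes up front; slabs are mapped lazily and each one is charged against the quota at slab_map() time. A minimal standalone sketch of that pattern, not part of the commit (the small/slab_arena.h path and the slab_unmap() call are assumed from the small library; slab_arena_destroy() appears later in this diff):

#include <sys/mman.h>
#include "small/quota.h"
#include "small/slab_arena.h"	/* header path is an assumption */

int main(void)
{
	struct quota q;
	struct slab_arena arena;
	quota_init(&q, 16 * 1024 * 1024);	/* 16 MB cap */
	/* prealloc = 0: nothing is mapped until the first slab_map(). */
	if (slab_arena_create(&arena, &q, 0, 65536, MAP_PRIVATE))
		return 1;
	void *slab = slab_map(&arena);		/* charges one slab of quota */
	if (slab != NULL)
		slab_unmap(&arena, slab);
	slab_arena_destroy(&arena);
	return 0;
}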
......@@ -9,8 +9,8 @@ const size_t THREAD_CNT = 10;
const size_t RUN_CNT = 128 * 1024;
struct thread_data {
long use_change;
long last_lim_set;
size_t use_change;
size_t last_lim_set;
long use_change_success;
long lim_change_success;
};
......@@ -22,8 +22,7 @@ void *thread_routine(void *vparam)
{
struct thread_data *data = (struct thread_data *)vparam;
size_t check_fail_count = 0;
bool allocated = false;
size_t allocated_size = 0;
ssize_t allocated_size = 0;
for (size_t i = 0; i < RUN_CNT; i++) {
{
size_t total, used;
......@@ -31,24 +30,27 @@ void *thread_routine(void *vparam)
if (used > total)
check_fail_count++;
}
long rnd = ((rand() & 0xFFFFFFF) + 1) * QUOTA_GRANULARITY;
int succ = quota_set(&quota, (size_t)rnd);
if (succ == 0) {
data->last_lim_set = rnd;
ssize_t max = rand() % QUOTA_MAX;
max = quota_set(&quota, max);
pthread_yield();
if (max > 0) {
data->last_lim_set = max;
data->lim_change_success++;
}
if (allocated) {
if (allocated_size > 0) {
quota_release(&quota, allocated_size);
allocated = false;
allocated_size = -1;
data->use_change = 0;
data->use_change_success++;
pthread_yield();
} else {
allocated_size = (((long)rand()) & 0xFFFFFFl) + 1;
if (quota_use(&quota, allocated_size) == 0) {
allocated = true;
allocated_size = rand() % max + 1;
allocated_size = quota_use(&quota, allocated_size);
if (allocated_size > 0) {
data->use_change = allocated_size;
data->use_change_success++;
}
pthread_yield();
}
}
return (void *)check_fail_count;
......@@ -60,6 +62,7 @@ main(int n, char **a)
(void)n;
(void)a;
quota_init(&quota, 0);
srand(time(0));
plan(5);
......@@ -78,18 +81,18 @@ main(int n, char **a)
long set_success_count = 0;
long use_success_count = 0;
for (size_t i = 0; i < THREAD_CNT; i++) {
if (datum[i].last_lim_set == quota_get_total(&quota))
if (datum[i].last_lim_set == quota_get(&quota))
one_set_successed = true;
total_alloc += ((datum[i].use_change + QUOTA_GRANULARITY - 1) / QUOTA_GRANULARITY) * QUOTA_GRANULARITY;
total_alloc += datum[i].use_change;
use_success_count += datum[i].use_change_success;
set_success_count += datum[i].lim_change_success;
}
ok(check_fail_count == 0, "no fails detected");
ok(one_set_successed, "one of thread limit set is final");
ok(total_alloc == quota_get_used(&quota), "total alloc match");
ok(use_success_count > THREAD_CNT * RUN_CNT * .9, "uses are mostly successful");
ok(set_success_count > THREAD_CNT * RUN_CNT * .9, "sets are mostly successful");
ok(total_alloc == quota_used(&quota), "total alloc match");
ok(use_success_count > THREAD_CNT * RUN_CNT * .1, "uses are mostly successful");
ok(set_success_count > THREAD_CNT * RUN_CNT * .1, "sets are mostly successful");
return check_plan();
}
......@@ -78,7 +78,7 @@ region_test_truncate()
int main()
{
quota_init(&quota, UINT_MAX);
slab_arena_create(&arena, &quota, UINT_MAX,
slab_arena_create(&arena, &quota, 0,
4000000, MAP_PRIVATE);
slab_cache_create(&cache, &arena, 0);
......
......@@ -11,7 +11,7 @@ slab_arena_print(struct slab_arena *arena)
{
printf("arena->prealloc = %zu\narena->maxalloc = %zu\n"
"arena->used = %zu\narena->slab_size = %u\n",
arena->prealloc, quota_get_total(arena->quota),
arena->prealloc, quota_get(arena->quota),
arena->used, arena->slab_size);
}
......@@ -38,8 +38,8 @@ int main()
slab_arena_print(&arena);
slab_arena_destroy(&arena);
quota_init(&quota, 3000 * QUOTA_GRANULARITY);
slab_arena_create(&arena, &quota, 2000 * QUOTA_GRANULARITY, 1, MAP_PRIVATE);
quota_init(&quota, 2000000);
slab_arena_create(&arena, &quota, 3000000, 1, MAP_PRIVATE);
slab_arena_print(&arena);
slab_arena_destroy(&arena);
}
......@@ -19,7 +19,7 @@ arena->prealloc = 65536
arena->maxalloc = 65536
arena->used = 65536
arena->slab_size = 65536
arena->prealloc = 2097152
arena->maxalloc = 3072000
arena->prealloc = 2031616
arena->maxalloc = 2000896
arena->used = 0
arena->slab_size = 65536
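The new expected numbers follow from the changed test: quota_init(&quota, 2000000) rounds the limit up to 1954 units of 1 KB, i.e. the reported maxalloc of 2000896 bytes, and the requested 3000000-byte prealloc is first capped to that quota and then aligned to whole 65536-byte slabs, giving the reported prealloc of 2031616 bytes (31 slabs). A throwaway check of that arithmetic; the upward rounding direction of the final alignment is an assumption inferred from the printed value:

#include <stdio.h>

int main(void)
{
	size_t slab_size = 65536;
	size_t quota = (2000000 + 1023) / 1024 * 1024;	/* 2000896 */
	size_t prealloc = 3000000;
	if (prealloc > quota)
		prealloc = quota;			/* capped by the quota */
	prealloc = (prealloc + slab_size - 1) / slab_size * slab_size;
	printf("maxalloc = %zu\nprealloc = %zu\n", quota, prealloc);
	return 0;
}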
......@@ -96,7 +96,7 @@ int main()
quota_init(&quota, UINT_MAX);
slab_arena_create(&arena, &quota, UINT_MAX, 4000000,
slab_arena_create(&arena, &quota, 0, 4000000,
MAP_PRIVATE);
slab_cache_create(&cache, &arena, 0);
......