diff --git a/src/box/memtx_allocator.h b/src/box/memtx_allocator.h
index 755ec0206aa1adeb6ad673488a751d00cdea16ce..5b5ae11eec16cee04bcbbe405213e541f64aabda 100644
--- a/src/box/memtx_allocator.h
+++ b/src/box/memtx_allocator.h
@@ -198,10 +198,7 @@ class MemtxAllocator {
 
 	static void destroy()
 	{
-		while (!stailq_empty(&gc)) {
-			struct memtx_tuple *memtx_tuple = stailq_shift_entry(
-					&gc, struct memtx_tuple, in_gc);
-			immediate_free_tuple(memtx_tuple);
+		while (collect_garbage()) {
 		}
 	}
 
@@ -273,6 +270,20 @@ class MemtxAllocator {
 		}
 	}
 
+	/**
+	 * Does a garbage collection step. Returns false if there are no more
+	 * tuples to collect.
+	 */
+	static bool collect_garbage()
+	{
+		for (int i = 0; !stailq_empty(&gc) && i < GC_BATCH_SIZE; i++) {
+			struct memtx_tuple *memtx_tuple = stailq_shift_entry(
+					&gc, struct memtx_tuple, in_gc);
+			immediate_free_tuple(memtx_tuple);
+		}
+		return !stailq_empty(&gc);
+	}
+
 private:
 	static constexpr int GC_BATCH_SIZE = 100;
 
@@ -294,15 +305,6 @@ class MemtxAllocator {
 		free(memtx_tuple, size);
 	}
 
-	static void collect_garbage()
-	{
-		for (int i = 0; !stailq_empty(&gc) && i < GC_BATCH_SIZE; i++) {
-			struct memtx_tuple *memtx_tuple = stailq_shift_entry(
-					&gc, struct memtx_tuple, in_gc);
-			immediate_free_tuple(memtx_tuple);
-		}
-	}
-
 	/**
 	 * Returns the most recent open read view that needs this tuple or null
 	 * if the tuple may be freed immediately.
diff --git a/test/unit/memtx_allocator.cc b/test/unit/memtx_allocator.cc
index e4520dacb91db8e336d3d7f6d7a3a641f0667f98..16792a572a5c60209e9fb48d03786fe7e01da6e6 100644
--- a/test/unit/memtx_allocator.cc
+++ b/test/unit/memtx_allocator.cc
@@ -88,10 +88,8 @@ alloc_tuple_count_cb(const void *stats_, void *ctx_)
 static int
 alloc_tuple_count()
 {
-	/* Trigger garbage collection before checking count. */
-	struct tuple *tuple = alloc_tuple();
-	fail_if(tuple == NULL);
-	free_tuple(tuple);
+	while (MemtxAllocator<SmallAlloc>::collect_garbage()) {
+	}
 	struct alloc_tuple_count_ctx ctx;
 	struct allocator_stats unused;
 	ctx.count = 0;
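
For illustration, a minimal standalone sketch of the batched garbage-collection pattern this patch exposes. It is not the actual Tarantool code: Allocator, Tuple, and delayed_free() below are simplified stand-ins for MemtxAllocator, memtx_tuple, and the real delayed-free path. The point is the contract of collect_garbage(): free at most GC_BATCH_SIZE queued tuples per call and report whether more work remains, so callers can either drain the queue in a loop (as destroy() and the test helper do) or spread the work across several calls.

/*
 * Standalone sketch, assuming simplified stand-in types; compile with any
 * C++11 compiler. Demonstrates the "one bounded batch per call, return
 * whether more work remains" contract.
 */
#include <cstdio>
#include <deque>

struct Tuple {
	int id;
};

class Allocator {
public:
	/* Queue a tuple for delayed deallocation. */
	static void delayed_free(Tuple *tuple) { gc.push_back(tuple); }

	/*
	 * Do one garbage collection step: free up to GC_BATCH_SIZE queued
	 * tuples. Returns false once the queue is fully drained.
	 */
	static bool collect_garbage()
	{
		for (int i = 0; !gc.empty() && i < GC_BATCH_SIZE; i++) {
			delete gc.front();
			gc.pop_front();
		}
		return !gc.empty();
	}

	/* Drain the queue completely, one batch at a time. */
	static void destroy()
	{
		while (collect_garbage()) {
		}
	}

private:
	static constexpr int GC_BATCH_SIZE = 100;
	static std::deque<Tuple *> gc;
};

std::deque<Tuple *> Allocator::gc;

int main()
{
	for (int i = 0; i < 250; i++)
		Allocator::delayed_free(new Tuple{i});
	/*
	 * With 250 queued tuples and a batch size of 100, the first two
	 * calls return true (work remains) and the third returns false.
	 */
	int calls = 1;
	while (Allocator::collect_garbage())
		calls++;
	std::printf("collect_garbage() calls: %d\n", calls); /* prints 3 */
	Allocator::destroy(); /* no-op here: the queue is already empty */
	return 0;
}

Returning a "more work remains" flag instead of looping internally is what lets the unit test drive garbage collection to completion deterministically, without the old trick of allocating and freeing a throwaway tuple just to trigger a GC step.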