Commit 0bc66535 authored by Konstantin Osipov

Merge remote-tracking branch 'origin/fix-gh-624-alloc-align'

parents 94f4e1ec 25d99caf
@@ -50,16 +50,13 @@ mslab_create(struct mslab *slab, struct mempool *pool)
slab->pool = pool;
slab->free_idx = 0;
slab->free_list = 0;
/* A bit is set if a slot is free. */
memset(slab->map, 0xFF, sizeof(slab->map[0]) * pool->mapsize);
}
/** Beginning of object data in the slab. */
static inline void *
mslab_offset(struct mslab *slab)
{
return (char *) slab + mslab_sizeof() +
MEMPOOL_MAP_SIZEOF * slab->pool->mapsize;
return (char *) slab + slab->pool->objoffset;
}
/** Pointer to an object from object index. */
@@ -69,40 +66,20 @@ mslab_obj(struct mslab *slab, uint32_t idx)
return mslab_offset(slab) + idx * slab->pool->objsize;
}
/** Object index from pointer to object */
static inline uint32_t
mslab_idx(struct mslab *slab, void *ptr)
{
/*
* @todo: consider optimizing this division with
* multiply-shift method described in Hacker's Delight,
* p. 187.
*/
return ((uint32_t)(ptr - mslab_offset(slab)))/slab->pool->objsize;
}
void *
mslab_alloc(struct mslab *slab)
{
assert(slab->nfree);
void *result;
uint32_t idx;
if (slab->free_list) {
/* Recycle an object from the garbage pool. */
idx = mslab_idx(slab, slab->free_list);
result = slab->free_list;
slab->free_list = *(void **)slab->free_list;
} else {
/* Use an object from the "untouched" area of the slab. */
idx = slab->free_idx++;
result = mslab_obj(slab, idx);
result = mslab_obj(slab, slab->free_idx++);
}
/* Mark the position as occupied. */
const uint32_t slot = idx / MEMPOOL_MAP_BIT;
const uint32_t bit_no = idx & (MEMPOOL_MAP_BIT-1);
slab->map[slot] ^= ((mbitmap_t) 1) << (mbitmap_t) bit_no;
/* If the slab is full, remove it from the rb tree. */
if (--slab->nfree == 0)
mslab_tree_remove(&slab->pool->free_slabs, slab);
@@ -117,13 +94,6 @@ mslab_free(struct mempool *pool, struct mslab *slab, void *ptr)
*(void **)ptr = slab->free_list;
slab->free_list = ptr;
uint32_t idx = mslab_idx(slab, ptr);
/* Mark the position as free. */
const uint32_t slot = idx / MEMPOOL_MAP_BIT;
const uint32_t bit_no = idx & (MEMPOOL_MAP_BIT-1);
slab->map[slot] |= ((mbitmap_t) 1) << bit_no;
slab->nfree++;
if (slab->nfree == 1) {
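With the bitmap gone, the two hunks above reduce object recycling to a plain intrusive free list: mslab_free() stores the current list head in the first sizeof(void *) bytes of the freed object and makes that object the new head, while mslab_alloc() either pops the list or carves a fresh object off the untouched tail via free_idx. The stand-alone sketch below illustrates the same LIFO scheme outside the slab machinery; struct toy_pool, toy_alloc() and toy_free() are illustrative names, not part of the library.

```c
#include <assert.h>
#include <stdlib.h>

/*
 * A toy fixed-size allocator using the same intrusive free list as
 * mslab_alloc()/mslab_free(): a freed object stores the pointer to
 * the previously freed one in its own first bytes.
 */
struct toy_pool {
	char *area;       /* backing storage for 'count' objects */
	size_t objsize;   /* must be >= sizeof(void *) */
	size_t count;
	size_t free_idx;  /* first never-used object */
	void *free_list;  /* head of the recycled-object list */
};

static void *
toy_alloc(struct toy_pool *p)
{
	if (p->free_list != NULL) {
		/* Recycle: pop the head of the intrusive list. */
		void *obj = p->free_list;
		p->free_list = *(void **) obj;
		return obj;
	}
	if (p->free_idx < p->count)
		return p->area + p->objsize * p->free_idx++;
	return NULL; /* pool exhausted */
}

static void
toy_free(struct toy_pool *p, void *obj)
{
	/* Push: the freed object itself becomes the new list head. */
	*(void **) obj = p->free_list;
	p->free_list = obj;
}

int main(void)
{
	struct toy_pool p = { malloc(64 * 16), 16, 64, 0, NULL };
	void *a = toy_alloc(&p);
	void *b = toy_alloc(&p);
	toy_free(&p, a);
	assert(toy_alloc(&p) == a); /* LIFO reuse, like the slab free list */
	(void) b;
	free(p.area);
	return 0;
}
```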
@@ -161,35 +131,12 @@ mempool_create_with_order(struct mempool *pool, struct slab_cache *cache,
pool->spare = NULL;
pool->objsize = objsize;
pool->slab_order = order;
/* Account for slab meta. */
uint32_t slab_size = slab_order_size(pool->cache, pool->slab_order) -
mslab_sizeof();
/* Total size of slab */
uint32_t slab_size = slab_order_size(pool->cache, pool->slab_order);
/* Calculate how many objects will actually fit in a slab. */
/*
* We have 'slab_size' bytes for X objects and
* X / 8 bits in free/used array.
*
* Therefore the formula for objcount is:
*
* X * objsize + X/8 = slab_size
* X = (8 * slab_size)/(8 * objsize + 1)
*/
uint32_t objcount = (CHAR_BIT * slab_size)/(CHAR_BIT * objsize + 1);
/* How many elements of slab->map can map objcount. */
assert(objcount);
uint32_t mapsize = (objcount + MEMPOOL_MAP_BIT - 1)/MEMPOOL_MAP_BIT;
/* Adjust the result of integer division, which may be too large. */
while (objcount * objsize + mapsize * MEMPOOL_MAP_SIZEOF > slab_size) {
objcount--;
mapsize = (objcount + MEMPOOL_MAP_BIT - 1)/MEMPOOL_MAP_BIT;
}
assert(mapsize * MEMPOOL_MAP_BIT >= objcount);
/* The wasted memory should be under objsize */
assert(slab_size - objcount * objsize -
mapsize * MEMPOOL_MAP_SIZEOF < objsize ||
mapsize * MEMPOOL_MAP_BIT == objcount);
pool->objcount = objcount;
pool->mapsize = mapsize;
pool->objcount = (slab_size - mslab_sizeof()) / objsize;
assert(pool->objcount);
pool->objoffset = slab_size - pool->objcount * pool->objsize;
}
void
......
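Taken together, the changes to the mempool implementation above replace the free/used bitmap with a purely arithmetic layout: objcount is simply how many objects fit after the mslab header, and objoffset pushes the first object to slab_size - objcount * objsize, so the object array is packed against the end of the slab. Assuming slabs of a given order start at addresses aligned to their size (which is how the slab cache hands them out), every object address then comes out as a multiple of objsize whenever objsize is a power of two, which is the gh-624 alignment fix. A minimal sketch of that arithmetic; SLAB_HEADER_SIZE, check_layout() and the demo base address are assumptions for illustration, not the library API.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for mslab_sizeof(): size of the slab header. */
#define SLAB_HEADER_SIZE 200u

/*
 * Reproduce the new objcount/objoffset computation from
 * mempool_create_with_order() and check object alignment, assuming
 * the slab itself starts at a slab_size-aligned address.
 */
static void
check_layout(uint32_t slab_size, uint32_t objsize)
{
	uint32_t objcount = (slab_size - SLAB_HEADER_SIZE) / objsize;
	uint32_t objoffset = slab_size - objcount * objsize;
	/* Any slab_size-aligned base works; pick one for the demo. */
	uintptr_t slab_base = 16 * (uintptr_t) slab_size;
	for (uint32_t i = 0; i < objcount; i++) {
		uintptr_t obj = slab_base + objoffset + i * objsize;
		/* A power-of-two objsize divides slab_size, so this holds. */
		assert(obj % objsize == 0);
	}
	printf("slab %u: %u objects of %u bytes, first at offset %u\n",
	       slab_size, objcount, objsize, objoffset);
}

int main(void)
{
	for (uint32_t objsize = 32; objsize <= 4096; objsize <<= 1)
		check_layout(65536, objsize);
	return 0;
}
```

The remaining per-slab cost is the header plus at most objsize bytes of tail slack, which is what the expected_overhead_max logic in the header hunks below keeps small relative to the slab size.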
@@ -70,26 +70,6 @@ extern "C" {
* error in case of failure.
*/
typedef unsigned long mbitmap_t;
enum {
/**
* At least this many bytes must be reserved
* for free/occupied object bit map.
*/
MEMPOOL_MAP_SIZEOF = sizeof(mbitmap_t),
/**
* How many bits per bitmap, i.e. how many objects
* a single bitmap can map.
*/
MEMPOOL_MAP_BIT = MEMPOOL_MAP_SIZEOF * CHAR_BIT,
/** Mempool slab has to contain at least this many
* objects, to ensure that overhead on bitmaps
* for free/used objects is small.
*/
MEMPOOL_OBJ_MIN = 2 * MEMPOOL_MAP_BIT
};
/** mslab - a standard slab formatted to store objects of equal size. */
struct mslab {
struct slab slab;
@@ -103,18 +83,14 @@ struct mslab {
rb_node(struct mslab) node;
/* Reference to the owning pool. */
struct mempool *pool;
/**
* A bitmap for free used/objects in the slab.
* A bitmap rather than a free list is used since:
* - this tends to keep allocations close to the
* beginning of the slab, which is better for
* cache locality
* - it makes it possible to iterate over all
* objects in a slab.
*/
mbitmap_t map[0];
};
/**
* Mempool will try to allocate blocks large enough to have overhead
* less than specified below
*/
static const double expected_overhead_max = 0.05;
static inline uint32_t
mslab_sizeof()
{
@@ -129,7 +105,7 @@ static inline uint32_t
mempool_objsize_max(uint32_t slab_size)
{
/* Fit at least 4 objects in a slab, aligned by pointer size. */
return ((slab_size - mslab_sizeof() - MEMPOOL_MAP_SIZEOF)/4) &
return ((slab_size - mslab_sizeof()) / 4) &
~(sizeof(intptr_t) - 1);
}
@@ -172,11 +148,8 @@ struct mempool
uint8_t slab_order;
/** How many objects can fit in a slab. */
uint32_t objcount;
/**
* How many bytes of the slab are reserved for
* slab map.
*/
uint32_t mapsize;
/** Offset from beginning of slab to the first object */
uint32_t objoffset;
};
/** Allocation statistics. */
@@ -218,8 +191,10 @@ static inline void
mempool_create(struct mempool *pool, struct slab_cache *cache,
uint32_t objsize)
{
/* Keep size-induced internal fragmentation within limits. */
size_t slab_size_min = objsize * MEMPOOL_OBJ_MIN;
size_t expected_loss = objsize > sizeof(struct mslab)
? objsize : sizeof(struct mslab);
size_t slab_size_min = (size_t)(expected_loss / expected_overhead_max);
/*
* Calculate the amount of usable space in a slab.
* @note: this asserts that slab_size_min is less than
......
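The MEMPOOL_OBJ_MIN heuristic is replaced in mempool_create() by sizing the slab from expected_overhead_max: the fixed loss per slab is bounded by the larger of one object and one mslab header, so requesting a slab of at least expected_loss / 0.05 bytes keeps that loss under roughly 5% of the slab. A small worked example of the same computation; the 200-byte header size is an assumption for illustration.

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	const double expected_overhead_max = 0.05;
	const size_t mslab_size = 200;   /* assumed sizeof(struct mslab) */
	const size_t objsizes[] = { 32, 64, 512, 4096 };

	for (size_t i = 0; i < sizeof(objsizes) / sizeof(objsizes[0]); i++) {
		size_t objsize = objsizes[i];
		/*
		 * Mirror mempool_create(): per-slab loss is at most one
		 * object or one header, whichever is larger.
		 */
		size_t expected_loss = objsize > mslab_size ? objsize : mslab_size;
		size_t slab_size_min = (size_t)(expected_loss / expected_overhead_max);
		printf("objsize %5zu -> slab_size_min %7zu\n",
		       objsize, slab_size_min);
	}
	return 0;
}
```

For a 32-byte object this asks for at least 4,000 bytes, i.e. a single small slab; for a 4 KB object it asks for at least 81,920 bytes, which the slab_order() computation shown further down rounds up to a power-of-two slab order.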
@@ -211,8 +211,8 @@ slab_order(struct slab_cache *cache, size_t size)
if (size > cache->arena->slab_size)
return cache->order_max + 1;
return (uint8_t) (CHAR_BIT * sizeof(uint32_t) -
__builtin_clz((uint32_t) size - 1) -
return (uint8_t) (CHAR_BIT * sizeof(unsigned) -
__builtin_clz((unsigned) size - 1) -
cache->order0_size_lb);
}
......
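The slab_order() hunk above only changes the cast type from uint32_t to unsigned, matching the operand type __builtin_clz() is defined for, but the expression deserves a gloss: CHAR_BIT * sizeof(unsigned) - __builtin_clz(size - 1) is ceil(log2(size)) for size > 1, and subtracting order0_size_lb converts that into an order relative to the order-0 slab size. A minimal sketch assuming a 4 KB order-0 slab (order0_size_lb == 12); demo_slab_order() is an illustrative stand-in, not the library function.

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed order-0 slab size for the demo: 4096 bytes, so log2 is 12. */
static const unsigned order0_size_lb = 12;

static uint8_t
demo_slab_order(size_t size)
{
	if (size <= ((size_t) 1 << order0_size_lb))
		return 0;
	/* ceil(log2(size)) - order0_size_lb, same shape as slab_order(). */
	return (uint8_t) (CHAR_BIT * sizeof(unsigned) -
			  __builtin_clz((unsigned) size - 1) -
			  order0_size_lb);
}

int main(void)
{
	const size_t sizes[] = { 100, 4096, 4097, 8192, 100000 };
	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %6zu -> order %u\n",
		       sizes[i], demo_slab_order(sizes[i]));
	return 0;
}
```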
@@ -93,6 +93,25 @@ mempool_basic()
footer();
}
void
mempool_aligh()
{
header();
for (uint32_t size = OBJSIZE_MIN; size < OBJSIZE_MAX; size <<= 1) {
mempool_create(&pool, &cache, size);
for (uint32_t i = 0; i < 32; i++) {
void *ptr = mempool_alloc_nothrow(&pool);
uintptr_t addr = (uintptr_t)ptr;
if (addr % size)
fail("aligment", "wrong");
}
mempool_destroy(&pool);
}
footer();
}
int main()
{
seed = time(0);
@@ -111,5 +130,7 @@ int main()
mempool_basic();
mempool_aligh();
slab_cache_destroy(&cache);
}
*** mempool_basic ***
*** mempool_basic: done ***
*** mempool_aligh ***
*** mempool_aligh: done ***
\ No newline at end of file