From 107b31ce5b68592627a3b6d3b9919fe3e30ff3ab Mon Sep 17 00:00:00 2001
From: Konstantin Osipov <kostja@tarantool.org>
Date: Tue, 10 Dec 2013 20:38:05 +0400
Subject: [PATCH] Minor edits to the small allocators library. Improve
 comments.

---
 src/lib/small/README    | 35 +++++++++++++++++++++++++++++++++++
 src/lib/small/lf_lifo.h | 10 ++++++++--
 src/lib/small/mempool.h |  8 +++++---
 3 files changed, 48 insertions(+), 5 deletions(-)

diff --git a/src/lib/small/README b/src/lib/small/README
index 8098fbc9aa..9e0e5bb94e 100644
--- a/src/lib/small/README
+++ b/src/lib/small/README
@@ -1,2 +1,37 @@
 small - a collection of Specialized Memory ALLocators
 for small allocations.
+
+The library provides the following facilities:
+
+slab_arena
+----------
+
+Defines an API with two methods: map() and unmap().
+map() returns a memory area; unmap() returns this area to the
+arena. All objects returned by an arena have the same size,
+defined by the initialization-time constant SLAB_MAX_SIZE.
+By default, SLAB_MAX_SIZE is 4M. All objects returned by an
+arena are aligned on a SLAB_MAX_SIZE boundary:
+(ptr & (SLAB_MAX_SIZE - 1)) is always 0, so SLAB_MAX_SIZE must
+be a power of 2. Limiting SLAB_MAX_SIZE is important to avoid
+internal fragmentation. Multiple arenas can exist; an object
+must be returned to the same arena from which it was allocated.
+
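+As an illustration (a sketch, with hypothetical variable
+names), the alignment guarantee lets any pointer into a slab
+be mapped back to the start of its slab with a single mask:
+
+    /* Find the start of the slab containing ptr. */
+    void *slab = (void *) ((uintptr_t) ptr &
+                           ~((uintptr_t) SLAB_MAX_SIZE - 1));
+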
+There are a number of different implementations of the
+slab_arena API:
+
+- huge_arena: maps a huge region of memory at initialization
+  time and then carves all objects out of this region. Can be
+  configured to use shared or private mappings.
+- grow_arena: mmap()s each individual block. This can fragment
+  the address space, but actually returns memory to the OS on
+  unmap.
+
+Instances of slab_arena are thread-safe: multiple threads can
+use the same arena concurrently.
+
+slab_cache
+----------
+
+A cache of ordered slabs on top of a slab_arena (see
+slab_cache.h for the definition of "ordered").
+
diff --git a/src/lib/small/lf_lifo.h b/src/lib/small/lf_lifo.h
index a763a5a5fa..4381062359 100644
--- a/src/lib/small/lf_lifo.h
+++ b/src/lib/small/lf_lifo.h
@@ -72,7 +72,7 @@ lf_lifo_init(struct lf_lifo *head)
 	head->next = NULL;
 }
 
-struct lf_lifo *
+static inline struct lf_lifo *
 lf_lifo_push(struct lf_lifo *head, void *elem)
 {
 	assert(lf_lifo(elem) == elem); /* Aligned address. */
@@ -89,7 +89,7 @@ lf_lifo_push(struct lf_lifo *head, void *elem)
 	} while (true);
 }
 
-void *
+static inline void *
 lf_lifo_pop(struct lf_lifo *head)
 {
 	do {
@@ -111,4 +111,10 @@ lf_lifo_pop(struct lf_lifo *head)
 	} while (true);
 }
 
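+/**
+ * True if the lifo contains no elements. Under concurrent
+ * push/pop the result is only a snapshot and may be stale by
+ * the time the caller acts on it.
+ */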
+static inline bool
+lf_lifo_is_empty(struct lf_lifo *head)
+{
+	return head->next == NULL;
+}
+
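+/*
+ * Usage sketch (illustrative; 'slab' stands for any suitably
+ * aligned element, see the assert in lf_lifo_push()):
+ *
+ *	struct lf_lifo list;
+ *	lf_lifo_init(&list);
+ *	lf_lifo_push(&list, slab);
+ *	void *elem = lf_lifo_pop(&list);
+ *
+ * lf_lifo_pop() returns NULL when the list is empty.
+ */
+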
 #endif /* INCLUDES_TARANTOOL_LF_LIFO_H */
diff --git a/src/lib/small/mempool.h b/src/lib/small/mempool.h
index 1ad017f09b..ece6126449 100644
--- a/src/lib/small/mempool.h
+++ b/src/lib/small/mempool.h
@@ -45,7 +45,7 @@ extern "C" {
  * Good for allocating tons of small objects of the same size.
  * Stores all objects in order-of-virtual-page-size memory blocks,
  * called slabs. Each object can be freed if necessary. There is
- * (practically) no allocation overhead.  Internal fragmentation
+ * (practically) no allocation overhead. Internal fragmentation
  * may occur if lots of objects are allocated, and then many of
  * them are freed in reverse-to-allocation order.
  *
@@ -149,7 +149,10 @@ struct mempool
 	/**
 	 * Mempool slabs are ordered (@sa slab_cache.h for
 	 * definition of "ordered"). The order is calculated
-	 * when the pool is initialized.
+	 * when the pool is initialized or is set explicitly.
+	 * The latter is necessary for the 'small' allocator,
+	 * which needs to quickly find the mempool containing
+	 * an allocated object when the object is freed.
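+	 * (A sketch with hypothetical names: when every slab of
+	 * a pool has the same order, the slab containing ptr is
+	 * found as ptr & ~(slab_size - 1), and the slab header
+	 * can store a link back to the owning mempool.)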
 	 */
 	uint8_t slab_order;
 	/** How many objects can fit in a slab. */
@@ -251,7 +254,6 @@ mempool_total(struct mempool *pool)
 static inline void *
 mempool_alloc(struct mempool *pool)
 {
-
 	void *ptr = mempool_alloc_nothrow(pool);
 	if (ptr == NULL)
 		tnt_raise(LoggedError, ER_MEMORY_ISSUE,
-- 
GitLab