diff --git a/include/salloc.h b/include/salloc.h
index fb8520e5492839f7b85ba9863b9c8485661d37c8..dab868314004aa0e9d46e7760152c292b482adbb 100644
--- a/include/salloc.h
+++ b/include/salloc.h
@@ -60,4 +60,33 @@ typedef int (*salloc_stat_cb)(const struct slab_cache_stats *st, void *ctx);
 int
 salloc_stat(salloc_stat_cb cb, struct slab_arena_stats *astat, void *cb_ctx);
 
+/**
+ * @brief Return a unique index associated with a chunk allocated by salloc.
+ * This index space is more dense than pointers space, especially in the less
+ * significant bits. This number is needed because some types of box's indexes
+ * (e.g. BITSET) have better performance when they operate on sequential
+ * offsets (i.e. dense space) instead of memory pointers (sparse space).
+ *
+ * The calculation is based on SLAB number and the position of an item within
+ * it. Current implementation only guarantees that adjacent chunks from one
+ * SLAB will have consecutive indexes. That is, if two chunks were sequentially
+ * allocated from one SLAB they will have sequential ids. If a second chunk was
+ * allocated from another SLAB then the difference between indexes may be more
+ * than one.
+ *
+ * @param ptr pointer to memory allocated by salloc
+ * @return unique index
+ */
+size_t
+salloc_ptr_to_index(void *ptr);
+
+/**
+ * @brief Perform the opposite action of salloc_ptr_to_index.
+ * @param index unique index
+ * @see salloc_ptr_to_index
+ * @return pointer to the memory area associated with \a index.
+ */
+void *
+salloc_ptr_from_index(size_t index);
+
 #endif /* TARANTOOL_SALLOC_H_INCLUDED */
diff --git a/src/salloc.m b/src/salloc.m
index 7390ad3e3c32782eaec7c7205e6f1fe1479feb97..13d18e898719de258c279ee59fbc20467bf027ba 100644
--- a/src/salloc.m
+++ b/src/salloc.m
@@ -56,6 +56,10 @@ static const uint32_t SLAB_MAGIC = 0x51abface;
 static const size_t SLAB_SIZE = 1 << 22;
 static const size_t MAX_SLAB_ITEM = 1 << 20;
 
+/* maximum number of items in one slab */
+/* updated in slab_classes_init, depends on salloc_init params */
+size_t MAX_SLAB_ITEM_COUNT;
+
 struct slab_item {
 	struct slab_item *next;
 };
@@ -119,6 +123,9 @@ slab_caches_init(size_t minimal, double factor)
 	}
 
 	slab_active_caches = i;
+
+	MAX_SLAB_ITEM_COUNT = (size_t) (SLAB_SIZE - sizeof(struct slab)) /
+			slab_caches[0].item_size;
 }
 
 static bool
@@ -138,6 +145,7 @@ arena_init(struct arena *arena, size_t size)
 	arena->base = (char *)SLAB_ALIGN_PTR(arena->mmap_base) + SLAB_SIZE;
 	SLIST_INIT(&arena->slabs);
 	SLIST_INIT(&arena->free_slabs);
+
 	return true;
 }
 
@@ -329,6 +337,47 @@ sfree(void *ptr)
 }
 
 
+size_t
+salloc_ptr_to_index(void *ptr)
+{
+	struct slab *slab = slab_header(ptr);
+	struct slab_item *item = ptr;
+	struct slab_cache *clazz = slab->cache;
+
+	(void) item;
+	assert(valid_item(slab, item));
+
+	void *brk_start = (void *)CACHEALIGN((void *)slab+sizeof(struct slab));
+	ptrdiff_t item_no = (ptr - brk_start) / clazz->item_size;
+	assert(item_no >= 0);
+
+	ptrdiff_t slab_no = ((void *) slab - (void *) arena.base) / SLAB_SIZE;
+	assert(slab_no >= 0);
+
+	size_t index = (size_t)slab_no * MAX_SLAB_ITEM_COUNT + (size_t) item_no;
+
+	assert(salloc_ptr_from_index(index) == ptr);
+
+	return index;
+}
+
+void *
+salloc_ptr_from_index(size_t index)
+{
+	size_t slab_no = index / MAX_SLAB_ITEM_COUNT;
+	size_t item_no = index % MAX_SLAB_ITEM_COUNT;
+
+	struct slab *slab = slab_header(
+		(void *) ((size_t) arena.base + SLAB_SIZE * slab_no));
+	struct slab_cache *clazz = slab->cache;
+
+	void *brk_start = (void *)CACHEALIGN((void *)slab+sizeof(struct slab));
+	struct slab_item *item = brk_start + item_no * clazz->item_size;
+	assert(valid_item(slab, item));
+
+	return (void *) item;
+}
+
 /**
  * Collect slab allocator statistics.
  *