diff --git a/src/lib/small/lifo.h b/src/lib/small/lifo.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c322b6c479d5336031750c3e049545ebc6ca1df
--- /dev/null
+++ b/src/lib/small/lifo.h
@@ -0,0 +1,88 @@
+#ifndef INCLUDES_TARANTOOL_LIFO_H
+#define INCLUDES_TARANTOOL_LIFO_H
+/*
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the
+ *    following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <assert.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* defined(__cplusplus) */
+
+/**
+ * Intrusive stack (LIFO): the link overlays the first word of each element.
+ */
+struct lifo {
+	void *next;
+};
+
+/* Parenthesize the argument so any expression casts safely. */
+#define lifo(a) ((struct lifo *) (a))
+
+/** Create an empty stack. */
+static inline void
+lifo_init(struct lifo *head)
+{
+	head->next = NULL;
+}
+
+/** Push an element on top of the stack. */
+static inline void
+lifo_push(struct lifo *head, void *elem)
+{
+	lifo(elem)->next = head->next;
+	head->next = elem;
+}
+
+/** Pop the top element, NULL if the stack is empty. */
+static inline void *
+lifo_pop(struct lifo *head)
+{
+	struct lifo *elem = lifo(head->next);
+	if (elem)
+		head->next = elem->next;
+	return elem;
+}
+
+/** True if the stack has no elements. */
+static inline bool
+lifo_is_empty(struct lifo *head)
+{
+	return head->next == NULL;
+}
+
+#undef lifo
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif /* defined(__cplusplus) */
+
+#endif /* INCLUDES_TARANTOOL_LIFO_H */
diff --git a/src/lib/small/slab_arena.c b/src/lib/small/slab_arena.c
index 00ab0b00797624fb74160acf74a86cedce1c2134..5df79839e7eb2c9c0c9f4f93807ef4cdf1657e2d 100644
--- a/src/lib/small/slab_arena.c
+++ b/src/lib/small/slab_arena.c
@@ -182,3 +182,15 @@ slab_unmap(struct slab_arena *arena, void *ptr)
 	if (ptr)
 		lf_lifo_push(&arena->cache, ptr);
 }
+
+/**
+ * Make the preallocated arena read-only: any later write to
+ * arena memory will fault. NOTE(review): the mprotect() result
+ * is ignored - on failure the arena silently stays writable.
+ */
+void
+slab_arena_mprotect(struct slab_arena *arena)
+{
+	if (arena->arena)
+		mprotect(arena->arena, arena->prealloc, PROT_READ);
+}
diff --git a/src/lib/small/slab_arena.h b/src/lib/small/slab_arena.h
index 7597790cf37cac92dade9fb752afac8bfd993058..45e14c2a5ca1752ef2a2ba08c7634265a3b5d24a 100644
--- a/src/lib/small/slab_arena.h
+++ b/src/lib/small/slab_arena.h
@@ -96,22 +96,27 @@ struct slab_arena {
 	int flags;
 };
 
-/**
- * Initialize an arena.
- */
+/** Initialize an arena. */
 void
 slab_arena_create(struct slab_arena *arena, size_t slab_size,
 		  size_t prealloc, size_t maxalloc, int flags);
 
+/** Destroy an arena. */
 void
 slab_arena_destroy(struct slab_arena *arena);
 
+/** Get a slab. */
 void *
 slab_map(struct slab_arena *arena);
 
+/** Put a slab into cache. */
 void
 slab_unmap(struct slab_arena *arena, void *ptr);
 
+/** mprotect() the preallocated arena. */
+void
+slab_arena_mprotect(struct slab_arena *arena);
+
 /** Align a size. Alignment must be a power of 2 */
 static inline size_t
 small_align(size_t size, size_t alignment)
diff --git a/src/lib/small/small.c b/src/lib/small/small.c
index 7a303e9073adfab70beb5aab028665d89fec4adf..4fbb6124deeb69ac563d1721900760faeb7698af 100644
--- a/src/lib/small/small.c
+++ b/src/lib/small/small.c
@@ -146,8 +146,42 @@ small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
 	alloc->factor_pool_next = alloc->factor_pool_cache;
 	factor_tree_new(&alloc->factor_pools);
 	(void) factor_pool_create(alloc, NULL, objsize_max);
+
+	lifo_init(&alloc->delayed);
+	alloc->is_delayed_free_mode = false;
+}
+
+void
+small_alloc_setopt(struct small_alloc *alloc, enum small_opt opt, bool val)
+{
+	switch (opt) {
+	case SMALL_DELAYED_FREE_MODE:
+		/* Delayed mode is only usable if the arena is shared. */
+		if (alloc->cache->arena->flags & MAP_SHARED)
+			alloc->is_delayed_free_mode = val;
+		break;
+	default:
+		assert(false);
+		break;
+	}
+}
+
+/** Free a batch of delayed objects; a no-op while delayed mode is on. */
+static inline void
+smfree_batch(struct small_alloc *alloc)
+{
+	if (alloc->is_delayed_free_mode || lifo_is_empty(&alloc->delayed))
+		return;
+
+	const int BATCH = 100;
+	for (int i = 0; i < BATCH; i++) {
+		void *item = lifo_pop(&alloc->delayed);
+		if (item == NULL)
+			break;
+		smfree(alloc, item);
+	}
 }
 
 /**
  * Allocate a small object.
  *
@@ -165,6 +199,8 @@ small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
 void *
 smalloc_nothrow(struct small_alloc *alloc, size_t size)
 {
+	smfree_batch(alloc);
+
 	struct mempool *pool;
 	if (size <= alloc->step_pool_objsize_max) {
 		/* Allocate in a stepped pool. */
@@ -226,6 +262,21 @@
 	}
 }
 
+/**
+ * Free a chunk allocated by the small allocator: if delayed
+ * free mode is on, put it on the delayed list to be freed by
+ * smfree_batch() later, otherwise free it immediately.
+ */
+void
+smfree_delayed(struct small_alloc *alloc, void *ptr)
+{
+	if (ptr == NULL)
+		return;
+	if (alloc->is_delayed_free_mode)
+		lifo_push(&alloc->delayed, ptr);
+	else
+		smfree(alloc, ptr);
+}
+
 /** Simplify iteration over small allocator mempools. */
 struct mempool_iterator
 {
diff --git a/src/lib/small/small.h b/src/lib/small/small.h
index 04612afef25c52e0c26994dbf2436cc306467740..839857cd298a756f0f55b2207312c268b1ab557e 100644
--- a/src/lib/small/small.h
+++ b/src/lib/small/small.h
@@ -31,6 +31,7 @@
 #include <stdint.h>
 #include "small/mempool.h"
 #include "small/slab_arena.h"
+#include "lifo.h"
 
 #if defined(__cplusplus)
 extern "C" {
@@ -89,6 +90,10 @@ enum {
 	FACTOR_POOL_MAX = 256,
 };
 
+enum small_opt {
+	SMALL_DELAYED_FREE_MODE
+};
+
 /**
  * A mempool to store objects sized within one multiple of
  * alloc_factor. Is a member of the red-black tree which
@@ -139,6 +144,10 @@ struct small_alloc {
 	 * each pool differs from its neighbor by a factor.
 	 */
 	factor_tree_t factor_pools;
+	/**
+	 * List of objects to be freed if delayed free mode.
+	 */
+	struct lifo delayed;
 	/**
 	 * The factor used for factored pools. Must be > 1.
 	 * Is provided during initialization.
@@ -146,6 +155,10 @@ struct small_alloc {
 	float factor;
 	/** All slabs in all mempools have the same order. */
 	uint8_t slab_order;
+	/**
+	 * If true, smfree_delayed puts items to delayed list.
+	 */
+	bool is_delayed_free_mode;
 };
 
 /** Initialize a small memory allocator. */
@@ -154,6 +167,13 @@ small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
 		   uint32_t objsize_min, uint32_t objsize_max,
 		   float alloc_factor);
 
+/**
+ * Enter or leave delayed free mode: in delayed mode,
+ * smfree_delayed() puts chunks on a list instead of freeing them.
+ */
+void
+small_alloc_setopt(struct small_alloc *alloc, enum small_opt opt, bool val);
+
 /** Destroy the allocator and all allocated memory. */
 void
 small_alloc_destroy(struct small_alloc *alloc);
@@ -170,6 +190,15 @@ smalloc_nothrow(struct small_alloc *alloc, size_t size);
 void
 smfree(struct small_alloc *alloc, void *ptr);
 
+
+/**
+ * Free a memory chunk allocated by the small allocator:
+ * in delayed free mode, put it on the delayed free list,
+ * otherwise free it immediately.
+ */
+void
+smfree_delayed(struct small_alloc *alloc, void *ptr);
+
 /**
  * @brief Return an unique index associated with a chunk allocated
  * by the allocator.
@@ -194,6 +223,9 @@ smfree(struct small_alloc *alloc, void *ptr);
 size_t
 small_ptr_compress(struct small_alloc *alloc, void *ptr);
 
+/**
+ * Perform the opposite action of small_ptr_compress().
+ */
 void *
 small_ptr_decompress(struct small_alloc *alloc, size_t val);