diff --git a/src/box/memtx_allocator.h b/src/box/memtx_allocator.h
index d239f62fd08f86f4caa4533be7b241addee8867e..755ec0206aa1adeb6ad673488a751d00cdea16ce 100644
--- a/src/box/memtx_allocator.h
+++ b/src/box/memtx_allocator.h
@@ -128,6 +128,19 @@ memtx_tuple_rv_version(struct memtx_tuple_rv *rv)
 	return rv->lists[rv->count - 1].version;
 }
 
+/**
+ * Not all read views need to access all kinds of tuples. For example, a
+ * snapshot isn't interested in temporary tuples, so we divide tuples by type
+ * and maintain an independent list for each type.
+ */
+enum memtx_tuple_rv_type {
+	/** Tuples from non-temporary spaces. */
+	memtx_tuple_rv_default,
+	/** Tuples from temporary spaces. */
+	memtx_tuple_rv_temporary,
+	memtx_tuple_rv_type_MAX,
+};
+
 /**
  * Allocates a list array for a read view and initializes it using the list of
  * all open read views. Adds the new read view to the list.
@@ -154,7 +167,13 @@ void
 memtx_tuple_rv_add(struct memtx_tuple_rv *rv, struct memtx_tuple *tuple);
 
 /** Memtx read view options. */
-struct memtx_read_view_opts {};
+struct memtx_read_view_opts {
+	/**
+	 * If set to true, the read view will also delay deletion of tuples
+	 * from temporary spaces.
+	 */
+	bool include_temporary_tuples = false;
+};
 
 template<class Allocator>
 class MemtxAllocator {
@@ -167,13 +186,14 @@ class MemtxAllocator {
 	 */
 	struct ReadView {
 		/** Lists of tuples owned by this read view. */
-		struct memtx_tuple_rv *rv;
+		struct memtx_tuple_rv *rv[memtx_tuple_rv_type_MAX];
 	};
 
 	static void create()
 	{
 		stailq_create(&gc);
-		rlist_create(&read_views);
+		for (int type = 0; type < memtx_tuple_rv_type_MAX; type++)
+			rlist_create(&read_views[type]);
 	}
 
 	static void destroy()
@@ -192,10 +212,15 @@ class MemtxAllocator {
 	 */
 	static ReadView *open_read_view(struct memtx_read_view_opts opts)
 	{
-		(void)opts;
 		read_view_version++;
-		ReadView *rv = (ReadView *)xmalloc(sizeof(*rv));
-		rv->rv = memtx_tuple_rv_new(read_view_version, &read_views);
+		ReadView *rv = (ReadView *)xcalloc(1, sizeof(*rv));
+		for (int type = 0; type < memtx_tuple_rv_type_MAX; type++) {
+			if (!opts.include_temporary_tuples &&
+			    type == memtx_tuple_rv_temporary)
+				continue;
+			rv->rv[type] = memtx_tuple_rv_new(read_view_version,
+							  &read_views[type]);
+		}
 		return rv;
 	}
 
@@ -204,7 +229,12 @@ class MemtxAllocator {
 	 */
 	static void close_read_view(ReadView *rv)
 	{
-		memtx_tuple_rv_delete(rv->rv, &read_views, &gc);
+		for (int type = 0; type < memtx_tuple_rv_type_MAX; type++) {
+			if (rv->rv[type] == nullptr)
+				continue;
+			memtx_tuple_rv_delete(rv->rv[type],
+					      &read_views[type], &gc);
+		}
 		TRASH(rv);
 		::free(rv);
 	}
@@ -280,13 +310,12 @@ class MemtxAllocator {
 	static struct memtx_tuple_rv *
 	tuple_rv_last(struct tuple *tuple)
 	{
-		/* Temporary tuples are freed immediately. */
-		if (tuple_has_flag(tuple, TUPLE_IS_TEMPORARY))
-			return nullptr;
-		if (rlist_empty(&read_views))
+		struct rlist *list = tuple_has_flag(tuple, TUPLE_IS_TEMPORARY) ?
+			&read_views[memtx_tuple_rv_temporary] :
+			&read_views[memtx_tuple_rv_default];
+		if (rlist_empty(list))
 			return nullptr;
-		return rlist_last_entry(&read_views,
-					struct memtx_tuple_rv, link);
+		return rlist_last_entry(list, struct memtx_tuple_rv, link);
 	}
 
 	/**
@@ -305,7 +334,7 @@ class MemtxAllocator {
 	 * List of memtx_tuple_rv objects, ordered by read view version,
 	 * ascending (the oldest read view comes first).
 	 */
-	static struct rlist read_views;
+	static struct rlist read_views[];
 };
 
 template<class Allocator>
@@ -315,7 +344,7 @@ template<class Allocator>
 uint32_t MemtxAllocator<Allocator>::read_view_version;
 
 template<class Allocator>
-struct rlist MemtxAllocator<Allocator>::read_views;
+struct rlist MemtxAllocator<Allocator>::read_views[memtx_tuple_rv_type_MAX];
 
 void
 memtx_allocators_init(struct allocator_settings *settings);
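
A minimal usage sketch of the new option (not part of the patch; it uses only
the memtx_allocators_* wrappers exercised by the unit test below). A consumer
that must observe temporary tuples sets include_temporary_tuples before opening
its read view; a snapshot-style consumer keeps the default and pins only the
non-temporary list:

    struct memtx_read_view_opts opts;
    opts.include_temporary_tuples = true;
    /* Pins both the default and the temporary tuple lists. */
    memtx_allocators_read_view rv_full = memtx_allocators_open_read_view(opts);
    /* Default options: temporary tuples are not retained for this read view. */
    memtx_allocators_read_view rv_snap = memtx_allocators_open_read_view({});
    /* ... read tuples ... */
    memtx_allocators_close_read_view(rv_snap);
    memtx_allocators_close_read_view(rv_full);
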
diff --git a/test/unit/memtx_allocator.cc b/test/unit/memtx_allocator.cc
index b7c6f667799233212df1e6ac4ab9379a241973d9..e4520dacb91db8e336d3d7f6d7a3a641f0667f98 100644
--- a/test/unit/memtx_allocator.cc
+++ b/test/unit/memtx_allocator.cc
@@ -56,6 +56,14 @@ alloc_tuple()
 	return tuple;
 }
 
+static struct tuple *
+alloc_temp_tuple()
+{
+	struct tuple *tuple = alloc_tuple();
+	tuple_set_flag(tuple, TUPLE_IS_TEMPORARY);
+	return tuple;
+}
+
 static void
 free_tuple(struct tuple *tuple)
 {
@@ -200,9 +208,8 @@ test_free_not_delayed_if_temporary()
 	header();
 
 	is(alloc_tuple_count(), 0, "count before alloc");
-	struct tuple *tuple = alloc_tuple();
+	struct tuple *tuple = alloc_temp_tuple();
 	is(alloc_tuple_count(), 1, "count after alloc");
-	tuple_set_flag(tuple, TUPLE_IS_TEMPORARY);
 	memtx_allocators_read_view rv = memtx_allocators_open_read_view({});
 	free_tuple(tuple);
 	is(alloc_tuple_count(), 0, "count after free");
@@ -275,10 +282,97 @@ test_tuple_gc()
 	check_plan();
 }
 
+/**
+ * Checks that temporary tuples are freed as soon as the last read view opened
+ * with the include_temporary_tuples flag is closed, even if other read views
+ * that may see them are still open.
+ */
+static void
+test_temp_tuple_gc()
+{
+	plan(10);
+	header();
+
+	struct memtx_read_view_opts opts;
+	opts.include_temporary_tuples = true;
+
+	is(alloc_tuple_count(), 0, "count before alloc");
+	struct tuple *temp_tuple11 = alloc_temp_tuple();
+	struct tuple *temp_tuple12 = alloc_temp_tuple();
+	struct tuple *temp_tuple13 = alloc_temp_tuple();
+	struct tuple *temp_tuple14 = alloc_temp_tuple();
+	struct tuple *tuple11 = alloc_tuple();
+	struct tuple *tuple12 = alloc_tuple();
+	struct tuple *tuple13 = alloc_tuple();
+	struct tuple *tuple14 = alloc_tuple();
+	memtx_allocators_read_view rv1 = memtx_allocators_open_read_view({});
+	is(alloc_tuple_count(), 8, "count after rv1 opened");
+	free_tuple(temp_tuple11);
+	free_tuple(tuple11);
+	struct tuple *temp_tuple22 = alloc_temp_tuple();
+	struct tuple *temp_tuple23 = alloc_temp_tuple();
+	struct tuple *temp_tuple24 = alloc_temp_tuple();
+	struct tuple *tuple22 = alloc_tuple();
+	struct tuple *tuple23 = alloc_tuple();
+	struct tuple *tuple24 = alloc_tuple();
+	memtx_allocators_read_view rv2 = memtx_allocators_open_read_view(opts);
+	/* temp_tuple11 is freed */
+	is(alloc_tuple_count(), 13, "count after rv2 opened");
+	free_tuple(temp_tuple12);
+	free_tuple(temp_tuple22);
+	free_tuple(tuple12);
+	free_tuple(tuple22);
+	struct tuple *temp_tuple33 = alloc_temp_tuple();
+	struct tuple *temp_tuple34 = alloc_temp_tuple();
+	struct tuple *tuple33 = alloc_tuple();
+	struct tuple *tuple34 = alloc_tuple();
+	memtx_allocators_read_view rv3 = memtx_allocators_open_read_view({});
+	is(alloc_tuple_count(), 17, "count after rv3 opened");
+	free_tuple(temp_tuple13);
+	free_tuple(temp_tuple23);
+	free_tuple(temp_tuple33);
+	free_tuple(tuple13);
+	free_tuple(tuple23);
+	free_tuple(tuple33);
+	struct tuple *temp_tuple44 = alloc_temp_tuple();
+	struct tuple *tuple44 = alloc_tuple();
+	memtx_allocators_read_view rv4 = memtx_allocators_open_read_view(opts);
+	/* temp_tuple33 is freed */
+	is(alloc_tuple_count(), 18, "count after rv4 opened");
+	free_tuple(temp_tuple14);
+	free_tuple(temp_tuple24);
+	free_tuple(temp_tuple34);
+	free_tuple(temp_tuple44);
+	free_tuple(tuple14);
+	free_tuple(tuple24);
+	free_tuple(tuple34);
+	free_tuple(tuple44);
+	is(alloc_tuple_count(), 18, "count before rv4 closed");
+	memtx_allocators_close_read_view(rv4);
+	/* temp_tuple34, temp_tuple44, tuple44 are freed */
+	is(alloc_tuple_count(), 15, "count after rv4 closed");
+	memtx_allocators_close_read_view(rv3);
+	/* tuple33 and tuple34 are freed */
+	is(alloc_tuple_count(), 13, "count after rv3 closed");
+	memtx_allocators_close_read_view(rv2);
+	/*
+	 * temp_tuple12, temp_tuple13, temp_tuple14,
+	 * temp_tuple22, temp_tuple23, temp_tuple24,
+	 * tuple22, tuple23, tuple24 are freed.
+	 */
+	is(alloc_tuple_count(), 4, "count after rv2 closed");
+	memtx_allocators_close_read_view(rv1);
+	/* tuple11, tuple12, tuple13, tuple14 are freed */
+	is(alloc_tuple_count(), 0, "count after rv1 closed");
+
+	footer();
+	check_plan();
+}
+
 static int
 test_main()
 {
-	plan(6);
+	plan(7);
 	header();
 
 	test_alloc_stats();
@@ -287,6 +381,7 @@ test_main()
 	test_free_not_delayed_if_alloc_after_read_view();
 	test_free_not_delayed_if_temporary();
 	test_tuple_gc();
+	test_temp_tuple_gc();
 
 	footer();
 	return check_plan();
diff --git a/test/unit/memtx_allocator.result b/test/unit/memtx_allocator.result
index f8fe4bff6585fb083238fc02e89e9ffa47851dd5..385a2077de47af1e94927386c86bdd6103d47762 100644
--- a/test/unit/memtx_allocator.result
+++ b/test/unit/memtx_allocator.result
@@ -1,4 +1,4 @@
-1..6
+1..7
 	*** test_main ***
     1..5
 	*** test_alloc_stats ***
@@ -55,4 +55,18 @@ ok 5 - subtests
     ok 11 - count after rv3 closed
 	*** test_tuple_gc: done ***
 ok 6 - subtests
+    1..10
+	*** test_temp_tuple_gc ***
+    ok 1 - count before alloc
+    ok 2 - count after rv1 opened
+    ok 3 - count after rv2 opened
+    ok 4 - count after rv3 opened
+    ok 5 - count after rv4 opened
+    ok 6 - count before rv4 closed
+    ok 7 - count after rv4 closed
+    ok 8 - count after rv3 closed
+    ok 9 - count after rv2 closed
+    ok 10 - count after rv1 closed
+	*** test_temp_tuple_gc: done ***
+ok 7 - subtests
 	*** test_main: done ***
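
The new test_temp_tuple_gc subtest condensed to its core invariant (a sketch
only, using the same helpers as the test; the counts follow the delayed-free
rules described in memtx_allocator.h):

    struct memtx_read_view_opts opts;
    opts.include_temporary_tuples = true;

    struct tuple *temp = alloc_temp_tuple();
    memtx_allocators_read_view rv1 = memtx_allocators_open_read_view({});
    free_tuple(temp);
    /* rv1 does not pin temporary tuples: freed at once, count drops to 0. */

    temp = alloc_temp_tuple();
    memtx_allocators_read_view rv2 = memtx_allocators_open_read_view(opts);
    free_tuple(temp);
    /* rv2 may still see the tuple: deletion is delayed, count stays at 1. */
    memtx_allocators_close_read_view(rv2);
    /* The last temp-including read view is gone: the tuple is freed now. */
    memtx_allocators_close_read_view(rv1);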