diff --git a/src/box/vy_index.c b/src/box/vy_index.c
index 96afcff706924566275360a9bab2563685b3bda2..1dc4b465903dd01968bb4647a1e0b134383f73f6 100644
--- a/src/box/vy_index.c
+++ b/src/box/vy_index.c
@@ -665,9 +665,8 @@ int
 vy_index_set(struct vy_index *index, struct vy_mem *mem,
 	     const struct tuple *stmt, const struct tuple **region_stmt)
 {
-	assert(!vy_stmt_is_region_allocated(stmt));
-	assert(*region_stmt == NULL ||
-	       vy_stmt_is_region_allocated(*region_stmt));
+	assert(vy_stmt_is_refable(stmt));
+	assert(*region_stmt == NULL || !vy_stmt_is_refable(*region_stmt));
 
 	/* Allocate region_stmt on demand. */
 	if (*region_stmt == NULL) {
diff --git a/src/box/vy_mem.c b/src/box/vy_mem.c
index 3ad8c255c800371958cf369c7a45feaebbe1ab60..0816d546931e35deccfd2e90da4f575e02241197 100644
--- a/src/box/vy_mem.c
+++ b/src/box/vy_mem.c
@@ -147,7 +147,7 @@ vy_mem_insert_upsert(struct vy_mem *mem, const struct tuple *stmt)
 	       stmt->format_id == tuple_format_id(mem->format) ||
 	       stmt->format_id == tuple_format_id(mem->upsert_format));
 	/* The statement must be from a lsregion. */
-	assert(vy_stmt_is_region_allocated(stmt));
+	assert(!vy_stmt_is_refable(stmt));
 	size_t size = tuple_size(stmt);
 	const struct tuple *replaced_stmt = NULL;
 	struct vy_mem_tree_iterator inserted;
@@ -208,7 +208,7 @@ vy_mem_insert(struct vy_mem *mem, const struct tuple *stmt)
 	       stmt->format_id == tuple_format_id(mem->format) ||
 	       stmt->format_id == tuple_format_id(mem->upsert_format));
 	/* The statement must be from a lsregion. */
-	assert(vy_stmt_is_region_allocated(stmt));
+	assert(!vy_stmt_is_refable(stmt));
 	size_t size = tuple_size(stmt);
 	const struct tuple *replaced_stmt = NULL;
 	if (vy_mem_tree_insert(&mem->tree, stmt, &replaced_stmt))
@@ -228,7 +228,7 @@ void
 vy_mem_commit_stmt(struct vy_mem *mem, const struct tuple *stmt)
 {
 	/* The statement must be from a lsregion. */
-	assert(vy_stmt_is_region_allocated(stmt));
+	assert(!vy_stmt_is_refable(stmt));
 	int64_t lsn = vy_stmt_lsn(stmt);
 	if (mem->min_lsn == INT64_MAX)
 		mem->min_lsn = lsn;
@@ -241,7 +241,7 @@ void
 vy_mem_rollback_stmt(struct vy_mem *mem, const struct tuple *stmt)
 {
 	/* This is the statement we've inserted before. */
-	assert(vy_stmt_is_region_allocated(stmt));
+	assert(!vy_stmt_is_refable(stmt));
 	int rc = vy_mem_tree_delete(&mem->tree, stmt);
 	assert(rc == 0);
 	(void) rc;
diff --git a/src/box/vy_run.c b/src/box/vy_run.c
index 16c7341bbeb5964e518a7758f479ab7fbb166fe2..ceae5d6a0b97161ecffd681f10125040c16138c6 100644
--- a/src/box/vy_run.c
+++ b/src/box/vy_run.c
@@ -2098,12 +2098,11 @@ vy_run_write_page(struct vy_run *run, struct xlog *data_xlog,
 		}
 		*offset = page->unpacked_size;
 
-		if (last_stmt != NULL &&
-		    !vy_stmt_is_region_allocated(last_stmt))
+		if (last_stmt != NULL && vy_stmt_is_refable(last_stmt))
 			tuple_unref(last_stmt);
 
 		last_stmt = *curr_stmt;
-		if (!vy_stmt_is_region_allocated(last_stmt))
+		if (vy_stmt_is_refable(last_stmt))
 			tuple_ref(last_stmt);
 
 		if (vy_run_dump_stmt(*curr_stmt, data_xlog, page,
@@ -2144,7 +2143,7 @@ vy_run_write_page(struct vy_run *run, struct xlog *data_xlog,
 			goto error_rollback;
 	}
 
-	if (!vy_stmt_is_region_allocated(last_stmt))
+	if (vy_stmt_is_refable(last_stmt))
 		tuple_unref(last_stmt);
 
 	/* Save offset to row index  */
@@ -2181,8 +2180,7 @@ vy_run_write_page(struct vy_run *run, struct xlog *data_xlog,
 
 error_rollback:
 	xlog_tx_rollback(data_xlog);
-	if (last_stmt != NULL &&
-	    !vy_stmt_is_region_allocated(last_stmt))
+	if (last_stmt != NULL && vy_stmt_is_refable(last_stmt))
 		tuple_unref(last_stmt);
 error_row_index:
 	ibuf_destroy(&row_index_buf);
diff --git a/src/box/vy_stmt.h b/src/box/vy_stmt.h
index 633c034d90b9b4509a7f4afd83b1a2ec688a0d4a..83545b83843ba98ccd84fd70a20da4ef1c7b2839 100644
--- a/src/box/vy_stmt.h
+++ b/src/box/vy_stmt.h
@@ -222,15 +222,26 @@ vy_stmt_dup_lsregion(const struct tuple *stmt, struct lsregion *lsregion,
 		     int64_t alloc_id);
 
 /**
- * Return true if @a stmt was allocated on lsregion.
+ * Return true if @a stmt can be referenced. Currently, the only
+ * statements that are not refable are those allocated on lsregion.
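+ *
+ * Only refable statements take part in reference counting,
+ * e.g. vy_run_write_page() pins the last written statement
+ * only when it is refable:
+ *
+ *     if (vy_stmt_is_refable(last_stmt))
+ *             tuple_ref(last_stmt);
+ *     ...
+ *     if (vy_stmt_is_refable(last_stmt))
+ *             tuple_unref(last_stmt);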
  * @param stmt a statement
- * @retval true if @a stmt was allocated on lsregion
+ * @retval true if @a stmt can be referenced
  * @retval false otherwise
  */
 static inline bool
-vy_stmt_is_region_allocated(const struct tuple *stmt)
+vy_stmt_is_refable(const struct tuple *stmt)
 {
-	return stmt->refs == 0;
+	return stmt->refs > 0;
 }
 
 /**
diff --git a/src/box/vy_tx.c b/src/box/vy_tx.c
index 8967fd2f20744e86dd45dfcf449457027393f027..2a83a87201cf7a3f98579d98bc2fca2495341c2e 100644
--- a/src/box/vy_tx.c
+++ b/src/box/vy_tx.c
@@ -433,9 +433,8 @@ static int
 vy_tx_write(struct vy_index *index, struct vy_mem *mem,
 	    struct tuple *stmt, const struct tuple **region_stmt)
 {
-	assert(!vy_stmt_is_region_allocated(stmt));
-	assert(*region_stmt == NULL ||
-	       vy_stmt_is_region_allocated(*region_stmt));
+	assert(vy_stmt_is_refable(stmt));
+	assert(*region_stmt == NULL || !vy_stmt_is_refable(*region_stmt));
 
 	/*
 	 * The UPSERT statement can be applied to the cached