diff --git a/test/vinyl/write_iterator.result b/test/vinyl/write_iterator.result
index 2eab64e271be46565da40515ece0bc592a422607..eff5a6bb7534073006f6c70ab785e87e3b8c9178 100644
--- a/test/vinyl/write_iterator.result
+++ b/test/vinyl/write_iterator.result
@@ -4,38 +4,46 @@ env = require('test_run')
 test_run = env.new()
 ---
 ...
--- Tests on data integrity after ranges compacting and runs dumping.
+--
+-- Tests on data integrity after dump of memory runs or range
+-- compaction.
+--
 -- The aim is to test vy_write_iterator. There are several combinations
--- of various operations that can occur:
---   1) upsert before delete
---   2) delete before upsert
---   3) upsert before upsert
---   4) replace before upsert
---   5) upsert before replace
---   6) delete before replace
---   7) replace before delete
---   8) replace before replace
---   9) single upsert
---   10) single replace
+-- of various commands that can occur:
+--   1) delete followed by upsert: the write iterator should convert
+--      the upsert to a replace (insert)
+--   2) upsert followed by delete: the upsert is filtered out;
+--      the delete is filtered out on compaction but preserved
+--      on dump
+--   3) upsert followed by upsert: the two upserts are folded
+--      together into one
+--   4) upsert followed by replace: the upsert is overwritten
+--   5) replace followed by upsert: the two commands are folded
+--      into a single replace with the upsert ops applied
+--   6) replace followed by delete: both are eliminated in
+--      case of compaction; on dump only the replace is
+--      filtered out
+--   7) delete followed by replace: the delete is filtered out
+--   8) replace followed by replace: the first replace is filtered
+--      out
+--   9) single upsert (for completeness)
+--   10) single replace (for completeness)
 space = box.schema.space.create('test', { engine = 'vinyl' })
 ---
 ...
-pk = space:create_index('primary', { page_size = 512 * 24, range_size = 512 * 12 })
+--
+--
+pk = space:create_index('primary', { page_size = 12 * 1024, range_size = 12 * 1024 })
 ---
 ...
-space:insert({1})
----
-- [1]
-...
-box.snapshot()
----
-- ok
-...
 -- Insert many big tuples and then call snapshot to
 -- force dumping and compacting.
 big_val = string.rep('1', 2000)
 ---
 ...
+_ = space:insert{1}
+---
+...
 _ = space:insert{2, big_val}
 ---
 ...
@@ -71,7 +79,10 @@ box.snapshot()
 ---
 - ok
 ...
--- Need to insert something before snapshot() else an error will occur.
+--
+-- Create a couple of tiny runs on disk to increase the "number of runs"
+-- heuristic of the planner and trigger compaction.
+--
 space:insert{12}
 ---
 - [12]
@@ -88,15 +99,21 @@ box.snapshot()
 ---
 - ok
 ...
+#space:select{}
+---
+- 12
+...
 space:drop()
 ---
 ...
--- Create vinyl index with little page_size parameter, so big tuples
--- will not fit in page.
+--
+-- Create a vinyl index with a small page_size parameter, so that
+-- big tuples will not fit in a single page.
+--
 space = box.schema.space.create('test', { engine = 'vinyl' })
 ---
 ...
-pk = space:create_index('primary', { page_size = 256, range_size = 256 * 12 })
+pk = space:create_index('primary', { page_size = 256, range_size = 3 * 1024 })
 ---
 ...
 space:insert({1})
@@ -137,6 +154,7 @@ _ = space:insert{10, big_val}
 _ = space:insert{11, big_val}
 ---
 ...
+-- Increase the number of runs to trigger compaction
 space:count()
 ---
 - 10
@@ -161,6 +179,10 @@ box.snapshot()
 ---
 - ok
 ...
+#space:select{}
+---
+- 12
+...
 space:drop()
 ---
 ...
@@ -174,7 +196,7 @@ pk = space:create_index('primary', { page_size = 512, range_size = 1024 * 12 })
 index2 = space:create_index('secondary', { parts = {2, 'string'}, page_size = 512, range_size = 1024 * 12 })
 ---
 ...
-for i = 1, 100 do space:insert{i, ''..i} box.snapshot() end
+for i = 1, 100 do space:insert{i, ''..i} if i % 2 == 0 then box.snapshot() end end
 ---
 ...
 space:delete{1}
@@ -255,8 +277,8 @@ space = box.schema.space.create('test', { engine = 'vinyl' })
 pk = space:create_index('primary', { page_size = 128, range_size = 1024 })
 ---
 ...
--- Test that snaphot() inside a transaction don't lose data
--- and that upserts successfully merged.
+-- Test that snapshot() inside a transaction doesn't lose data
+-- and that upserts are successfully merged.
 box.begin()
 ---
 ...
@@ -297,8 +319,10 @@ space:select{}
 - - [2, 2222, 222]
   - [3]
 ...
--- Verify that deletion of tuples with keys 2 and 3 are
+--
+-- Verify that deletion of tuples with keys 2 and 3 is
 -- successfully dumped and compacted.
+--
 box.begin()
 ---
 ...
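
A minimal standalone sketch of combination 3 above (upsert folded into upsert),
shown here for illustration only; the space name, field values and expected
output are assumptions and are not part of the patch itself:

s = box.schema.space.create('example', { engine = 'vinyl' })
_ = s:create_index('pk')
s:replace{1, 0}                    -- base tuple, goes to disk on the next dump
box.snapshot()                     -- dump the in-memory level to a run
s:upsert({1, 0}, {{'+', 2, 10}})   -- older upsert
s:upsert({1, 0}, {{'+', 2, 5}})    -- newer upsert; the two should be folded
box.snapshot()                     -- dump again: one squashed statement expected
s:get{1}                           -- expected result: [1, 15]
s:drop()
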
diff --git a/test/vinyl/write_iterator.test.lua b/test/vinyl/write_iterator.test.lua
index 502f6bcce478bea9db919df2a64d97682b3146fd..2ee59cc3069142b8730e43503d96de1b7cdfa86b 100644
--- a/test/vinyl/write_iterator.test.lua
+++ b/test/vinyl/write_iterator.test.lua
@@ -1,30 +1,42 @@
 env = require('test_run')
 test_run = env.new()
 
--- Tests on data integrity after ranges compacting and runs dumping.
+--
+-- Tests on data integrity after dump of memory runs or range
+-- compaction.
+--
 -- The aim is to test vy_write_iterator. There are several combinations
--- of various operations that can occur:
---   1) upsert before delete
---   2) delete before upsert
---   3) upsert before upsert
---   4) replace before upsert
---   5) upsert before replace
---   6) delete before replace
---   7) replace before delete
---   8) replace before replace
---   9) single upsert
---   10) single replace
+-- of various commands that can occur:
+--   1) delete followed by upsert: the write iterator should convert
+--      the upsert to a replace (insert)
+--   2) upsert followed by delete: the upsert is filtered out;
+--      the delete is filtered out on compaction but preserved
+--      on dump
+--   3) upsert followed by upsert: the two upserts are folded
+--      together into one
+--   4) upsert followed by replace: the upsert is overwritten
+--   5) replace followed by upsert: the two commands are folded
+--      into a single replace with the upsert ops applied
+--   6) replace followed by delete: both are eliminated in
+--      case of compaction; on dump only the replace is
+--      filtered out
+--   7) delete followed by replace: the delete is filtered out
+--   8) replace followed by replace: the first replace is filtered
+--      out
+--   9) single upsert (for completeness)
+--   10) single replace (for completeness)
 
 space = box.schema.space.create('test', { engine = 'vinyl' })
-pk = space:create_index('primary', { page_size = 512 * 24, range_size = 512 * 12 })
-space:insert({1})
-box.snapshot()
+--
+--
+pk = space:create_index('primary', { page_size = 12 * 1024, range_size = 12 * 1024 })
 
 -- Insert many big tuples and then call snapshot to
 -- force dumping and compacting.
 
 big_val = string.rep('1', 2000)
 
+_ = space:insert{1}
 _ = space:insert{2, big_val}
 _ = space:insert{3, big_val}
 _ = space:insert{5, big_val}
@@ -37,20 +49,26 @@ _ = space:insert{11, big_val}
 space:count()
 box.snapshot()
 
--- Need to insert something before snapshot() else an error will occur.
+--
+-- Create a couple of tiny runs on disk to increase the "number of runs"
+-- heuristic of the planner and trigger compaction.
+--
 
 space:insert{12}
 box.snapshot()
 space:insert{13}
 box.snapshot()
+#space:select{}
 
 space:drop()
 
--- Create vinyl index with little page_size parameter, so big tuples
--- will not fit in page.
+--
+-- Create a vinyl index with a small page_size parameter, so that
+-- big tuples will not fit in a single page.
+--
 
 space = box.schema.space.create('test', { engine = 'vinyl' })
-pk = space:create_index('primary', { page_size = 256, range_size = 256 * 12 })
+pk = space:create_index('primary', { page_size = 256, range_size = 3 * 1024 })
 space:insert({1})
 box.snapshot()
 
@@ -65,12 +83,14 @@ _ = space:insert{8, big_val}
 _ = space:insert{9, big_val}
 _ = space:insert{10, big_val}
 _ = space:insert{11, big_val}
+-- Increase the number of runs to trigger compaction
 space:count()
 box.snapshot()
 space:insert{12}
 box.snapshot()
 space:insert{13}
 box.snapshot()
+#space:select{}
 
 space:drop()
 
@@ -79,7 +99,7 @@ space:drop()
 space = box.schema.space.create('test', { engine = 'vinyl' })
 pk = space:create_index('primary', { page_size = 512, range_size = 1024 * 12 })
 index2 = space:create_index('secondary', { parts = {2, 'string'}, page_size = 512, range_size = 1024 * 12 })
-for i = 1, 100 do space:insert{i, ''..i} box.snapshot() end
+for i = 1, 100 do space:insert{i, ''..i} if i % 2 == 0 then box.snapshot() end end
 space:delete{1}
 space:delete{10}
 space:delete{100}
@@ -113,8 +133,8 @@ space:drop()
 space = box.schema.space.create('test', { engine = 'vinyl' })
 pk = space:create_index('primary', { page_size = 128, range_size = 1024 })
 
--- Test that snaphot() inside a transaction don't lose data
--- and that upserts successfully merged.
+-- Test that snapshot() inside a transaction doesn't lose data
+-- and that upserts are successfully merged.
 
 box.begin()
 space:upsert({2}, {{'=', 2, 22}})
@@ -133,8 +153,10 @@ box.snapshot()
 
 space:select{}
 
--- Verify that deletion of tuples with keys 2 and 3 are
+--
+-- Verify that deletion of tuples with keys 2 and 3 is
 -- successfully dumped and compacted.
+--
 
 box.begin()