diff --git a/perf/lua/CMakeLists.txt b/perf/lua/CMakeLists.txt
index 276a6e0db95cef619f9480c1e233f007030b93d0..5e39f1e839457dc56df98eb879fb11a0fd4acf98 100644
--- a/perf/lua/CMakeLists.txt
+++ b/perf/lua/CMakeLists.txt
@@ -1,6 +1,8 @@
 set(TARANTOOL_BIN $<TARGET_FILE:tarantool>)
 set(RUN_PERF_LUA_TESTS_LIST "")
 
+set(LUA_PATH "${CMAKE_CURRENT_SOURCE_DIR}/?.lua\;\;")
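+# The path above lets the tests require() helper modules (e.g. 'benchmark')
+# from the source directory; the trailing ';;' keeps the default Lua search
+# paths available as well.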
+
 function(create_perf_lua_test)
   set(prefix PERF)
   set(noValues)
@@ -18,7 +20,9 @@ function(create_perf_lua_test)
   message(STATUS "Creating Lua performance test ${PERF_NAME}_perftest")
   set(TEST_PATH ${CMAKE_CURRENT_SOURCE_DIR}/${PERF_NAME}.lua)
   add_custom_target(${PERF_NAME}_perftest
-                    COMMAND ${TARANTOOL_BIN} ${TEST_PATH}
+                    COMMAND ${CMAKE_COMMAND} -E env
+                      LUA_PATH="${LUA_PATH}"
+                      ${TARANTOOL_BIN} ${TEST_PATH}
                     COMMENT Running ${PERF_NAME}_perftest
                     DEPENDS tarantool ${TEST_PATH}
                     WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
diff --git a/perf/lua/benchmark.lua b/perf/lua/benchmark.lua
new file mode 100644
index 0000000000000000000000000000000000000000..a4c73127125270195457b164b327d2595ec5e28e
--- /dev/null
+++ b/perf/lua/benchmark.lua
@@ -0,0 +1,135 @@
+-- Usage:
+--
+-- local benchmark = require('benchmark')
+-- local clock = require('clock')
+--
+-- local USAGE = 'tarantool mybench.lua [options]'
+--
+-- -- These options (plus the 'h'/'help' booleans) are parsed by
+-- -- the module by default:
+-- -- output = 'string',
+-- -- output_format = 'string',
+-- local opts = benchmark.argparse(arg, {
+--     <..your options..>
+-- }, USAGE)
+-- local bench = benchmark.new(opts)
+--
+-- local ITERATIONS = 10
+--
+-- local start_time = {
+--     time = clock.time(),
+--     proc = clock.proc(),
+-- }
+-- for _ = 1, ITERATIONS do
+--     workload() -- the code being measured
+-- end
+--
+-- bench:add_result('subtest name', {
+--     items = ITERATIONS,
+--     real_time = clock.time() - start_time.time,
+--     cpu_time = clock.proc() - start_time.proc,
+-- })
+--
+-- bench:dump_results()
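+--
+-- A benchmark written this way can then be run with the general options,
+-- for example (the output file name below is arbitrary):
+--
+--   tarantool mybench.lua --output_format json --output results.json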
+
+local json = require('json')
+local fio = require('fio')
+local argparse = require('internal.argparse')
+
+local M = {}
+
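+-- Serialize the collected results: Google Benchmark-compatible JSON for the
+-- 'json' output format, or plain '<name> <N> rps' lines for 'console'.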
+local function format_report(bench)
+    local output_format = bench.output_format
+    local results = bench.results
+    local report = ''
+    if output_format == 'json' then
+        -- The output should have the same format as the Google
+        -- Benchmark JSON output format:
+        -- https://github.com/google/benchmark/blob/main/docs/user_guide.md
+        report = json.encode({benchmarks = results})
+    else
+        assert(output_format == 'console',
+               'unknown output format: ' .. output_format)
+        for _, res in ipairs(results) do
+            report = report .. ('%s %d rps\n'):format(res.name,
+                                                      res.items_per_second)
+        end
+    end
+    return report
+end
+
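+-- Write the report to the file given via the 'output' option, or to stdout
+-- if no output file is set.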
+local function dump_report(bench, text)
+    local output = bench.output
+    if output then
+        -- Create the output file with 0644 permissions if it is missing.
+        local flags = {'O_WRONLY', 'O_CREAT', 'O_TRUNC'}
+        local fh = assert(fio.open(output, flags, tonumber('644', 8)))
+        fh:write(text)
+        fh:close()
+    else
+        io.stdout:write(text)
+    end
+end
+
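+-- Append a single subtest result; 'items' is the number of operations
+-- performed during 'real_time' seconds, from which the rps value is derived.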
+local function add_result(bench, name, data)
+    local items_per_second = math.floor(data.items / data.real_time)
+    local result = {
+        name = name,
+        real_time = data.real_time,
+        cpu_time = data.cpu_time,
+        iterations = data.items,
+        items_per_second = items_per_second,
+    }
+    table.insert(bench.results, result)
+    return result
+end
+
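+-- Format the collected results and emit the final report.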
+local function dump_results(bench)
+    dump_report(bench, format_report(bench))
+end
+
+local GENERAL_HELP = [[
+ Supported general options:
+
+   help (same as -h) <boolean>       - print this message
+   output <string>                   - file to dump the benchmark results to
+   output_format <string, 'console'> - format (console or json) in which the
+                                       results are dumped
+
+ Options are passed with a '--' prefix, followed by a value unless the option
+ is a boolean one.
+
+ For suggestions on how to achieve the most stable results, see:
+ https://github.com/tarantool/tarantool/wiki/Benchmarking
+]]
+
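+-- Parse the command-line arguments: the benchmark-specific 'argtable' is
+-- extended with the general options (h, help, output, output_format) and
+-- passed to internal.argparse. On -h/--help the usage message is printed
+-- and the process exits.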
+function M.argparse(arg, argtable, custom_help)
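+    -- Use the caller's script file name (stack level 2) in the usage line.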
+    local benchname = fio.basename(debug.getinfo(2).short_src)
+    local usageline = ('\n Usage: tarantool %s [options]\n\n'):format(benchname)
+    argtable = argtable or {}
+    table.insert(argtable, {'h', 'boolean'})
+    table.insert(argtable, {'help', 'boolean'})
+    table.insert(argtable, {'output', 'string'})
+    table.insert(argtable, {'output_format', 'string'})
+    local params = argparse.parse(arg, argtable)
+    if params.h or params.help then
+        local help_msg = usageline .. GENERAL_HELP
+        if custom_help then
+            help_msg = ('%s%s%s'):format(usageline, custom_help, GENERAL_HELP)
+        end
+        print(help_msg)
+        os.exit(0)
+    end
+    return params
+end
+
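+-- Create a benchmark state object from the parsed options; the output
+-- format defaults to 'console'.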
+function M.new(opts)
+    assert(type(opts) == 'table', 'the given argument must be a table')
+    local output_format = opts.output_format or 'console'
+    return setmetatable({
+        output = opts.output,
+        output_format = output_format:lower(),
+        results = {},
+    }, {__index = {
+        add_result = add_result,
+        dump_results = dump_results,
+    }})
+end
+
+return M