diff --git a/src/box/box.cc b/src/box/box.cc
index a2df1ab407055a8405222bedda3caec9543c5694..5b804b7df4f1b832f620d5ae03cb106a534214a2 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -6028,7 +6028,8 @@ box_storage_free(void)
 	flightrec_free();
 	audit_log_free();
 	sql_built_in_functions_cache_free();
-	/* fiber_pool_destroy(&tx_fiber_pool); */
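+	/* The pool itself is shut down in box_storage_shutdown(). */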
+	fiber_pool_destroy(&tx_fiber_pool);
 	is_storage_initialized = false;
 }
 
@@ -6088,26 +6088,36 @@ box_storage_shutdown()
 		diag_log();
 		panic("cannot gracefully shutdown iproto");
 	}
-	box_watcher_shutdown();
-	/*
-	 * Finish client fibers after iproto_shutdown otherwise new fibers
-	 * can be started through new iproto requests. Also we should
-	 * finish client fibers before other subsystems shutdown so that
-	 * we won't need to handle requests from client fibers after/during
-	 * subsystem shutdown.
-	 */
-	if (fiber_shutdown(box_shutdown_timeout) != 0) {
-		diag_log();
-		panic("cannot gracefully shutdown client fibers");
-	}
 	replication_shutdown();
 	gc_shutdown();
 	engine_shutdown();
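+	/*
+	 * Client fibers are finished and iproto is down, so the tx
+	 * fiber pool gets no new messages and can be shut down.
+	 */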
+	fiber_pool_shutdown(&tx_fiber_pool);
 }
 
 void
 box_shutdown(void)
 {
+	/*
+	 * The watcher should be shut down before the subsystems
+	 * because it may execute client code. It can be shut down
+	 * before or after the client fiber shutdown, and both orders
+	 * are correct, but doing it after produces noisy fiber
+	 * creation failures in the log from async watcher execution.
+	 */
+	box_watcher_shutdown();
+	/*
+	 * Finish client fibers before shutting down the other
+	 * subsystems so that they do not get unexpected requests
+	 * from client code during or after their shutdown.
+	 */
+	if (fiber_shutdown(box_shutdown_timeout) != 0) {
+		diag_log();
+		panic("cannot gracefully shutdown client fibers");
+	}
 	box_storage_shutdown();
 }
 
diff --git a/src/box/iproto.cc b/src/box/iproto.cc
index 6223bdeeda62675c7fe92ed9fc4e98013c9bf825..a3245d668ccb1ab7c4205c9155ddbfd5dee6fc90 100644
--- a/src/box/iproto.cc
+++ b/src/box/iproto.cc
@@ -4304,17 +4304,25 @@ int
 iproto_shutdown(double timeout)
 {
 	assert(iproto_is_shutting_down);
-	return iproto_drop_connections(timeout);
+	if (iproto_drop_connections(timeout) != 0)
+		return -1;
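+	/*
+	 * Stop the net threads and join them so that no new iproto
+	 * requests can arrive once the shutdown is done.
+	 */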
+	for (int i = 0; i < iproto_threads_count; i++) {
+		cbus_stop_loop(&iproto_threads[i].net_pipe);
+		cpipe_destroy(&iproto_threads[i].net_pipe);
+		if (cord_join(&iproto_threads[i].net_cord) != 0)
+			panic_syserror("iproto cord join failed");
+	}
+	return 0;
 }
 
 void
 iproto_free(void)
 {
 	for (int i = 0; i < iproto_threads_count; i++) {
-		cbus_stop_loop(&iproto_threads[i].net_pipe);
-		cpipe_destroy(&iproto_threads[i].net_pipe);
-		if (cord_join(&iproto_threads[i].net_cord) != 0)
-			panic_syserror("iproto cord join failed");
 		mh_i32_delete(iproto_threads[i].req_handlers);
 		/*
 		 * Close socket descriptor to prevent hot standby instance
diff --git a/src/lib/core/fiber_pool.c b/src/lib/core/fiber_pool.c
index bd5f47284a520c5aae891fcb52c78f8c034448e5..4bb30af361eb8d8105e391d9b2b52fb85e16d686 100644
--- a/src/lib/core/fiber_pool.c
+++ b/src/lib/core/fiber_pool.c
@@ -188,26 +188,28 @@ fiber_pool_create(struct fiber_pool *pool, const char *name, int max_pool_size,
 }
 
 void
-fiber_pool_destroy(struct fiber_pool *pool)
+fiber_pool_shutdown(struct fiber_pool *pool)
 {
-	/** Endpoint has connected pipes or unfetched messages */
 	cbus_endpoint_destroy(&pool->endpoint, NULL);
-	/**
-	 * At this point all messages are started to execution because last
-	 * cbus poison message was fired (endpoint_destroy condition).
-	 * We won't to have new messages from cbus and can send wakeup
-	 * to each idle fiber. In this case idle fiber can not fetch any
-	 * new message and will exit. We adjust idle_timeout to.
-	 */
-	pool->idle_timeout = 0;
-	struct fiber *idle_fiber;
-	rlist_foreach_entry(idle_fiber, &pool->idle, state)
-		fiber_wakeup(idle_fiber);
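+	/*
+	 * The endpoint is destroyed, so no new messages can arrive.
+	 * Cancel the idle fibers so that they wake up and exit
+	 * instead of waiting for new messages. A woken fiber leaves
+	 * the idle list, hence the safe iteration.
+	 */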
+	struct fiber *idle_fiber, *tmp;
+	rlist_foreach_entry_safe(idle_fiber, &pool->idle, state, tmp)
+		fiber_cancel(idle_fiber);
 	/**
 	 * Just wait on fiber exit condition until all fibers are done
 	 */
 	while (pool->size > 0)
 		fiber_cond_wait(&pool->worker_cond);
-	fiber_cond_destroy(&pool->worker_cond);
 }
 
+void
+fiber_pool_destroy(struct fiber_pool *pool)
+{
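+	/* All worker fibers have been finished by fiber_pool_shutdown(). */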
+	fiber_cond_destroy(&pool->worker_cond);
+}
diff --git a/src/lib/core/fiber_pool.h b/src/lib/core/fiber_pool.h
index ede8b84d665a4091cfd6315c87bbd34b58ba8ea5..3c068af6967a7be6dfe96e428df648b66708ac8c 100644
--- a/src/lib/core/fiber_pool.h
+++ b/src/lib/core/fiber_pool.h
@@ -92,6 +92,10 @@ fiber_pool_create(struct fiber_pool *pool, const char *name, int max_pool_size,
 void
 fiber_pool_set_max_size(struct fiber_pool *pool, int new_max_size);
 
+/** Shut down a fiber pool. Finish all remaining idle fibers. */
+void
+fiber_pool_shutdown(struct fiber_pool *pool);
+
 /**
  * Destroy a fiber pool
  */
diff --git a/test/box-luatest/graceful_shutdown_test.lua b/test/box-luatest/graceful_shutdown_test.lua
index bf7bf033b062a8ec182f3c749cb018d63f7359c2..dfc601ca493abfe48b7ce721db4f90a74ca9965e 100644
--- a/test/box-luatest/graceful_shutdown_test.lua
+++ b/test/box-luatest/graceful_shutdown_test.lua
@@ -122,7 +122,7 @@ g.test_hung_requests_aborted = function()
     g.server:stop()
     local res, err = fut:result()
     t.assert_not(res)
-    t.assert_equals(tostring(err), 'Peer closed')
+    t.assert_equals(tostring(err), 'fiber is cancelled')
     t.assert_equals(conn.state, 'error')
     t.assert_equals(conn.error, 'Peer closed')
     conn:close()
diff --git a/test/box-luatest/shutdown_test.lua b/test/box-luatest/shutdown_test.lua
index 636aeeeba04e44afa843124a56ba0e864fd8abf9..90a7dfd6b2ca6a1c5c4ee26a4152849cb79b520c 100644
--- a/test/box-luatest/shutdown_test.lua
+++ b/test/box-luatest/shutdown_test.lua
@@ -144,7 +144,7 @@ g.test_shutdown_of_hanging_iproto_request = function(cg)
     end)
     local log = fio.pathjoin(cg.server.workdir, cg.server.alias .. '.log')
     test_no_hang_on_shutdown(cg.server)
-    t.assert(cg.server:grep_log('cannot gracefully shutdown iproto', nil,
+    t.assert(cg.server:grep_log('cannot gracefully shutdown client fibers', nil,
              {filename = log}))
 end
 
@@ -188,3 +188,33 @@ g.test_shutdown_memtx_gc_free_tuples = function(cg)
     end)
     server:stop()
 end
+
+local g_idle_pool = t.group('idle pool')
+
+g_idle_pool.before_each(function(cg)
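+    -- Use a large pool idle timeout so that the worker fiber stays
+    -- idle in the pool instead of exiting before the shutdown.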
+    cg.server = server:new({
+        env = {
+            TARANTOOL_RUN_BEFORE_BOX_CFG = [[
+                local tweaks = require('internal.tweaks')
+                tweaks.box_fiber_pool_idle_timeout = 100
+            ]]
+        }
+    })
+    cg.server:start()
+end)
+
+g_idle_pool.after_each(function(cg)
+    if cg.server ~= nil then
+        cg.server:drop()
+    end
+end)
+
+-- Test shutdown with an idle fiber in the fiber pool.
+g_idle_pool.test_shutdown_fiber_pool_with_idle_fibers = function(cg)
+    t.tarantool.skip_if_not_debug()
+    -- Make a request just to leave an idle fiber in the pool.
+    cg.server:exec(function() end)
+    test_no_hang_on_shutdown(cg.server)
+end