diff --git a/core/fiber.m b/core/fiber.m
index 89ab4d3288849c55d9a69bfc8f4efa06538454ea..4cae9c0360545825e01a524a06a3fd9eefb4723c 100644
--- a/core/fiber.m
+++ b/core/fiber.m
@@ -304,7 +304,7 @@ ev_schedule(ev_watcher *watcher, int event __attribute__((unused)))
 static struct fiber *
 fid2fiber(int fid)
 {
-	u32 k = mh_i32ptr_get(fibers_registry, fid);
+	mh_iter_t k = mh_i32ptr_get(fibers_registry, fid);
 
 	if (k == mh_end(fibers_registry))
 		return NULL;
@@ -323,7 +323,7 @@ register_fid(struct fiber *fiber)
 static void
 unregister_fid(struct fiber *fiber)
 {
-	u32 k = mh_i32ptr_get(fibers_registry, fiber->fid);
+	mh_iter_t k = mh_i32ptr_get(fibers_registry, fiber->fid);
 	mh_i32ptr_del(fibers_registry, k);
 }
 
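For context, the lookup idiom that fid2fiber() and unregister_fid() now share is sketched below. This is an illustration, not code from the tree: the map type name struct mh_i32ptr_t is an assumption about how the _mh() name macro expands, and <assoc.h> is assumed to be on the include path; mh_i32ptr_get(), mh_value() and mh_end() are the calls visible in this diff.

#include <assoc.h>

/* An mh_iter_t equal to mh_end() means "key not present". */
static struct fiber *
lookup_fiber(struct mh_i32ptr_t *registry, int fid)
{
	mh_iter_t k = mh_i32ptr_get(registry, fid);
	if (k == mh_end(registry))
		return NULL;
	return mh_value(registry, k);
}
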
diff --git a/include/assoc.h b/include/assoc.h
index 9fd1d641c7556b2aad948a12387b7f95efa1df43..ea0da7be45389c371b6f6f0553d9930c2885779d 100644
--- a/include/assoc.h
+++ b/include/assoc.h
@@ -44,7 +44,7 @@ typedef void* ptr_t;
 #define mh_name _i64ptr
 #define mh_key_t i64
 #define mh_val_t ptr_t
-#define mh_hash(a) ({ (uint32_t)((a)>>33^(a)^(a)<<11); })
+#define mh_hash(a) ({ (mh_int_t)((a)>>33^(a)^(a)<<11); })
 #define mh_eq(a, b) ({ (a) == (b); })
 #include <mhash.h>
 
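The _i64ptr hash now truncates through mh_int_t instead of a hard-coded uint32_t; the fold itself is unchanged. A stand-alone copy of the expression, shown only for illustration (hash_i64 is a hypothetical name, not a symbol from the tree), makes the mixing explicit:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t mh_int_t;	/* mirrors the typedef added to mhash.h */

/* Same expression as the _i64ptr mh_hash macro: XOR the high half of the
 * key with the key and a left-shifted copy, then truncate to 32 bits. */
static mh_int_t
hash_i64(int64_t a)
{
	return (mh_int_t)((a >> 33) ^ a ^ (a << 11));
}

int
main(void)
{
	printf("%u\n", hash_i64(1));
	printf("%u\n", hash_i64(1234567890123LL));
	return 0;
}
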
diff --git a/include/mhash.h b/include/mhash.h
index 83b5c16bbfe2b389b096a094f793c6a995b654eb..84a5554add0f58c442415bf3dc462c7b2da4ff2f 100644
--- a/include/mhash.h
+++ b/include/mhash.h
@@ -63,9 +63,15 @@
 
 #define mh_unlikely(x)  __builtin_expect((x),0)
 
+#ifndef MH_TYPEDEFS
+#define MH_TYPEDEFS
+typedef uint32_t mh_int_t;
+typedef mh_int_t mh_iter_t;
+#endif /* MH_TYPEDEFS */
+
 #ifndef __ac_HASH_PRIME_SIZE
 #define __ac_HASH_PRIME_SIZE 31
-static const uint32_t __ac_prime_list[__ac_HASH_PRIME_SIZE] = {
+static const mh_int_t __ac_prime_list[__ac_HASH_PRIME_SIZE] = {
 	3ul,		11ul,		23ul,		53ul,
 	97ul,		193ul,		389ul,		769ul,
 	1543ul,		3079ul,		6151ul,		12289ul,
@@ -75,7 +81,7 @@ static const uint32_t __ac_prime_list[__ac_HASH_PRIME_SIZE] = {
 	100663319ul,	201326611ul,	402653189ul,	805306457ul,
 	1610612741ul,	3221225473ul,	4294967291ul
 };
-#endif
+#endif /* __ac_HASH_PRIME_SIZE */
 
 #ifndef MH_HEADER
 #define MH_HEADER
@@ -87,12 +93,16 @@ struct _mh(pair) {
 
 struct _mh(t) {
 	struct _mh(pair) * p;
-	uint32_t *b;
-	uint32_t n_buckets, n_dirty, size, upper_bound;
-	uint32_t prime;
-
-	uint32_t resize_cnt;
-	uint32_t resizing, batch;
+	mh_int_t *b;
+	mh_int_t n_buckets;
+	mh_int_t n_dirty;
+	mh_int_t size;
+	mh_int_t upper_bound;
+	mh_int_t prime;
+
+	mh_int_t resize_cnt;
+	mh_int_t resizing;
+	mh_int_t batch;
 	struct _mh(t) *shadow;
 };
 
@@ -114,21 +124,20 @@ struct _mh(t) * _mh(init)();
 void _mh(clear)(struct _mh(t) *h);
 void _mh(destroy)(struct _mh(t) *h);
 void _mh(resize)(struct _mh(t) *h);
-uint32_t _mh(start_resize)(struct _mh(t) *h, uint32_t buckets, uint32_t batch);
+mh_int_t _mh(start_resize)(struct _mh(t) *h, mh_int_t buckets, mh_int_t batch);
 void __attribute__((noinline)) _mh(put_resize)(struct _mh(t) *h, mh_key_t key, mh_val_t val);
-void __attribute__((noinline)) _mh(del_resize)(struct _mh(t) *h, uint32_t x);
+void __attribute__((noinline)) _mh(del_resize)(struct _mh(t) *h, mh_int_t x);
 void _mh(dump)(struct _mh(t) *h);
 
 #define get_slot(h, key) _mh(get_slot)(h, key)
 #define put_slot(h, key) _mh(put_slot)(h, key)
 
-static inline uint32_t
+static inline mh_int_t
 _mh(get_slot)(struct _mh(t) *h, mh_key_t key)
 {
-	uint32_t inc, k, i;
-	k = mh_hash(key);
-	i = k % h->n_buckets;
-	inc = 1 + k % (h->n_buckets - 1);
+	mh_int_t k = mh_hash(key);
+	mh_int_t i = k % h->n_buckets;
+	mh_int_t inc = 1 + k % (h->n_buckets - 1);
 	for (;;) {
 		if ((mh_exist(h, i) && mh_eq(h->p[i].key, key)))
 			return i;
@@ -143,10 +152,10 @@ _mh(get_slot)(struct _mh(t) *h, mh_key_t key)
 }
 
 #if 0
-static inline uint32_t
+static inline mh_int_t
 _mh(put_slot)(struct _mh(t) *h, mh_key_t key)
 {
-	uint32_t inc, k, i, p = h->n_buckets;
+	mh_int_t inc, k, i, p = h->n_buckets;
 	k = mh_hash(key);
 	i = k % h->n_buckets;
 	inc = 1 + k % (h->n_buckets - 1);
@@ -170,10 +179,10 @@ _mh(put_slot)(struct _mh(t) *h, mh_key_t key)
 }
 
 /* Faster variant of above loop */
-static inline uint32_t
+static inline mh_int_t
 _mh(put_slot)(struct _mh(t) *h, mh_key_t key)
 {
-	uint32_t inc, k, i, p = h->n_buckets;
+	mh_int_t inc, k, i, p = h->n_buckets;
 	void *loop = &&marking_loop;
 	k = mh_hash(key);
 	i = k % h->n_buckets;
@@ -210,20 +219,22 @@ _mh(put_slot)(struct _mh(t) *h, mh_key_t key)
 #endif
 
 /* clearer variant of above loop */
-static inline uint32_t
+static inline mh_int_t
 _mh(put_slot)(struct _mh(t) *h, mh_key_t key)
 {
-	uint32_t hashed_key = mh_hash(key);
-	uint32_t itr = hashed_key % h->n_buckets;
-	uint32_t step = 1 + hashed_key % (h->n_buckets - 1);
-	uint32_t found_slot = mh_end(h);
+	mh_int_t hashed_key = mh_hash(key);
+	mh_int_t itr = hashed_key % h->n_buckets;
+	mh_int_t step = 1 + hashed_key % (h->n_buckets - 1);
+	mh_int_t found_slot = mh_end(h);
 	/* marking loop */
 	while (true) {
 		if (mh_exist(h, itr)) {
 			/* this is slop occupied */
-			if (mh_eq(h->p[itr].key, key))
+			if (mh_eq(h->p[itr].key, key)) {
 				/* this is same element */
-				return itr;
+				found_slot = itr;
+				goto put_slot_done;
+			}
 			/* this is another element, mark it as dirty */
 			mh_setdirty(h, itr);
 		} else {
@@ -252,35 +263,40 @@ _mh(put_slot)(struct _mh(t) *h, mh_key_t key)
 				if (!mh_dirty(h, itr))
 					h->n_dirty--;
 				/* this is same element */
-				return found_slot;
+				goto put_slot_done;
 			}
 		} else {
 			/* all sequence checked, element with same key not
 			   found. */
 			if (!mh_dirty(h, itr))
-				return found_slot;
+				goto put_slot_done;
 		}
 		itr += step;
 		if (itr > h->n_buckets)
 			itr -= h->n_buckets;
 	}
+
+put_slot_done:
+	return found_slot;
 }
 
-static inline uint32_t
+static inline mh_int_t
 _mh(get)(struct _mh(t) *h, mh_key_t key)
 {
-	uint32_t i = get_slot(h, key);
+	mh_int_t i = get_slot(h, key);
 	if (!mh_exist(h, i))
-		return i = h->n_buckets;
+		i = h->n_buckets;
 	return i;
 }
 
-static inline uint32_t
+static inline mh_int_t
 _mh(put)(struct _mh(t) *h, mh_key_t key, mh_val_t val, int * ret)
 {
+	mh_int_t x = mh_end(h);
 	if (h->size == h->n_buckets)
-		/* no one free elements */
-		return mh_end(h);
+		/* no free elements left in the hash table */
+		goto put_done;
+
 #if MH_INCREMENTAL_RESIZE
 	if (mh_unlikely(h->n_dirty >= h->upper_bound || h->resizing > 0))
 		_mh(put_resize)(h, key, val);
@@ -288,7 +304,8 @@ _mh(put)(struct _mh(t) *h, mh_key_t key, mh_val_t val, int * ret)
 	if (mh_unlikely(h->n_dirty >= h->upper_bound))
 		_mh(start_resize)(h, h->n_buckets + 1, -1);
 #endif
-	uint32_t x = put_slot(h, key);
+
+	x = put_slot(h, key);
 	int exist = mh_exist(h, x);
 	if (ret)
 		*ret = !exist;
@@ -306,11 +323,13 @@ _mh(put)(struct _mh(t) *h, mh_key_t key, mh_val_t val, int * ret)
 		/* replace old */
 		h->p[x].val = val;
 	}
+
+put_done:
 	return x;
 }
 
 static inline void
-_mh(del)(struct _mh(t) *h, uint32_t x)
+_mh(del)(struct _mh(t) *h, mh_int_t x)
 {
 	if (x != h->n_buckets && mh_exist(h, x)) {
 		mh_setfree(h, x);
@@ -340,7 +359,7 @@ _mh(put_resize)(struct _mh(t) *h, mh_key_t key, mh_val_t val)
 
 
 void __attribute__((noinline))
-_mh(del_resize)(struct _mh(t) *h, uint32_t x)
+_mh(del_resize)(struct _mh(t) *h, mh_int_t x)
 {
 	struct _mh(t) *s = h->shadow;
 	uint32_t y = get_slot(s, h->p[x].key);
@@ -385,9 +404,9 @@ _mh(resize)(struct _mh(t) *h)
 {
 	struct _mh(t) *s = h->shadow;
 #if MH_INCREMENTAL_RESIZE
-	uint32_t batch = h->batch;
+	mh_int_t batch = h->batch;
 #endif
-	for (uint32_t o = h->resizing; o < h->n_buckets; o++) {
+	for (mh_int_t o = h->resizing; o < h->n_buckets; o++) {
 #if MH_INCREMENTAL_RESIZE
 		if (batch-- == 0) {
 			h->resizing = o;
@@ -396,7 +415,7 @@ _mh(resize)(struct _mh(t) *h)
 #endif
 		if (!mh_exist(h, o))
 			continue;
-		uint32_t n = put_slot(s, h->p[o].key);
+		mh_int_t n = put_slot(s, h->p[o].key);
 		s->p[n] = h->p[o];
 		mh_setexist(s, n);
 		s->n_dirty++;
@@ -408,8 +427,8 @@ _mh(resize)(struct _mh(t) *h)
 	h->resize_cnt++;
 }
 
-uint32_t
-_mh(start_resize)(struct _mh(t) *h, uint32_t buckets, uint32_t batch)
+mh_int_t
+_mh(start_resize)(struct _mh(t) *h, mh_int_t buckets, mh_int_t batch)
 {
 	if (h->resizing)
 		/* resize is already started */
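
Nothing above changes the probing scheme: get_slot()/put_slot() still double-hash over a prime-sized table, only the index arithmetic is now carried in mh_int_t. As a reminder of why the primes in __ac_prime_list matter: with a prime bucket count every step 1..n_buckets-1 is coprime to n_buckets, so a probe visits each bucket exactly once before repeating. The self-contained sketch below (probe_sequence is a hypothetical helper, not part of the library) replays that order.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t mh_int_t;

/* Double-hashing probe: start at k % n_buckets and advance by
 * 1 + k % (n_buckets - 1), wrapping modulo n_buckets. */
static void
probe_sequence(mh_int_t k, mh_int_t n_buckets)
{
	mh_int_t i = k % n_buckets;
	mh_int_t inc = 1 + k % (n_buckets - 1);
	for (mh_int_t n = 0; n < n_buckets; n++) {
		printf("%u ", i);
		i = (i + inc) % n_buckets;
	}
	printf("\n");
}

int
main(void)
{
	probe_sequence(12345, 11);	/* 11 is one of the primes in the table */
	return 0;
}
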
diff --git a/mod/box/index.m b/mod/box/index.m
index 69bd1d52fcc946813b29310b9c96b0ef63e6b432..17a1cf3345f88a2828639f3ed66597e04c3c8ae8 100644
--- a/mod/box/index.m
+++ b/mod/box/index.m
@@ -175,7 +175,7 @@ index_find_hash_num(struct index *self, void *key)
 	if (key_size != 4)
 		tnt_raise(IllegalParams, :"key is not u32");
 
-	u32 k = mh_i32ptr_get(self->idx.int_hash, num);
+	mh_iter_t k = mh_i32ptr_get(self->idx.int_hash, num);
 	if (k != mh_end(self->idx.int_hash))
 		ret = mh_value(self->idx.int_hash, k);
 #ifdef DEBUG
@@ -194,7 +194,7 @@ index_find_hash_num64(struct index *self, void *key)
 	if (key_size != 8)
 		tnt_raise(IllegalParams, :"key is not u64");
 
-	u32 k = mh_i64ptr_get(self->idx.int64_hash, num);
+	mh_iter_t k = mh_i64ptr_get(self->idx.int64_hash, num);
 	if (k != mh_end(self->idx.int64_hash))
 		ret = mh_value(self->idx.int64_hash, k);
 #ifdef DEBUG
@@ -208,7 +208,7 @@ index_find_hash_str(struct index *self, void *key)
 {
 	struct box_tuple *ret = NULL;
 
-	u32 k = mh_lstrptr_get(self->idx.str_hash, key);
+	mh_iter_t k = mh_lstrptr_get(self->idx.str_hash, key);
 	if (k != mh_end(self->idx.str_hash))
 		ret = mh_value(self->idx.str_hash, k);
 #ifdef DEBUG
@@ -317,7 +317,7 @@ index_remove_hash_num(struct index *self, struct box_tuple *tuple)
 	if (key_size != 4)
 		tnt_raise(IllegalParams, :"key is not u32");
 
-	u32 k = mh_i32ptr_get(self->idx.int_hash, num);
+	mh_iter_t k = mh_i32ptr_get(self->idx.int_hash, num);
 	if (k != mh_end(self->idx.int_hash))
 		mh_i32ptr_del(self->idx.int_hash, k);
 #ifdef DEBUG
@@ -335,7 +335,7 @@ index_remove_hash_num64(struct index *self, struct box_tuple *tuple)
 	if (key_size != 8)
 		tnt_raise(IllegalParams, :"key is not u64");
 
-	u32 k = mh_i64ptr_get(self->idx.int64_hash, num);
+	mh_iter_t k = mh_i64ptr_get(self->idx.int64_hash, num);
 	if (k != mh_end(self->idx.int64_hash))
 		mh_i64ptr_del(self->idx.int64_hash, k);
 #ifdef DEBUG
@@ -348,7 +348,7 @@ index_remove_hash_str(struct index *self, struct box_tuple *tuple)
 {
 	void *key = tuple_field(tuple, self->key_field->fieldno);
 
-	u32 k = mh_lstrptr_get(self->idx.str_hash, key);
+	mh_iter_t k = mh_lstrptr_get(self->idx.str_hash, key);
 	if (k != mh_end(self->idx.str_hash))
 		mh_lstrptr_del(self->idx.str_hash, k);
 #ifdef DEBUG
@@ -378,7 +378,7 @@ index_replace_hash_num(struct index *self, struct box_tuple *old_tuple, struct b
 		void *old_key = tuple_field(old_tuple, self->key_field->fieldno);
 		load_varint32(&old_key);
 		u32 old_num = *(u32 *)old_key;
-		u32 k = mh_i32ptr_get(self->idx.int_hash, old_num);
+		mh_iter_t k = mh_i32ptr_get(self->idx.int_hash, old_num);
 		if (k != mh_end(self->idx.int_hash))
 			mh_i32ptr_del(self->idx.int_hash, k);
 	}
@@ -405,7 +405,7 @@ index_replace_hash_num64(struct index *self, struct box_tuple *old_tuple, struct
 		void *old_key = tuple_field(old_tuple, self->key_field->fieldno);
 		load_varint32(&old_key);
 		u64 old_num = *(u64 *)old_key;
-		u32 k = mh_i64ptr_get(self->idx.int64_hash, old_num);
+		mh_iter_t k = mh_i64ptr_get(self->idx.int64_hash, old_num);
 		if (k != mh_end(self->idx.int64_hash))
 			mh_i64ptr_del(self->idx.int64_hash, k);
 	}
@@ -427,7 +427,7 @@ index_replace_hash_str(struct index *self, struct box_tuple *old_tuple, struct b
 
 	if (old_tuple != NULL) {
 		void *old_key = tuple_field(old_tuple, self->key_field->fieldno);
-		u32 k = mh_lstrptr_get(self->idx.str_hash, old_key);
+		mh_iter_t k = mh_lstrptr_get(self->idx.str_hash, old_key);
 		if (k != mh_end(self->idx.str_hash))
 			mh_lstrptr_del(self->idx.str_hash, k);
 	}
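
The index.m changes are mechanical: every temporary holding the result of an mh_*_get() call now has type mh_iter_t. The delete-then-insert pattern in the replace paths is unchanged; a hedged sketch of it against the i32 map follows. struct mh_i32ptr_t and the expanded name mh_i32ptr_put() are assumptions (the prototype appears in mhash.h only as _mh(put)), the insertion of the new key is an assumed continuation of the replace path not shown in this diff, and the snippet leans on the tree's u32 typedef and <assoc.h> being on the include path.

#include <assoc.h>

/* Drop the old key's slot if present (the idiom shown in
 * index_replace_hash_num()), then insert the new mapping.  Passing NULL
 * for the "created" flag is fine: _mh(put) checks the pointer first. */
static void
replace_num_key(struct mh_i32ptr_t *h, u32 old_num, u32 new_num, void *tuple)
{
	mh_iter_t k = mh_i32ptr_get(h, old_num);
	if (k != mh_end(h))
		mh_i32ptr_del(h, k);
	mh_i32ptr_put(h, new_num, tuple, NULL);
}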