diff --git a/mod/box/box.m b/mod/box/box.m
index c62a3c6b3a24d65fecd4255e69f2f8cfb7bf067b..b3489b9010638b74054cf8bb38a5730376e0776a 100644
--- a/mod/box/box.m
+++ b/mod/box/box.m
@@ -126,27 +126,27 @@ validate_indexes(struct box_txn *txn)
 	/* There is more than one index. */
 	foreach_index(txn->n, index) {
 		/* XXX: skip the first index here! */
-		for (u32 f = 0; f < index->key.part_count; ++f) {
-			if (index->key.parts[f].fieldno >= txn->tuple->cardinality)
+		for (u32 f = 0; f < index->key_def.part_count; ++f) {
+			if (index->key_def.parts[f].fieldno >= txn->tuple->cardinality)
 				tnt_raise(IllegalParams, :"tuple must have all indexed fields");
 
-			if (index->key.parts[f].type == STRING)
+			if (index->key_def.parts[f].type == STRING)
 				continue;
 
-			void *field = tuple_field(txn->tuple, index->key.parts[f].fieldno);
+			void *field = tuple_field(txn->tuple, index->key_def.parts[f].fieldno);
 			u32 len = load_varint32(&field);
 
-			if (index->key.parts[f].type == NUM && len != sizeof(u32))
+			if (index->key_def.parts[f].type == NUM && len != sizeof(u32))
 				tnt_raise(IllegalParams, :"field must be NUM");
 
-			if (index->key.parts[f].type == NUM64 && len != sizeof(u64))
+			if (index->key_def.parts[f].type == NUM64 && len != sizeof(u64))
 				tnt_raise(IllegalParams, :"field must be NUM64");
 		}
-		if (index->type == TREE && index->key.is_unique == false)
+		if (index->type == TREE && index->key_def.is_unique == false)
 			/* Don't check non unique indexes */
 			continue;
 
-		struct box_tuple *tuple = [index findBy: txn->tuple];
+		struct box_tuple *tuple = [index findByTuple: txn->tuple];
 
 		if (tuple != NULL && tuple != txn->old_tuple)
 			tnt_raise(ClientError, :ER_INDEX_VIOLATION);
@@ -168,7 +168,7 @@ prepare_replace(struct box_txn *txn, size_t cardinality, struct tbuf *data)
 	txn->tuple->cardinality = cardinality;
 	memcpy(txn->tuple->data, data->data, data->size);
 
-	txn->old_tuple = [txn->index findBy: txn->tuple];
+	txn->old_tuple = [txn->index findByTuple: txn->tuple];
 
 	if (txn->old_tuple != NULL)
 		tuple_txn_ref(txn, txn->old_tuple);
@@ -184,8 +184,8 @@ prepare_replace(struct box_txn *txn, size_t cardinality, struct tbuf *data)
 	if (txn->old_tuple != NULL) {
 #ifndef NDEBUG
 		void *ka, *kb;
-		ka = tuple_field(txn->tuple, txn->index->key.parts[0].fieldno);
-		kb = tuple_field(txn->old_tuple, txn->index->key.parts[0].fieldno);
+		ka = tuple_field(txn->tuple, txn->index->key_def.parts[0].fieldno);
+		kb = tuple_field(txn->old_tuple, txn->index->key_def.parts[0].fieldno);
 		int kal, kab;
 		kal = load_varint32(&ka);
 		kab = load_varint32(&kb);
@@ -937,12 +937,12 @@ space_free(void)
 }
 
 static void
-key_init(struct key *key, struct tarantool_cfg_space_index *cfg_index)
+key_init(struct key_def *def, struct tarantool_cfg_space_index *cfg_index)
 {
-	key->max_fieldno = 0;
-	key->part_count = 0;
+	def->max_fieldno = 0;
+	def->part_count = 0;
 
-	/* calculate key cardinality and maximal field number */
+	/* Calculate key cardinality and maximal field number. */
 	for (int k = 0; cfg_index->key_field[k] != NULL; ++k) {
 		typeof(cfg_index->key_field[k]) cfg_key = cfg_index->key_field[k];
 
@@ -951,23 +951,23 @@ key_init(struct key *key, struct tarantool_cfg_space_index *cfg_index)
 			break;
 		}
 
-		key->max_fieldno = MAX(key->max_fieldno, cfg_key->fieldno);
-		key->part_count++;
+		def->max_fieldno = MAX(def->max_fieldno, cfg_key->fieldno);
+		def->part_count++;
 	}
 
-	/* init key array */
-	key->parts = salloc(sizeof(struct key_part) * key->part_count);
-	if (key->parts == NULL) {
-		panic("can't allocate key parts array for index");
+	/* init the key_def parts array */
+	def->parts = salloc(sizeof(struct key_part) * def->part_count);
+	if (def->parts == NULL) {
+		panic("can't allocate key_def parts array for index");
 	}
 
 	/* init compare order array */
-	key->max_fieldno++;
-	key->cmp_order = salloc(key->max_fieldno * sizeof(u32));
-	if (key->cmp_order == NULL) {
-		panic("can't allocate key cmp_order array for index");
+	def->max_fieldno++;
+	def->cmp_order = salloc(def->max_fieldno * sizeof(u32));
+	if (def->cmp_order == NULL) {
+		panic("can't allocate key_def cmp_order array for index");
 	}
-	memset(key->cmp_order, -1, key->max_fieldno * sizeof(u32));
+	memset(def->cmp_order, -1, def->max_fieldno * sizeof(u32));
 
 	/* fill fields and compare order */
 	for (int k = 0; cfg_index->key_field[k] != NULL; ++k) {
@@ -979,12 +979,12 @@ key_init(struct key *key, struct tarantool_cfg_space_index *cfg_index)
 		}
 
 		/* fill keys */
-		key->parts[k].fieldno = cfg_key->fieldno;
-		key->parts[k].type = STR2ENUM(field_data_type, cfg_key->type);
+		def->parts[k].fieldno = cfg_key->fieldno;
+		def->parts[k].type = STR2ENUM(field_data_type, cfg_key->type);
 		/* fill compare order */
-		key->cmp_order[cfg_key->fieldno] = k;
+		def->cmp_order[cfg_key->fieldno] = k;
 	}
-	key->is_unique = cfg_index->unique;
+	def->is_unique = cfg_index->unique;
 }
 
 static void
@@ -1010,11 +1010,11 @@ space_config(void)
 		/* fill space indexes */
 		for (int j = 0; cfg_space->index[j] != NULL; ++j) {
 			typeof(cfg_space->index[j]) cfg_index = cfg_space->index[j];
-			struct key info;
-			key_init(&info, cfg_index);
+			struct key_def key_def;
+			key_init(&key_def, cfg_index);
 			enum index_type type = STR2ENUM(index_type, cfg_index->type);
-			Index *index = [Index alloc: type :&info];
-			[index init: type :&info :space + i :j];
+			Index *index = [Index alloc: type :&key_def];
+			[index init: type :&key_def :space + i :j];
 			space[i].index[j] = index;
 		}
 
diff --git a/mod/box/box_lua.m b/mod/box/box_lua.m
index d68efa40fdf0562f9b663a706988c7de18fe9d7e..2d672e8c0d4688c79a1657e1709af2fac00f6812 100644
--- a/mod/box/box_lua.m
+++ b/mod/box/box_lua.m
@@ -390,7 +390,7 @@ lbox_index_next(struct lua_State *L)
 			struct tbuf *data = tbuf_alloc(fiber->gc_pool);
 			for (int i = 0; i < argc; ++i)
 				append_key_part(L, i + 2, data,
-						index->key.parts[i].type);
+						index->key_def.parts[i].type);
 			key = data->data;
 		}
 		/*
@@ -399,10 +399,10 @@ lbox_index_next(struct lua_State *L)
 		 * keys.
 		*/
 		assert(cardinality != 0);
-		if (cardinality > index->key.part_count)
+		if (cardinality > index->key_def.part_count)
 			luaL_error(L, "index.next(): key part count (%d) "
 				   "does not match index cardinality (%d)",
-				   cardinality, index->key.part_count);
+				   cardinality, index->key_def.part_count);
 		[index initIterator: index->position :key :cardinality];
 	}
 	struct box_tuple *tuple = index->position->next(index->position);
diff --git a/mod/box/index.h b/mod/box/index.h
index 05a59250a6e497ef9f2084d3adefa724ecae7b55..588b157a37a9b9220a15c80eb5f3728672743841 100644
--- a/mod/box/index.h
+++ b/mod/box/index.h
@@ -51,7 +51,7 @@ struct key_part {
 };
 
 /* Descriptor of a multipart key. */
-struct key {
+struct key_def {
 	/* Description of parts of a multipart index. */
 	struct key_part *parts;
 	/*
@@ -85,21 +85,21 @@ struct key {
 	 */
 	struct iterator *position;
 	/* Description of a possibly multipart key. */
-	struct key key;
+	struct key_def key_def;
 	enum index_type type;
 	bool enabled;
 	/* Relative offset of the index in its namespace. */
 	u32 n;
 };
 
-+ (Index *) alloc: (enum index_type) type_arg :(struct key *) key_arg;
++ (Index *) alloc: (enum index_type) type_arg :(struct key_def *) key_def_arg;
 /**
  * Initialize index instance.
  *
  * @param space    space the index belongs to
  * @param key      key part description
  */
-- (id) init: (enum index_type) type_arg :(struct key *) key_arg
+- (id) init: (enum index_type) type_arg :(struct key_def *) key_def_arg
 	:(struct space *) space_arg :(u32) n_arg;
 /** Destroy and free index instance. */
 - (void) free;
@@ -111,7 +111,7 @@ struct key {
 - (struct box_tuple *) min;
 - (struct box_tuple *) max;
 - (struct box_tuple *) find: (void *) key_arg; /* only for unique lookups */
-- (struct box_tuple *) findBy: (struct box_tuple *) tuple;
+- (struct box_tuple *) findByTuple: (struct box_tuple *) tuple;
 - (void) remove: (struct box_tuple *) tuple;
 - (void) replace: (struct box_tuple *) old_tuple :(struct box_tuple *) new_tuple;
 /**
diff --git a/mod/box/index.m b/mod/box/index.m
index 6c75ed9b6ab942bf035e3decf7f9d2d912445747..2232b4914c65a18a3f8835d87fe210bfe273e2fe 100644
--- a/mod/box/index.m
+++ b/mod/box/index.m
@@ -57,14 +57,14 @@ iterator_first_equal(struct iterator *it)
 @class HashStrIndex;
 @class TreeIndex;
 
-+ (Index *) alloc: (enum index_type) type :(struct key *) key
++ (Index *) alloc: (enum index_type) type :(struct key_def *) key_def
 {
 	switch (type) {
 	case HASH:
 		/* Hash index, check key type.
 		 * Hash indes always has a single-field key.
 		 */
-		switch (key->parts[0].type) {
+		switch (key_def->parts[0].type) {
 		case NUM:
 			return [Hash32Index alloc]; /* 32-bit integer hash */
 		case NUM64:
@@ -83,11 +83,11 @@ iterator_first_equal(struct iterator *it)
 	panic("unsupported index type");
 }
 
-- (id) init: (enum index_type) type_arg :(struct key *) key_arg
+- (id) init: (enum index_type) type_arg :(struct key_def *) key_def_arg
 	:(struct space *) space_arg :(u32) n_arg;
 {
 	self = [super init];
-	key = *key_arg;
+	key_def = *key_def_arg;
 	type = type_arg;
 	n = n_arg;
 	space = space_arg;
@@ -98,8 +98,8 @@ iterator_first_equal(struct iterator *it)
 
 - (void) free
 {
-	sfree(key.parts);
-	sfree(key.cmp_order);
+	sfree(key_def.parts);
+	sfree(key_def.cmp_order);
 	sfree(position);
 	[super free];
 }
@@ -127,14 +127,14 @@ iterator_first_equal(struct iterator *it)
 	return NULL;
 }
 
-- (struct box_tuple *) find: (void *) key_arg
+- (struct box_tuple *) find: (void *) key
 {
-	(void) key_arg;
+	(void) key;
 	[self subclassResponsibility: _cmd];
 	return NULL;
 }
 
-- (struct box_tuple *) findBy: (struct box_tuple *) pattern
+- (struct box_tuple *) findByTuple: (struct box_tuple *) pattern
 {
 	(void) pattern;
 	[self subclassResponsibility: _cmd];
@@ -167,12 +167,12 @@ iterator_first_equal(struct iterator *it)
 	[self subclassResponsibility: _cmd];
 }
 
-- (void) initIterator: (struct iterator *) iterator :(void *) key_arg
+- (void) initIterator: (struct iterator *) iterator :(void *) key
 			:(int) part_count
 {
 	(void) iterator;
 	(void) part_count;
-	(void) key_arg;
+	(void) key;
 	[self subclassResponsibility: _cmd];
 }
 @end
@@ -230,12 +230,12 @@ hash_iterator_next(struct iterator *iterator)
 	return NULL;
 }
 
-- (struct box_tuple *) findBy: (struct box_tuple *) tuple
+- (struct box_tuple *) findByTuple: (struct box_tuple *) tuple
 {
 	/* Hash index currently is always single-part. */
-	void *field = tuple_field(tuple, key.parts[0].fieldno);
+	void *field = tuple_field(tuple, key_def.parts[0].fieldno);
 	if (field == NULL)
-		tnt_raise(ClientError, :ER_NO_SUCH_FIELD, key.parts[0].fieldno);
+		tnt_raise(ClientError, :ER_NO_SUCH_FIELD, key_def.parts[0].fieldno);
 	return [self find: field];
 }
 
@@ -290,14 +290,14 @@ hash_iterator_next(struct iterator *iterator)
 	if (k != mh_end(int_hash))
 		ret = mh_value(int_hash, k);
 #ifdef DEBUG
-	say_debug("index_hash_num_find(self:%p, key:%i) = %p", self, num, ret);
+	say_debug("Hash32Index find(self:%p, key:%i) = %p", self, num, ret);
 #endif
 	return ret;
 }
 
 - (void) remove: (struct box_tuple *) tuple
 {
-	void *field = tuple_field(tuple, self->key.parts[0].fieldno);
+	void *field = tuple_field(tuple, key_def.parts[0].fieldno);
 	unsigned int field_size = load_varint32(&field);
 	u32 num = *(u32 *)field;
 
@@ -308,14 +308,14 @@ hash_iterator_next(struct iterator *iterator)
 	if (k != mh_end(int_hash))
 		mh_i32ptr_del(int_hash, k);
 #ifdef DEBUG
-	say_debug("index_hash_num_remove(self:%p, key:%i)", self, num);
+	say_debug("Hash32Index remove(self:%p, key:%i)", self, num);
 #endif
 }
 
 - (void) replace: (struct box_tuple *) old_tuple
 	:(struct box_tuple *) new_tuple
 {
-	void *field = tuple_field(new_tuple, key.parts[0].fieldno);
+	void *field = tuple_field(new_tuple, key_def.parts[0].fieldno);
 	u32 field_size = load_varint32(&field);
 	u32 num = *(u32 *)field;
 
@@ -323,7 +323,7 @@ hash_iterator_next(struct iterator *iterator)
 		tnt_raise(IllegalParams, :"key is not u32");
 
 	if (old_tuple != NULL) {
-		void *old_field = tuple_field(old_tuple, key.parts[0].fieldno);
+		void *old_field = tuple_field(old_tuple, key_def.parts[0].fieldno);
 		load_varint32(&old_field);
 		u32 old_num = *(u32 *)old_field;
 		mh_int_t k = mh_i32ptr_get(int_hash, old_num);
@@ -334,7 +334,7 @@ hash_iterator_next(struct iterator *iterator)
 	mh_i32ptr_put(int_hash, num, new_tuple, NULL);
 
 #ifdef DEBUG
-	say_debug("index_hash_num_replace(self:%p, old_tuple:%p, new_tuple:%p) key:%i",
+	say_debug("Hash32Index replace(self:%p, old_tuple:%p, new_tuple:%p) key:%i",
 		  self, old_tuple, new_tuple, num);
 #endif
 }
@@ -350,7 +350,7 @@ hash_iterator_next(struct iterator *iterator)
 	it->hash = int_hash;
 }
 
-- (void) initIterator: (struct iterator *) iterator :(void *) key_arg
+- (void) initIterator: (struct iterator *) iterator :(void *) key
 			:(int) part_count
 {
 	struct hash_iterator *it = hash_iterator(iterator);
@@ -358,8 +358,8 @@ hash_iterator_next(struct iterator *iterator)
 	assert(part_count == 1);
 	assert(iterator->next = hash_iterator_next);
 
-	u32 field_size = load_varint32(&key_arg);
-	u32 num = *(u32 *)key_arg;
+	u32 field_size = load_varint32(&key);
+	u32 num = *(u32 *)key;
 
 	if (field_size != 4)
 		tnt_raise(IllegalParams, :"key is not u32");
@@ -410,14 +410,14 @@ hash_iterator_next(struct iterator *iterator)
 	if (k != mh_end(int64_hash))
 		ret = mh_value(int64_hash, k);
 #ifdef DEBUG
-	say_debug("index_hash_num64_find(self:%p, key:%"PRIu64") = %p", self, num, ret);
+	say_debug("Hash64Index find(self:%p, key:%"PRIu64") = %p", self, num, ret);
 #endif
 	return ret;
 }
 
 - (void) remove: (struct box_tuple *) tuple
 {
-	void *field = tuple_field(tuple, key.parts[0].fieldno);
+	void *field = tuple_field(tuple, key_def.parts[0].fieldno);
 	unsigned int field_size = load_varint32(&field);
 	u64 num = *(u64 *)field;
 
@@ -428,14 +428,14 @@ hash_iterator_next(struct iterator *iterator)
 	if (k != mh_end(int64_hash))
 		mh_i64ptr_del(int64_hash, k);
 #ifdef DEBUG
-	say_debug("index_hash_num64_remove(self:%p, key:%"PRIu64")", self, num);
+	say_debug("Hash64Index remove(self:%p, key:%"PRIu64")", self, num);
 #endif
 }
 
 - (void) replace: (struct box_tuple *) old_tuple
 	:(struct box_tuple *) new_tuple
 {
-	void *field = tuple_field(new_tuple, key.parts[0].fieldno);
+	void *field = tuple_field(new_tuple, key_def.parts[0].fieldno);
 	u32 field_size = load_varint32(&field);
 	u64 num = *(u64 *)field;
 
@@ -443,7 +443,8 @@ hash_iterator_next(struct iterator *iterator)
 		tnt_raise(IllegalParams, :"key is not u64");
 
 	if (old_tuple != NULL) {
-		void *old_field = tuple_field(old_tuple, key.parts[0].fieldno);
+		void *old_field = tuple_field(old_tuple,
+					      key_def.parts[0].fieldno);
 		load_varint32(&old_field);
 		u64 old_num = *(u64 *)old_field;
 		mh_int_t k = mh_i64ptr_get(int64_hash, old_num);
@@ -453,7 +454,7 @@ hash_iterator_next(struct iterator *iterator)
 
 	mh_i64ptr_put(int64_hash, num, new_tuple, NULL);
 #ifdef DEBUG
-	say_debug("index_hash_num64_replace(self:%p, old_tuple:%p, tuple:%p) key:%"PRIu64,
+	say_debug("Hash64Index replace(self:%p, old_tuple:%p, tuple:%p) key:%"PRIu64,
 		  self, old_tuple, new_tuple, num);
 #endif
 }
@@ -526,7 +527,7 @@ hash_iterator_next(struct iterator *iterator)
 		ret = mh_value(str_hash, k);
 #ifdef DEBUG
 	u32 field_size = load_varint32(&field);
-	say_debug("index_hash_str_find(self:%p, key:(%i)'%.*s') = %p",
+	say_debug("HashStrIndex find(self:%p, key:(%i)'%.*s') = %p",
 		  self, field_size, field_size, (u8 *)field, ret);
 #endif
 	return ret;
@@ -534,14 +535,14 @@ hash_iterator_next(struct iterator *iterator)
 
 - (void) remove: (struct box_tuple *) tuple
 {
-	void *field = tuple_field(tuple, key.parts[0].fieldno);
+	void *field = tuple_field(tuple, key_def.parts[0].fieldno);
 
 	mh_int_t k = mh_lstrptr_get(str_hash, field);
 	if (k != mh_end(str_hash))
 		mh_lstrptr_del(str_hash, k);
 #ifdef DEBUG
 	u32 field_size = load_varint32(&field);
-	say_debug("index_hash_str_remove(self:%p, key:'%.*s')",
+	say_debug("HashStrIndex remove(self:%p, key:'%.*s')",
 		  self, field_size, (u8 *)field);
 #endif
 }
@@ -549,13 +550,15 @@ hash_iterator_next(struct iterator *iterator)
 - (void) replace: (struct box_tuple *) old_tuple
 	:(struct box_tuple *) new_tuple
 {
-	void *field = tuple_field(new_tuple, key.parts[0].fieldno);
+	void *field = tuple_field(new_tuple, key_def.parts[0].fieldno);
 
 	if (field == NULL)
-		tnt_raise(ClientError, :ER_NO_SUCH_FIELD, key.parts[0].fieldno);
+		tnt_raise(ClientError, :ER_NO_SUCH_FIELD,
+			  key_def.parts[0].fieldno);
 
 	if (old_tuple != NULL) {
-		void *old_field = tuple_field(old_tuple, key.parts[0].fieldno);
+		void *old_field = tuple_field(old_tuple,
+					      key_def.parts[0].fieldno);
 		mh_int_t k = mh_lstrptr_get(str_hash, old_field);
 		if (k != mh_end(str_hash))
 			mh_lstrptr_del(str_hash, k);
@@ -564,7 +567,7 @@ hash_iterator_next(struct iterator *iterator)
 	mh_lstrptr_put(str_hash, field, new_tuple, NULL);
 #ifdef DEBUG
 	u32 field_size = load_varint32(&field);
-	say_debug("index_hash_str_replace(self:%p, old_tuple:%p, tuple:%p) key:'%.*s'",
+	say_debug("HashStrIndex replace(self:%p, old_tuple:%p, tuple:%p) key:'%.*s'",
 		  self, old_tuple, new_tuple, field_size, (u8 *)field);
 #endif
 }
@@ -672,21 +675,21 @@ field_compare(struct field *f1, struct field *f2, enum field_data_type type)
 	panic("impossible happened");
 }
 
-struct index_tree_el {
+struct tree_el {
 	struct box_tuple *tuple;
 	struct field key[];
 };
 
-#define INDEX_TREE_EL_SIZE(key) \
-	(sizeof(struct index_tree_el) + sizeof(struct field) * (key)->part_count)
+#define TREE_EL_SIZE(key) \
+	(sizeof(struct tree_el) + sizeof(struct field) * (key)->part_count)
 
 void
-index_tree_el_init(struct index_tree_el *elem,
-		   struct key *key, struct box_tuple *tuple)
+tree_el_init(struct tree_el *elem,
+	     struct key_def *key_def, struct box_tuple *tuple)
 {
 	void *tuple_data = tuple->data;
 
-	for (i32 i = 0; i < key->max_fieldno; ++i) {
+	for (i32 i = 0; i < key_def->max_fieldno; ++i) {
 		struct field f;
 
 		if (i < tuple->cardinality) {
@@ -700,48 +703,48 @@ index_tree_el_init(struct index_tree_el *elem,
 		} else
 			f = ASTERISK;
 
-		u32 key_field_no = key->cmp_order[i];
+		u32 fieldno = key_def->cmp_order[i];
 
-		if (key_field_no == -1)
+		if (fieldno == -1)
 			continue;
 
-		if (key->parts[key_field_no].type == NUM) {
+		if (key_def->parts[fieldno].type == NUM) {
 			if (f.len != 4)
 				tnt_raise(IllegalParams, :"key is not u32");
-		} else if (key->parts[key_field_no].type == NUM64 && f.len != 8) {
+		} else if (key_def->parts[fieldno].type == NUM64 && f.len != 8) {
 				tnt_raise(IllegalParams, :"key is not u64");
 		}
 
-		elem->key[key_field_no] = f;
+		elem->key[fieldno] = f;
 	}
 	elem->tuple = tuple;
 }
 
 void
-init_search_pattern(struct index_tree_el *pattern,
-		    struct key *key, int part_count, void *key_field)
+init_search_pattern(struct tree_el *pattern,
+		    struct key_def *key_def, int part_count, void *key)
 {
-	assert(part_count <= key->part_count);
+	assert(part_count <= key_def->part_count);
 
-	for (i32 i = 0; i < key->part_count; ++i)
+	for (i32 i = 0; i < key_def->part_count; ++i)
 		pattern->key[i] = ASTERISK;
 	for (int i = 0; i < part_count; i++) {
 		u32 len;
 
-		len = pattern->key[i].len = load_varint32(&key_field);
-		if (key->parts[i].type == NUM) {
+		len = pattern->key[i].len = load_varint32(&key);
+		if (key_def->parts[i].type == NUM) {
 			if (len != 4)
 				tnt_raise(IllegalParams, :"key is not u32");
-		} else if (key->parts[i].type == NUM64 && len != 8) {
+		} else if (key_def->parts[i].type == NUM64 && len != 8) {
 				tnt_raise(IllegalParams, :"key is not u64");
 		}
 		if (len <= sizeof(pattern->key[i].data)) {
 			memset(pattern->key[i].data, 0, sizeof(pattern->key[i].data));
-			memcpy(pattern->key[i].data, key_field, len);
+			memcpy(pattern->key[i].data, key, len);
 		} else
-			pattern->key[i].data_ptr = key_field;
+			pattern->key[i].data_ptr = key;
 
-		key_field += len;
+		key += len;
 	}
 
 	pattern->tuple = NULL;
@@ -756,14 +759,14 @@ init_search_pattern(struct index_tree_el *pattern,
  *              > 0  - a is greater than b
  */
 static int
-index_tree_el_unique_cmp(struct index_tree_el *elem_a,
-			 struct index_tree_el *elem_b,
-			 struct key *key)
+tree_el_unique_cmp(struct tree_el *elem_a,
+		   struct tree_el *elem_b,
+		   struct key_def *key_def)
 {
 	int r = 0;
-	for (i32 i = 0, end = key->part_count; i < end; ++i) {
+	for (i32 i = 0, end = key_def->part_count; i < end; ++i) {
 		r = field_compare(&elem_a->key[i], &elem_b->key[i],
-				  key->parts[i].type);
+				  key_def->parts[i].type);
 		if (r != 0)
 			break;
 	}
@@ -771,10 +774,10 @@ index_tree_el_unique_cmp(struct index_tree_el *elem_a,
 }
 
 static int
-index_tree_el_cmp(struct index_tree_el *elem_a, struct index_tree_el *elem_b,
-		  struct key *key)
+tree_el_cmp(struct tree_el *elem_a, struct tree_el *elem_b,
+	    struct key_def *key_def)
 {
-	int r = index_tree_el_unique_cmp(elem_a, elem_b, key);
+	int r = tree_el_unique_cmp(elem_a, elem_b, key_def);
 	if (r == 0 && elem_a->tuple && elem_b->tuple)
 		r = (elem_a->tuple < elem_b->tuple ?
 		     -1 : elem_a->tuple > elem_b->tuple);
@@ -786,7 +789,7 @@ SPTREE_DEF(str_t, realloc);
 
 @interface TreeIndex: Index {
 	sptree_str_t *tree;
-	struct index_tree_el *pattern;
+	struct tree_el *pattern;
 };
 - (void) build: (Index *) pk;
 @end
@@ -794,9 +797,9 @@ SPTREE_DEF(str_t, realloc);
 struct tree_iterator {
 	struct iterator base;
 	struct sptree_str_t_iterator *t_iter;
-	struct index_tree_el *pattern;
+	struct tree_el *pattern;
 	sptree_str_t *tree;
-	struct key *key;
+	struct key_def *key_def;
 };
 
 static inline struct tree_iterator *
@@ -812,7 +815,7 @@ tree_iterator_next(struct iterator *iterator)
 
 	struct tree_iterator *it = tree_iterator(iterator);
 
-	struct index_tree_el *elem = sptree_str_t_iterator_next(it->t_iter);
+	struct tree_el *elem = sptree_str_t_iterator_next(it->t_iter);
 
 	return elem ? elem->tuple : NULL;
 }
@@ -824,11 +827,13 @@ tree_iterator_next_equal(struct iterator *iterator)
 
 	struct tree_iterator *it = tree_iterator(iterator);
 
-	struct index_tree_el *elem =
+	struct tree_el *elem =
 		sptree_str_t_iterator_next(it->t_iter);
 
-	if (elem != NULL && index_tree_el_unique_cmp(it->pattern, elem, it->key) == 0)
+	if (elem != NULL &&
+	    tree_el_unique_cmp(it->pattern, elem, it->key_def) == 0) {
 		return elem->tuple;
+	}
 
 	return NULL;
 }
@@ -845,14 +850,14 @@ tree_iterator_next_equal(struct iterator *iterator)
 - (void) enable
 {
 	enabled = false;
-	pattern = salloc(INDEX_TREE_EL_SIZE(&key));
+	pattern = salloc(TREE_EL_SIZE(&key_def));
 	tree = salloc(sizeof(*tree));
 	memset(tree, 0, sizeof(*tree));
 	if (n == 0) {/* pk */
 		sptree_str_t_init(tree,
-				  INDEX_TREE_EL_SIZE(&key),
+				  TREE_EL_SIZE(&key_def),
 				  NULL, 0, 0,
-				  (void *)index_tree_el_unique_cmp, &key);
+				  (void *)tree_el_unique_cmp, &key_def);
 		enabled = true;
 	}
 }
@@ -864,62 +869,63 @@ tree_iterator_next_equal(struct iterator *iterator)
 
 - (struct box_tuple *) min
 {
-	struct index_tree_el *elem = sptree_str_t_first(tree);
+	struct tree_el *elem = sptree_str_t_first(tree);
 
 	return elem ? elem->tuple : NULL;
 }
 
 - (struct box_tuple *) max
 {
-	struct index_tree_el *elem = sptree_str_t_last(tree);
+	struct tree_el *elem = sptree_str_t_last(tree);
 
 	return elem ? elem->tuple : NULL;
 }
 
-- (struct box_tuple *) find: (void *) key_arg
+- (struct box_tuple *) find: (void *) key
 {
-	init_search_pattern(pattern, &key, 1, key_arg);
-	struct index_tree_el *elem = sptree_str_t_find(tree, pattern);
+	init_search_pattern(pattern, &key_def, 1, key);
+	struct tree_el *elem = sptree_str_t_find(tree, pattern);
 
 	return elem ? elem->tuple : NULL;
 }
 
-- (struct box_tuple *) findBy: (struct box_tuple *) tuple
+- (struct box_tuple *) findByTuple: (struct box_tuple *) tuple
 {
-	index_tree_el_init(pattern, &key, tuple);
+	tree_el_init(pattern, &key_def, tuple);
 
-	struct index_tree_el *elem = sptree_str_t_find(tree, pattern);
+	struct tree_el *elem = sptree_str_t_find(tree, pattern);
 
 	return elem ? elem->tuple : NULL;
 }
 
 - (void) remove: (struct box_tuple *) tuple
 {
-	index_tree_el_init(pattern, &key, tuple);
+	tree_el_init(pattern, &key_def, tuple);
 	sptree_str_t_delete(tree, pattern);
 }
 
 - (void) replace: (struct box_tuple *) old_tuple
 	:(struct box_tuple *) new_tuple
 {
-	if (new_tuple->cardinality < key.max_fieldno)
-		tnt_raise(ClientError, :ER_NO_SUCH_FIELD, key.max_fieldno);
+	if (new_tuple->cardinality < key_def.max_fieldno)
+		tnt_raise(ClientError, :ER_NO_SUCH_FIELD,
+			  key_def.max_fieldno);
 
 	if (old_tuple) {
-		index_tree_el_init(pattern, &key, old_tuple);
+		tree_el_init(pattern, &key_def, old_tuple);
 		sptree_str_t_delete(tree, pattern);
 	}
-	index_tree_el_init(pattern, &key, new_tuple);
+	tree_el_init(pattern, &key_def, new_tuple);
 	sptree_str_t_insert(tree, pattern);
 }
 
 - (struct iterator *) allocIterator
 {
 	struct tree_iterator *it = salloc(sizeof(struct tree_iterator) +
-					  INDEX_TREE_EL_SIZE(&key));
-	it->pattern = (struct index_tree_el *) (it + 1);
+					  TREE_EL_SIZE(&key_def));
+	it->pattern = (struct tree_el *) (it + 1);
 	it->base.next = tree_iterator_next;
-	it->key = &key;
+	it->key_def = &key_def;
 	it->tree = tree;
 	return (struct iterator *) it;
 }
@@ -929,19 +935,19 @@ tree_iterator_next_equal(struct iterator *iterator)
 	[self initIterator: iterator :NULL :0];
 }
 
-- (void) initIterator: (struct iterator *) iterator :(void *) key_arg
+- (void) initIterator: (struct iterator *) iterator :(void *) key
 			:(int) part_count
 {
 	assert(iterator->next == tree_iterator_next);
 
 	struct tree_iterator *it = tree_iterator(iterator);
 
-	if (key.is_unique && part_count == key.part_count)
+	if (key_def.is_unique && part_count == key_def.part_count)
 		it->base.next_equal = iterator_first_equal;
 	else
 		it->base.next_equal = tree_iterator_next_equal;
 
-	init_search_pattern(it->pattern, &key, part_count, key_arg);
+	init_search_pattern(it->pattern, &key_def, part_count, key);
 	sptree_str_t_iterator_init_set(tree, &it->t_iter, it->pattern);
 }
 
@@ -952,19 +958,19 @@ tree_iterator_next_equal(struct iterator *iterator)
 
 	assert(enabled == false);
 
-	struct index_tree_el *elem = NULL;
+	struct tree_el *elem = NULL;
 	if (n_tuples) {
 		/*
 		 * Allocate a little extra to avoid
 		 * unnecessary realloc() when more data is
 		 * inserted.
 		*/
-		size_t sz = estimated_tuples * INDEX_TREE_EL_SIZE(&key);
+		size_t sz = estimated_tuples * TREE_EL_SIZE(&key_def);
 		elem = malloc(sz);
 		if (elem == NULL)
 			panic("malloc(): failed to allocate %"PRI_SZ" bytes", sz);
 	}
-	struct index_tree_el *m;
+	struct tree_el *m;
 	u32 i = 0;
 
 	struct iterator *it = pk->position;
@@ -972,10 +978,10 @@ tree_iterator_next_equal(struct iterator *iterator)
 	struct box_tuple *tuple;
 	while ((tuple = it->next(it))) {
 
-		m = (struct index_tree_el *)
-			((char *)elem + i * INDEX_TREE_EL_SIZE(&key));
+		m = (struct tree_el *)
+			((char *)elem + i * TREE_EL_SIZE(&key_def));
 
-		index_tree_el_init(m, &key, tuple);
+		tree_el_init(m, &key_def, tuple);
 		++i;
 	}
 
@@ -983,10 +989,10 @@ tree_iterator_next_equal(struct iterator *iterator)
 		say_info("Sorting %"PRIu32 " keys in index %" PRIu32 "...", n_tuples, self->n);
 
 	/* If n_tuples == 0 then estimated_tuples = 0, elem == NULL, tree is empty */
-	sptree_str_t_init(tree, INDEX_TREE_EL_SIZE(&key),
+	sptree_str_t_init(tree, TREE_EL_SIZE(&key_def),
 			  elem, n_tuples, estimated_tuples,
-			  (void *) (key.is_unique ? index_tree_el_unique_cmp :
-			  index_tree_el_cmp), &key);
+			  (void *) (key_def.is_unique ? tree_el_unique_cmp
+				    : tree_el_cmp), &key_def);
 	enabled = true;
 }
 @end
diff --git a/mod/box/memcached.m b/mod/box/memcached.m
index 0b873e4501471fa4f791f5bc7be8b2ee612dc03a..97b6815e369e44a675fa68da7c85a515bb31b7cc 100644
--- a/mod/box/memcached.m
+++ b/mod/box/memcached.m
@@ -430,26 +430,26 @@ memcached_space_init()
 	memc_s->cardinality = 4;
 	memc_s->n = cfg.memcached_space;
 
-	struct key key;
+	struct key_def key_def;
 	/* Configure memcached index key. */
-	key.part_count = 1;
-	key.is_unique = true;
+	key_def.part_count = 1;
+	key_def.is_unique = true;
 
-	key.parts = salloc(sizeof(struct key_part));
-	key.cmp_order = salloc(sizeof(u32));
+	key_def.parts = salloc(sizeof(struct key_part));
+	key_def.cmp_order = salloc(sizeof(u32));
 
-	if (key.parts == NULL || key.cmp_order == NULL)
+	if (key_def.parts == NULL || key_def.cmp_order == NULL)
 		panic("out of memory when configuring memcached_space");
 
-	key.parts[0].fieldno = 0;
-	key.parts[0].type = STRING;
+	key_def.parts[0].fieldno = 0;
+	key_def.parts[0].type = STRING;
 
-	key.max_fieldno = 1;
-	key.cmp_order[0] = 0;
+	key_def.max_fieldno = 1;
+	key_def.cmp_order[0] = 0;
 
 	/* Configure memcached index. */
-	Index *memc_index = memc_s->index[0] = [Index alloc: HASH :&key];
-	[memc_index init: HASH :&key :memc_s :0];
+	Index *memc_index = memc_s->index[0] = [Index alloc: HASH :&key_def];
+	[memc_index init: HASH :&key_def :memc_s :0];
 }
 
 /** Delete a bunch of expired keys. */