From 46fee0c6a2b5b101474d603bbe8f6d9988a1b6e3 Mon Sep 17 00:00:00 2001
From: Konstantin Shulgin <konstantin.shulgin@gmail.com>
Date: Tue, 28 Jun 2011 13:09:41 +0400
Subject: [PATCH] Refactoring: python to PEP8

A PEP 8 coding style guide for Python code was added. Python code
indentation was changed to the PEP 8 format.
---
 doc/coding-style-python.txt         | 854 ++++++++++++++++++++++++++++
 test/lib/box.py                     |  59 +-
 test/lib/server.py                  | 491 ++++++++--------
 test/lib/sql_ast.py                 | 399 ++++++-------
 test/lib/tarantool_admin.py         |  43 +-
 test/lib/tarantool_box_server.py    |  58 +-
 test/lib/tarantool_connection.py    | 114 ++--
 test/lib/tarantool_feeder_server.py |  23 +-
 test/lib/tarantool_preprocessor.py  |  83 +--
 test/lib/tarantool_server.py        | 125 ++--
 test/lib/test_suite.py              | 476 ++++++++--------
 11 files changed, 1812 insertions(+), 913 deletions(-)
 create mode 100644 doc/coding-style-python.txt

diff --git a/doc/coding-style-python.txt b/doc/coding-style-python.txt
new file mode 100644
index 0000000000..73b518fc51
--- /dev/null
+++ b/doc/coding-style-python.txt
@@ -0,0 +1,854 @@
+The project's coding style for Python code is based on a version of the PEP 8
+coding style.
+
+The latest version of the PEP 8 style guide can be found at:
+
+http://www.python.org/dev/peps/pep-0008/
+-------------------------------------------------------------------------------
+
+Introduction
+    This document gives coding conventions for the Python code comprising the
+    standard library in the main Python distribution.  Please see the
+    companion informational PEP describing style guidelines for the C code in
+    the C implementation of Python[1].
+
+    This document was adapted from Guido's original Python Style Guide
+    essay[2], with some additions from Barry's style guide[5].  Where there's
+    conflict, Guido's style rules for the purposes of this PEP.  This PEP may
+    still be incomplete (in fact, it may never be finished <wink>).
+
+
+A Foolish Consistency is the Hobgoblin of Little Minds
+    One of Guido's key insights is that code is read much more often than it
+    is written.  The guidelines provided here are intended to improve the
+    readability of code and make it consistent across the wide spectrum of
+    Python code.  As PEP 20 [6] says, "Readability counts".
+
+    A style guide is about consistency.  Consistency with this style guide is
+    important.  Consistency within a project is more important. Consistency
+    within one module or function is most important.
+
+    But most importantly: know when to be inconsistent -- sometimes the style
+    guide just doesn't apply.  When in doubt, use your best judgment.  Look
+    at other examples and decide what looks best.  And don't hesitate to ask!
+
+    Two good reasons to break a particular rule:
+
+    (1) When applying the rule would make the code less readable, even for
+        someone who is used to reading code that follows the rules.
+
+    (2) To be consistent with surrounding code that also breaks it (maybe for
+        historic reasons) -- although this is also an opportunity to clean up
+        someone else's mess (in true XP style).
+
+
+Code lay-out
+  Indentation
+
+    Use 4 spaces per indentation level.
+
+    For really old code that you don't want to mess up, you can continue to
+    use 8-space tabs.
+
+    Continuation lines should align wrapped elements either vertically using
+    Python's implicit line joining inside parentheses, brackets and braces, or
+    using a hanging indent.  When using a hanging indent the following
+    considerations should be applied; there should be no arguments on the
+    first line and further indentation should be used to clearly distinguish
+    itself as a continuation line.
+
+    Yes:  # Aligned with opening delimiter
+          foo = long_function_name(var_one, var_two,
+                                   var_three, var_four)
+
+          # More indentation included to distinguish this from the rest.
+          def long_function_name(
+                  var_one, var_two, var_three,
+                  var_four):
+              print(var_one)
+
+    No:   # Arguments on first line forbidden when not using vertical alignment
+          foo = long_function_name(var_one, var_two,
+              var_three, var_four)
+
+          # Further indentation required as indentation is not distinguishable
+          def long_function_name(
+              var_one, var_two, var_three,
+              var_four):
+              print(var_one)
+
+    Optional:
+          # Extra indentation is not necessary.
+          foo = long_function_name(
+            var_one, var_two,
+            var_three, var_four)
+
+  Tabs or Spaces?
+
+    Never mix tabs and spaces.
+
+    The most popular way of indenting Python is with spaces only.  The
+    second-most popular way is with tabs only.  Code indented with a mixture
+    of tabs and spaces should be converted to using spaces exclusively.  When
+    invoking the Python command line interpreter with the -t option, it issues
+    warnings about code that illegally mixes tabs and spaces.  When using -tt
+    these warnings become errors.  These options are highly recommended!
+
+    For new projects, spaces-only are strongly recommended over tabs.  Most
+    editors have features that make this easy to do.
+
+  Maximum Line Length
+
+    Limit all lines to a maximum of 79 characters.
+
+    There are still many devices around that are limited to 80 character
+    lines; plus, limiting windows to 80 characters makes it possible to have
+    several windows side-by-side.  The default wrapping on such devices
+    disrupts the visual structure of the code, making it more difficult to
+    understand.  Therefore, please limit all lines to a maximum of 79
+    characters.  For flowing long blocks of text (docstrings or comments),
+    limiting the length to 72 characters is recommended.
+
+    The preferred way of wrapping long lines is by using Python's implied line
+    continuation inside parentheses, brackets and braces.  Long lines can be
+    broken over multiple lines by wrapping expressions in parentheses. These
+    should be used in preference to using a backslash for line continuation.
+    Make sure to indent the continued line appropriately.  The preferred place
+    to break around a binary operator is *after* the operator, not before it.
+    Some examples:
+
+    class Rectangle(Blob):
+
+        def __init__(self, width, height,
+                     color='black', emphasis=None, highlight=0):
+            if (width == 0 and height == 0 and
+                color == 'red' and emphasis == 'strong' or
+                highlight > 100):
+                raise ValueError("sorry, you lose")
+            if width == 0 and height == 0 and (color == 'red' or
+                                               emphasis is None):
+                raise ValueError("I don't think so -- values are %s, %s" %
+                                 (width, height))
+            Blob.__init__(self, width, height,
+                          color, emphasis, highlight)
+
+  Blank Lines
+
+    Separate top-level function and class definitions with two blank lines.
+
+    Method definitions inside a class are separated by a single blank line.
+
+    Extra blank lines may be used (sparingly) to separate groups of related
+    functions.  Blank lines may be omitted between a bunch of related
+    one-liners (e.g. a set of dummy implementations).
+
+    Use blank lines in functions, sparingly, to indicate logical sections.
+
+    Python accepts the control-L (i.e. ^L) form feed character as whitespace;
+    many tools treat these characters as page separators, so you may use them
+    to separate pages of related sections of your file.  Note, some editors
+    and web-based code viewers may not recognize control-L as a form feed
+    and will show another glyph in its place.
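+
+    Putting the blank line rules above together, a short sketch (the names
+    are arbitrary):
+
+        def create():
+            pass
+
+
+        def destroy():
+            pass
+
+
+        class Connection(object):
+
+            def open(self):
+                pass
+
+            def close(self):
+                pass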
+
+  Encodings (PEP 263)
+
+    Code in the core Python distribution should always use the ASCII or
+    Latin-1 encoding (a.k.a. ISO-8859-1).  For Python 3.0 and beyond,
+    UTF-8 is preferred over Latin-1, see PEP 3120.
+
+    Files using ASCII should not have a coding cookie.  Latin-1 (or
+    UTF-8) should only be used when a comment or docstring needs to
+    mention an author name that requires Latin-1; otherwise, using
+    \x, \u or \U escapes is the preferred way to include non-ASCII
+    data in string literals.
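+
+    For reference, a PEP 263 coding cookie is a comment placed on the first
+    or second line of the file:
+
+        # -*- coding: latin-1 -*-
+
+    while the preferred ASCII-only alternative spells such data with escapes:
+
+        author = "Fran\xe7ois"    # the byte 0xe7 via an \x escape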
+
+    For Python 3.0 and beyond, the following policy is prescribed for
+    the standard library (see PEP 3131): All identifiers in the Python
+    standard library MUST use ASCII-only identifiers, and SHOULD use
+    English words wherever feasible (in many cases, abbreviations and
+    technical terms are used which aren't English). In addition,
+    string literals and comments must also be in ASCII. The only
+    exceptions are (a) test cases testing the non-ASCII features, and
+    (b) names of authors. Authors whose names are not based on the
+    latin alphabet MUST provide a latin transliteration of their
+    names.
+
+    Open source projects with a global audience are encouraged to
+    adopt a similar policy.
+
+
+Imports
+    - Imports should usually be on separate lines, e.g.:
+
+        Yes: import os
+             import sys
+
+        No:  import sys, os
+
+      it's okay to say this though:
+
+        from subprocess import Popen, PIPE
+
+    - Imports are always put at the top of the file, just after any module
+      comments and docstrings, and before module globals and constants.
+
+      Imports should be grouped in the following order:
+
+      1. standard library imports
+      2. related third party imports
+      3. local application/library specific imports
+
+      You should put a blank line between each group of imports.
+
+      Put any relevant __all__ specification after the imports.
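+
+      For illustration, a module header following this order might look like
+      this (the module names below are only placeholders):
+
+        # standard library
+        import os
+        import sys
+
+        # third party
+        import pexpect
+
+        # local application
+        from lib.test_suite import TestSuite
+
+        __all__ = ['TestSuite']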
+
+    - Relative imports for intra-package imports are highly discouraged.
+      Always use the absolute package path for all imports.
+      Even now that PEP 328 [7] is fully implemented in Python 2.5,
+      its style of explicit relative imports is actively discouraged;
+      absolute imports are more portable and usually more readable.
+
+    - When importing a class from a class-containing module, it's usually okay
+      to spell this
+
+        from myclass import MyClass
+        from foo.bar.yourclass import YourClass
+
+      If this spelling causes local name clashes, then spell them
+
+        import myclass
+        import foo.bar.yourclass
+
+      and use "myclass.MyClass" and "foo.bar.yourclass.YourClass"
+
+
+Whitespace in Expressions and Statements
+  Pet Peeves
+
+    Avoid extraneous whitespace in the following situations:
+
+    - Immediately inside parentheses, brackets or braces.
+
+      Yes: spam(ham[1], {eggs: 2})
+      No:  spam( ham[ 1 ], { eggs: 2 } )
+
+    - Immediately before a comma, semicolon, or colon:
+
+      Yes: if x == 4: print x, y; x, y = y, x
+      No:  if x == 4 : print x , y ; x , y = y , x
+
+    - Immediately before the open parenthesis that starts the argument
+      list of a function call:
+
+      Yes: spam(1)
+      No:  spam (1)
+
+    - Immediately before the open parenthesis that starts an indexing or
+      slicing:
+
+      Yes: dict['key'] = list[index]
+      No:  dict ['key'] = list [index]
+
+    - More than one space around an assignment (or other) operator to
+      align it with another.
+
+      Yes:
+
+          x = 1
+          y = 2
+          long_variable = 3
+
+      No:
+
+          x             = 1
+          y             = 2
+          long_variable = 3
+
+
+  Other Recommendations
+
+    - Always surround these binary operators with a single space on
+      either side: assignment (=), augmented assignment (+=, -= etc.),
+      comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
+      Booleans (and, or, not).
+
+    - Use spaces around arithmetic operators:
+
+      Yes:
+
+          i = i + 1
+          submitted += 1
+          x = x * 2 - 1
+          hypot2 = x * x + y * y
+          c = (a + b) * (a - b)
+
+      No:
+
+          i=i+1
+          submitted +=1
+          x = x*2 - 1
+          hypot2 = x*x + y*y
+          c = (a+b) * (a-b)
+
+    - Don't use spaces around the '=' sign when used to indicate a
+      keyword argument or a default parameter value.
+
+      Yes:
+
+          def complex(real, imag=0.0):
+              return magic(r=real, i=imag)
+
+      No:
+
+          def complex(real, imag = 0.0):
+              return magic(r = real, i = imag)
+
+    - Compound statements (multiple statements on the same line) are
+      generally discouraged.
+
+      Yes:
+
+          if foo == 'blah':
+              do_blah_thing()
+          do_one()
+          do_two()
+          do_three()
+
+      Rather not:
+
+          if foo == 'blah': do_blah_thing()
+          do_one(); do_two(); do_three()
+
+    - While sometimes it's okay to put an if/for/while with a small
+      body on the same line, never do this for multi-clause
+      statements.  Also avoid folding such long lines!
+
+      Rather not:
+
+          if foo == 'blah': do_blah_thing()
+          for x in lst: total += x
+          while t < 10: t = delay()
+
+      Definitely not:
+
+          if foo == 'blah': do_blah_thing()
+          else: do_non_blah_thing()
+
+          try: something()
+          finally: cleanup()
+
+          do_one(); do_two(); do_three(long, argument,
+                                       list, like, this)
+
+          if foo == 'blah': one(); two(); three()
+
+Comments
+    Comments that contradict the code are worse than no comments.  Always make
+    a priority of keeping the comments up-to-date when the code changes!
+
+    Comments should be complete sentences.  If a comment is a phrase or
+    sentence, its first word should be capitalized, unless it is an identifier
+    that begins with a lower case letter (never alter the case of
+    identifiers!).
+
+    If a comment is short, the period at the end can be omitted.  Block
+    comments generally consist of one or more paragraphs built out of complete
+    sentences, and each sentence should end in a period.
+
+    You should use two spaces after a sentence-ending period.
+
+    When writing English, Strunk and White apply.
+
+    Python coders from non-English speaking countries: please write
+    your comments in English, unless you are 120% sure that the code
+    will never be read by people who don't speak your language.
+
+
+  Block Comments
+
+    Block comments generally apply to some (or all) code that follows them,
+    and are indented to the same level as that code.  Each line of a block
+    comment starts with a # and a single space (unless it is indented text
+    inside the comment).
+
+    Paragraphs inside a block comment are separated by a line containing a
+    single #.
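+
+    A sketch of the layout (the code being commented is arbitrary):
+
+        sizes = [2048, 4096, 10240]
+
+        # Convert the sizes from bytes to kilobytes so that they match the
+        # units used in the rest of the report.
+        #
+        # A second paragraph of the block comment is separated from the
+        # first by a line containing a single #.
+        sizes = [size / 1024 for size in sizes]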
+
+  Inline Comments
+
+    Use inline comments sparingly.
+
+    An inline comment is a comment on the same line as a statement.  Inline
+    comments should be separated by at least two spaces from the statement.
+    They should start with a # and a single space.
+
+    Inline comments are unnecessary and in fact distracting if they state
+    the obvious.  Don't do this:
+
+        x = x + 1                 # Increment x
+
+    But sometimes, this is useful:
+
+        x = x + 1                 # Compensate for border
+
+
+Documentation Strings
+    Conventions for writing good documentation strings (a.k.a. "docstrings")
+    are immortalized in PEP 257 [3].
+
+    - Write docstrings for all public modules, functions, classes, and
+      methods.  Docstrings are not necessary for non-public methods, but you
+      should have a comment that describes what the method does.  This comment
+      should appear after the "def" line.
+
+    - PEP 257 describes good docstring conventions.  Note that most
+      importantly, the """ that ends a multiline docstring should be on a line
+      by itself, and preferably preceded by a blank line, e.g.:
+
+      """Return a foobang
+
+      Optional plotz says to frobnicate the bizbaz first.
+
+      """
+
+    - For one liner docstrings, it's okay to keep the closing """ on the same
+      line.
+
+
+Version Bookkeeping
+    If you have to have Subversion, CVS, or RCS crud in your source file, do
+    it as follows.
+
+        __version__ = "$Revision: 00f8e3bb1197 $"
+        # $Source$
+
+    These lines should be included after the module's docstring, before any
+    other code, separated by a blank line above and below.
+
+
+Naming Conventions
+    The naming conventions of Python's library are a bit of a mess, so we'll
+    never get this completely consistent -- nevertheless, here are the
+    currently recommended naming standards.  New modules and packages
+    (including third party frameworks) should be written to these standards,
+    but where an existing library has a different style, internal consistency
+    is preferred.
+
+  Descriptive: Naming Styles
+
+    There are a lot of different naming styles.  It helps to be able to
+    recognize what naming style is being used, independently from what they
+    are used for.
+
+    The following naming styles are commonly distinguished:
+
+    - b (single lowercase letter)
+
+    - B (single uppercase letter)
+
+    - lowercase
+
+    - lower_case_with_underscores
+
+    - UPPERCASE
+
+    - UPPER_CASE_WITH_UNDERSCORES
+
+    - CapitalizedWords (or CapWords, or CamelCase -- so named because
+      of the bumpy look of its letters[4]).  This is also sometimes known as
+      StudlyCaps.
+
+      Note: When using abbreviations in CapWords, capitalize all the letters
+      of the abbreviation.  Thus HTTPServerError is better than
+      HttpServerError.
+
+    - mixedCase (differs from CapitalizedWords by initial lowercase
+      character!)
+
+    - Capitalized_Words_With_Underscores (ugly!)
+
+    There's also the style of using a short unique prefix to group related
+    names together.  This is not used much in Python, but it is mentioned for
+    completeness.  For example, the os.stat() function returns a tuple whose
+    items traditionally have names like st_mode, st_size, st_mtime and so on.
+    (This is done to emphasize the correspondence with the fields of the
+    POSIX system call struct, which helps programmers familiar with that.)
+
+    The X11 library uses a leading X for all its public functions.  In Python,
+    this style is generally deemed unnecessary because attribute and method
+    names are prefixed with an object, and function names are prefixed with a
+    module name.
+
+    In addition, the following special forms using leading or trailing
+    underscores are recognized (these can generally be combined with any case
+    convention):
+
+    - _single_leading_underscore: weak "internal use" indicator.  E.g. "from M
+      import *" does not import objects whose name starts with an underscore.
+
+    - single_trailing_underscore_: used by convention to avoid conflicts with
+      Python keyword, e.g.
+
+      Tkinter.Toplevel(master, class_='ClassName')
+
+    - __double_leading_underscore: when naming a class attribute, invokes name
+      mangling (inside class FooBar, __boo becomes _FooBar__boo; see below).
+
+    - __double_leading_and_trailing_underscore__: "magic" objects or
+      attributes that live in user-controlled namespaces.  E.g. __init__,
+      __import__ or __file__.  Never invent such names; only use them
+      as documented.
+
+  Prescriptive: Naming Conventions
+
+    Names to Avoid
+
+      Never use the characters `l' (lowercase letter el), `O' (uppercase
+      letter oh), or `I' (uppercase letter eye) as single character variable
+      names.
+
+      In some fonts, these characters are indistinguishable from the numerals
+      one and zero.  When tempted to use `l', use `L' instead.
+
+    Package and Module Names
+
+      Modules should have short, all-lowercase names.  Underscores can be used
+      in the module name if it improves readability.  Python packages should
+      also have short, all-lowercase names, although the use of underscores is
+      discouraged.
+
+      Since module names are mapped to file names, and some file systems are
+      case insensitive and truncate long names, it is important that module
+      names be chosen to be fairly short -- this won't be a problem on Unix,
+      but it may be a problem when the code is transported to older Mac or
+      Windows versions, or DOS.
+
+      When an extension module written in C or C++ has an accompanying Python
+      module that provides a higher level (e.g. more object oriented)
+      interface, the C/C++ module has a leading underscore (e.g. _socket).
+
+    Class Names
+
+      Almost without exception, class names use the CapWords convention.
+      Classes for internal use have a leading underscore in addition.
+
+    Exception Names
+
+      Because exceptions should be classes, the class naming convention
+      applies here.  However, you should use the suffix "Error" on your
+      exception names (if the exception actually is an error).
+
+    Global Variable Names
+
+      (Let's hope that these variables are meant for use inside one module
+      only.)  The conventions are about the same as those for functions.
+
+      Modules that are designed for use via "from M import *" should use the
+      __all__ mechanism to prevent exporting globals, or use the older
+      convention of prefixing such globals with an underscore (which you might
+      want to do to indicate these globals are "module non-public").
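+
+      For example, a module written for star-import could declare (the names
+      here are illustrative):
+
+          __all__ = ['connect', 'disconnect']
+
+          _connection_cache = {}    # module non-public, not exported
+
+          def connect(port):
+              return _connection_cache.setdefault(port, object())
+
+          def disconnect(port):
+              _connection_cache.pop(port, None)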
+
+    Function Names
+
+      Function names should be lowercase, with words separated by underscores
+      as necessary to improve readability.
+
+      mixedCase is allowed only in contexts where that's already the
+      prevailing style (e.g. threading.py), to retain backwards compatibility.
+
+    Function and method arguments
+
+      Always use 'self' for the first argument to instance methods.
+
+      Always use 'cls' for the first argument to class methods.
+
+      If a function argument's name clashes with a reserved keyword, it is
+      generally better to append a single trailing underscore rather than use
+      an abbreviation or spelling corruption.  Thus "print_" is better than
+      "prnt".  (Perhaps better is to avoid such clashes by using a synonym.)
+
+    Method Names and Instance Variables
+
+      Use the function naming rules: lowercase with words separated by
+      underscores as necessary to improve readability.
+
+      Use one leading underscore only for non-public methods and instance
+      variables.
+
+      To avoid name clashes with subclasses, use two leading underscores to
+      invoke Python's name mangling rules.
+
+      Python mangles these names with the class name: if class Foo has an
+      attribute named __a, it cannot be accessed by Foo.__a.  (An insistent
+      user could still gain access by calling Foo._Foo__a.)  Generally, double
+      leading underscores should be used only to avoid name conflicts with
+      attributes in classes designed to be subclassed.
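+
+      A minimal illustration of the mangling (the names are arbitrary):
+
+          class Account(object):
+              def __init__(self):
+                  self.__balance = 0        # stored as _Account__balance
+
+          acct = Account()
+          print acct._Account__balance      # possible, but discouraged
+          # acct.__balance would raise AttributeError here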
+
+      Note: there is some controversy about the use of __names (see below).
+
+    Constants
+
+       Constants are usually defined on a module level and written in all
+       capital letters with underscores separating words.  Examples include
+       MAX_OVERFLOW and TOTAL.
+
+    Designing for inheritance
+
+      Always decide whether a class's methods and instance variables
+      (collectively: "attributes") should be public or non-public.  If in
+      doubt, choose non-public; it's easier to make it public later than to
+      make a public attribute non-public.
+
+      Public attributes are those that you expect unrelated clients of your
+      class to use, with your commitment to avoid backward incompatible
+      changes.  Non-public attributes are those that are not intended to be
+      used by third parties; you make no guarantees that non-public attributes
+      won't change or even be removed.
+
+      We don't use the term "private" here, since no attribute is really
+      private in Python (without a generally unnecessary amount of work).
+
+      Another category of attributes are those that are part of the "subclass
+      API" (often called "protected" in other languages).  Some classes are
+      designed to be inherited from, either to extend or modify aspects of the
+      class's behavior.  When designing such a class, take care to make
+      explicit decisions about which attributes are public, which are part of
+      the subclass API, and which are truly only to be used by your base
+      class.
+
+      With this in mind, here are the Pythonic guidelines:
+
+      - Public attributes should have no leading underscores.
+
+      - If your public attribute name collides with a reserved keyword, append
+        a single trailing underscore to your attribute name.  This is
+        preferable to an abbreviation or corrupted spelling.  (However,
+        notwithstanding this rule, 'cls' is the preferred spelling for any
+        variable or argument which is known to be a class, especially the
+        first argument to a class method.)
+
+        Note 1: See the argument name recommendation above for class methods.
+
+      - For simple public data attributes, it is best to expose just the
+        attribute name, without complicated accessor/mutator methods.  Keep in
+        mind that Python provides an easy path to future enhancement, should
+        you find that a simple data attribute needs to grow functional
+        behavior.  In that case, use properties to hide functional
+        implementation behind simple data attribute access syntax.
+
+        Note 1: Properties only work on new-style classes.
+
+        Note 2: Try to keep the functional behavior side-effect free, although
+        side-effects such as caching are generally fine.
+
+        Note 3: Avoid using properties for computationally expensive
+        operations; the attribute notation makes the caller believe
+        that access is (relatively) cheap.
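+
+        As an illustration of this path (a hypothetical class):
+
+            class Circle(object):
+
+                def __init__(self, radius):
+                    self.radius = radius    # simple public data attribute
+
+                @property
+                def area(self):
+                    # Behavior added later, still read as plain attribute
+                    # access: circle.area
+                    return 3.14159 * self.radius ** 2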
+
+      - If your class is intended to be subclassed, and you have attributes
+        that you do not want subclasses to use, consider naming them with
+        double leading underscores and no trailing underscores.  This invokes
+        Python's name mangling algorithm, where the name of the class is
+        mangled into the attribute name.  This helps avoid attribute name
+        collisions should subclasses inadvertently contain attributes with the
+        same name.
+
+        Note 1: Note that only the simple class name is used in the mangled
+        name, so if a subclass chooses both the same class name and attribute
+        name, you can still get name collisions.
+
+        Note 2: Name mangling can make certain uses, such as debugging and
+        __getattr__(), less convenient.  However the name mangling algorithm
+        is well documented and easy to perform manually.
+
+        Note 3: Not everyone likes name mangling.  Try to balance the
+        need to avoid accidental name clashes with potential use by
+        advanced callers.
+
+
+Programming Recommendations
+    - Code should be written in a way that does not disadvantage other
+      implementations of Python (PyPy, Jython, IronPython, Pyrex, Psyco,
+      and such).
+
+      For example, do not rely on CPython's efficient implementation of
+      in-place string concatenation for statements in the form a+=b or a=a+b.
+      Those statements run more slowly in Jython.  In performance sensitive
+      parts of the library, the ''.join() form should be used instead.  This
+      will ensure that concatenation occurs in linear time across various
+      implementations.
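+
+      For instance, accumulating the pieces in a list and joining them once
+      keeps the concatenation linear (the names are arbitrary):
+
+          chunks = []
+          for i in range(1000):
+              chunks.append(str(i))
+          result = ''.join(chunks)    # not result += str(i) in the loop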
+
+    - Comparisons to singletons like None should always be done with
+      'is' or 'is not', never the equality operators.
+
+      Also, beware of writing "if x" when you really mean "if x is not None"
+      -- e.g. when testing whether a variable or argument that defaults to
+      None was set to some other value.  The other value might have a type
+      (such as a container) that could be false in a boolean context!
+
+    - When implementing ordering operations with rich comparisons, it is best to
+      implement all six operations (__eq__, __ne__, __lt__, __le__, __gt__,
+      __ge__) rather than relying on other code to only exercise a particular
+      comparison.
+
+      To minimize the effort involved, the functools.total_ordering() decorator
+      provides a tool to generate missing comparison methods.
+
+      PEP 207 indicates that reflexivity rules *are* assumed by Python.  Thus,
+      the interpreter may swap y>x with x<y, y>=x with x<=y, and may swap the
+      arguments of x==y and x!=y.  The sort() and min() operations are
+      guaranteed to use the < operator and the max() function uses the >
+      operator.  However, it is best to implement all six operations so that
+      confusion doesn't arise in other contexts.
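+
+      For instance, a sketch using the decorator (requires Python 2.7 or
+      later; the class is hypothetical):
+
+          import functools
+
+          @functools.total_ordering
+          class Version(object):
+
+              def __init__(self, num):
+                  self.num = num
+
+              def __eq__(self, other):
+                  return self.num == other.num
+
+              def __lt__(self, other):
+                  return self.num < other.num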
+
+    - Use class-based exceptions.
+
+      String exceptions in new code are forbidden, because this language
+      feature is being removed in Python 2.6.
+
+      Modules or packages should define their own domain-specific base
+      exception class, which should be subclassed from the built-in Exception
+      class.  Always include a class docstring.  E.g.:
+
+        class MessageError(Exception):
+            """Base class for errors in the email package."""
+
+      Class naming conventions apply here, although you should add the suffix
+      "Error" to your exception classes, if the exception is an error.
+      Non-error exceptions need no special suffix.
+
+    - When raising an exception, use "raise ValueError('message')" instead of
+      the older form "raise ValueError, 'message'".
+
+      The paren-using form is preferred because when the exception arguments
+      are long or include string formatting, you don't need to use line
+      continuation characters thanks to the containing parentheses.  The older
+      form will be removed in Python 3000.
+
+    - When catching exceptions, mention specific exceptions
+      whenever possible instead of using a bare 'except:' clause.
+
+      For example, use:
+
+          try:
+              import platform_specific_module
+          except ImportError:
+              platform_specific_module = None
+
+      A bare 'except:' clause will catch SystemExit and KeyboardInterrupt
+      exceptions, making it harder to interrupt a program with Control-C,
+      and can disguise other problems.  If you want to catch all
+      exceptions that signal program errors, use 'except Exception:'.
+
+      A good rule of thumb is to limit use of bare 'except' clauses to two
+      cases:
+
+         1) If the exception handler will be printing out or logging
+            the traceback; at least the user will be aware that an
+            error has occurred.
+
+         2) If the code needs to do some cleanup work, but then lets
+            the exception propagate upwards with 'raise'.
+            'try...finally' is a better way to handle this case.
+
+    - Additionally, for all try/except clauses, limit the 'try' clause
+      to the absolute minimum amount of code necessary.  Again, this
+      avoids masking bugs.
+
+      Yes:
+
+          try:
+              value = collection[key]
+          except KeyError:
+              return key_not_found(key)
+          else:
+              return handle_value(value)
+
+      No:
+
+          try:
+              # Too broad!
+              return handle_value(collection[key])
+          except KeyError:
+              # Will also catch KeyError raised by handle_value()
+              return key_not_found(key)
+
+    - Use string methods instead of the string module.
+
+      String methods are always much faster and share the same API with
+      unicode strings.  Override this rule if backward compatibility with
+      Pythons older than 2.0 is required.
+
+    - Use ''.startswith() and ''.endswith() instead of string slicing to check
+      for prefixes or suffixes.
+
+      startswith() and endswith() are cleaner and less error prone.  For
+      example:
+
+        Yes: if foo.startswith('bar'):
+
+        No:  if foo[:3] == 'bar':
+
+      The exception is if your code must work with Python 1.5.2 (but let's
+      hope not!).
+
+    - Object type comparisons should always use isinstance() instead
+      of comparing types directly.
+
+        Yes: if isinstance(obj, int):
+
+        No:  if type(obj) is type(1):
+
+      When checking if an object is a string, keep in mind that it might be a
+      unicode string too!  In Python 2.3, str and unicode have a common base
+      class, basestring, so you can do:
+
+        if isinstance(obj, basestring):
+
+    - For sequences, (strings, lists, tuples), use the fact that empty
+      sequences are false.
+
+      Yes: if not seq:
+           if seq:
+
+      No: if len(seq)
+          if not len(seq)
+
+    - Don't write string literals that rely on significant trailing
+      whitespace.  Such trailing whitespace is visually indistinguishable and
+      some editors (or more recently, reindent.py) will trim them.
+
+    - Don't compare boolean values to True or False using ==
+
+        Yes:   if greeting:
+
+        No:    if greeting == True:
+
+        Worse: if greeting is True:
+
+Rules that apply only to the standard library
+    - Do not use function type annotations in the standard library.
+      These are reserved for users and third-party modules.  See
+      PEP 3107 and bug 10899 for details.
+
+
+References
+    [1] PEP 7, Style Guide for C Code, van Rossum
+
+    [2] http://www.python.org/doc/essays/styleguide.html
+
+    [3] PEP 257, Docstring Conventions, Goodger, van Rossum
+
+    [4] http://www.wikipedia.com/wiki/CamelCase
+
+    [5] Barry's GNU Mailman style guide
+        http://barry.warsaw.us/software/STYLEGUIDE.txt
+
+    [6] PEP 20, The Zen of Python
+
+    [7] PEP 328, Imports: Multi-Line and Absolute/Relative
+
+
+Copyright
+    This document has been placed in the public domain.
+
diff --git a/test/lib/box.py b/test/lib/box.py
index a2603ccd68..fa39f5722f 100644
--- a/test/lib/box.py
+++ b/test/lib/box.py
@@ -27,42 +27,43 @@ import struct
 from tarantool_connection import TarantoolConnection
 
 class Box(TarantoolConnection):
-  def recvall(self, length):
-    res = ""
-    while len(res) < length:
-      buf = self.socket.recv(length - len(res))
-      if not buf:
-        raise RuntimeError("Got EOF from socket, the server has "
-                           "probably crashed")
-      res = res + buf
-    return res
 
-  def execute_no_reconnect(self, command, silent=True):
-    statement = sql.parse("sql", command)
-    if statement == None:
-      return "You have an error in your SQL syntax\n"
+    def recvall(self, length):
+        res = ""
+        while len(res) < length:
+            buf = self.socket.recv(length - len(res))
+            if not buf:
+                raise RuntimeError("Got EOF from socket, the server has "
+                                   "probably crashed")
+            res = res + buf
+        return res
 
-    payload = statement.pack()
-    header = struct.pack("<lll", statement.reqeust_type, len(payload), 0)
+    def execute_no_reconnect(self, command, silent=True):
+        statement = sql.parse("sql", command)
+        if statement == None:
+            return "You have an error in your SQL syntax\n"
 
-    self.socket.sendall(header)
-    if len(payload):
-      self.socket.sendall(payload)
+        payload = statement.pack()
+        header = struct.pack("<lll", statement.reqeust_type, len(payload), 0)
 
-    IPROTO_HEADER_SIZE = 12
+        self.socket.sendall(header)
+        if len(payload):
+            self.socket.sendall(payload)
 
-    header = self.recvall(IPROTO_HEADER_SIZE)
+        IPROTO_HEADER_SIZE = 12
 
-    response_len = struct.unpack("<lll", header)[1]
+        header = self.recvall(IPROTO_HEADER_SIZE)
 
-    if response_len:
-      response = self.recvall(response_len)
-    else:
-      response = None
+        response_len = struct.unpack("<lll", header)[1]
 
-    if not silent:
-      print command
-      print statement.unpack(response)
+        if response_len:
+            response = self.recvall(response_len)
+        else:
+            response = None
 
-    return statement.unpack(response) + "\n"
+        if not silent:
+            print command
+            print statement.unpack(response)
+
+        return statement.unpack(response) + "\n"
 
diff --git a/test/lib/server.py b/test/lib/server.py
index c825829a4e..2c8d47d733 100644
--- a/test/lib/server.py
+++ b/test/lib/server.py
@@ -12,253 +12,274 @@ import glob
 import ConfigParser
 
 def wait_until_connected(port):
-  """Wait until the server is started and accepting connections"""
-  is_connected = False
-  while not is_connected:
-    try:
-      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-      sock.connect(("localhost", port))
-      is_connected = True
-      sock.close()
-    except socket.error as e:
-      time.sleep(0.001)
+    """Wait until the server is started and accepting connections"""
+
+    is_connected = False
+    while not is_connected:
+        try:
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.connect(("localhost", port))
+            is_connected = True
+            sock.close()
+        except socket.error as e:
+            time.sleep(0.001)
 
 def check_port(port):
-  """Check if the port we're connecting to is available"""
-  try:
-    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    sock.connect(("localhost", port))
-  except socket.error as e:
-     return
-  raise RuntimeError("The server is already running on port {0}".format(port))
+    """Check if the port we're connecting to is available"""
+
+    try:
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.connect(("localhost", port))
+    except socket.error as e:
+        return
+    raise RuntimeError("The server is already running on port {0}".format(port))
 
 def prepare_gdb(args):
-  """Prepare server startup arguments to run under gdb."""
-  if "TERM" in os.environ:
-    term = os.environ["TERM"]
-  else:
-    term = "xterm"
+    """Prepare server startup arguments to run under gdb."""
+
+    if "TERM" in os.environ:
+        term = os.environ["TERM"]
+    else:
+        term = "xterm"
 
-  if term not in ["xterm", "rxvt", "urxvt", "gnome-terminal", "konsole"]:
-    raise RuntimeError("--gdb: unsupported terminal {0}".format(term))
+    if term not in ["xterm", "rxvt", "urxvt", "gnome-terminal", "konsole"]:
+        raise RuntimeError("--gdb: unsupported terminal {0}".format(term))
 
-  args = [ term, "-e", "gdb", "-ex", "break main", "-ex", "run" ] + args
-  return args
+    args = [ term, "-e", "gdb", "-ex", "break main", "-ex", "run" ] + args
+    return args
 
 def prepare_valgrind(args, valgrind_log):
-  "Prepare server startup arguments to run under valgrind."
-  args = [ "valgrind", "--log-file={0}".format(valgrind_log), "--quiet" ] + args
-  return args
+    "Prepare server startup arguments to run under valgrind."
+    args = [ "valgrind", "--log-file={0}".format(valgrind_log), "--quiet" ] + args
+    return args
 
 def check_tmpfs_exists():
-  return os.uname()[0] in 'Linux' and os.path.isdir("/dev/shm")
+    return os.uname()[0] in 'Linux' and os.path.isdir("/dev/shm")
 
 def create_tmpfs_vardir(vardir):
-  os.makedirs(os.path.join("/dev/shm", vardir))
-  os.symlink(os.path.join("/dev/shm", vardir), vardir)
+    os.makedirs(os.path.join("/dev/shm", vardir))
+    os.symlink(os.path.join("/dev/shm", vardir), vardir)
 
 class Server(object):
-  """Server represents a single server instance. Normally, the
-  program operates with only one server, but in future we may add
-  replication slaves. The server is started once at the beginning
-  of each suite, and stopped at the end."""
-
-  def __new__(cls, core=None, module=None):
-    if core  == None:
-      return super(Server, cls).__new__(cls)
-    mdlname = "lib.{0}_server".format(core)
-    clsname = "{0}Server".format(core.title())
-    corecls = __import__(mdlname, fromlist=clsname).__dict__[clsname]
-    return corecls.__new__(corecls, core, module)
-
-  def __init__(self, core, module):
-    self.core = core
-    self.module = module
-    self.re_vardir_cleanup = ['*.core.*', 'core']
-    self.process = None
-    self.default_config_name = None
-    self.config = None
-    self.vardir = None
-    self.valgrind_log = "valgrind.log"
-    self.pidfile = None
-    self.port = None
-    self.binary = None
-    self.is_started = False
-    self.mem = False
-    self.start_and_exit = False
-    self.gdb = False
-    self.valgrind = False
-
-  def find_exe(self, builddir, silent=True):
-    "Locate server executable in the build dir or in the PATH."
-    exe_name = self.default_bin_name()
-    path = builddir + os.pathsep + os.environ["PATH"]
-    if not silent:
-      print "  Looking for server binary in {0} ...".format(path)
-    for dir in path.split(os.pathsep):
-      exe = os.path.join(dir, exe_name)
-      if os.access(exe, os.X_OK):
-        return exe
-    raise RuntimeError("Can't find server executable in " + path)
-
-  def cleanup(self, full=False):
-    trash = []
-    for re in self.re_vardir_cleanup:
-      trash += glob.glob(os.path.join(self.vardir, re))
-    for filename in trash:
-      os.remove(filename)
-    if full:
-      shutil.rmtree(self.vardir)
-
-  def configure(self, config):
-    self.config = os.path.abspath(config)
-
-  def install(self, binary=None, vardir=None, mem=None, silent=True):
-    """Install server instance: create necessary directories and files.
-    The server working directory is taken from 'vardir',
-    specified in the program options."""
-
-    if vardir != None: self.vardir = vardir
-    if binary != None: self.binary = os.path.abspath(binary)
-    if mem != None: self.mem = mem
-
-    self.pidfile = os.path.abspath(os.path.join(self.vardir, self.pidfile))
-    self.valgrind_log = os.path.abspath(os.path.join(self.vardir, self.valgrind_log))
-    if not silent:
-      print "Installing the server..."
-      print "  Found executable at " + self.binary
-      print "  Creating and populating working directory in " + self.vardir + "..."
-    if os.access(self.vardir, os.F_OK):
-      if not silent:
-        print "  Found old vardir, deleting..."
-      self.kill_old_server()
-      self.cleanup()
-    else:
-      if (self.mem == True and check_tmpfs_exists() and
-          os.path.basename(self.vardir) == self.vardir):
-        create_tmpfs_vardir(self.vardir)
-      else:
-        os.makedirs(self.vardir)
-    shutil.copy(self.config, os.path.join(self.vardir, self.default_config_name))
-
-  def init(self):
-    pass
-
-  def _start_and_exit(self, args, gdb=None, valgrind=None):
-    if gdb != None: self.gdb = gdb
-    if valgrind != None: self.valgrind = valgrind
-
-    if self.gdb == True:
-        raise RuntimeError("'--gdb' and '--start-and-exit' can't be defined together")
-    with daemon.DaemonContext(working_directory = self.vardir):
-      os.execvp(args[0], args)
-
-  def prepare_args(self):
-    return [self.binary]
-
-  def start(self, start_and_exit=None, gdb=None, valgrind=None, silent=True):
-    if start_and_exit != None: self.start_and_exit = start_and_exit
-    if gdb != None: self.gdb = gdb
-    if valgrind != None: self.valgrind = valgrind
-
-    if self.is_started:
-      if not silent:
-        print "The server is already started."
-      return
-    if not silent:
-      print "Starting the server..."
-      version = self.version()
-      print "Starting {0} {1}.".format(os.path.basename(self.binary),
-                                       version)
-    check_port(self.port)
-    args = self.prepare_args()
-    if self.gdb:
-      args = prepare_gdb(args)
-    elif self.valgrind:
-      args = prepare_valgrind(args, self.valgrind_log)
-    if self.start_and_exit:
-      self._start_and_exit(args)
-    else:
-      self.process = pexpect.spawn(args[0], args[1:], cwd = self.vardir)
-# wait until the server is connectedk
-    wait_until_connected(self.port)
-# Set is_started flag, to nicely support cleanup during an exception.
-    self.is_started = True
-    with open(self.pidfile) as f:
-      self.pid = int(f.read())
-
-  def stop(self, silent=True):
-    """Stop server instance. Do nothing if the server is not started,
-    to properly shut down the server in case of an exception during
-    start up."""
-    if not self.is_started:
-       if not silent:
-         print "The server is not started."
-       return
-    if not silent:
-      print "Stopping the server..."
-
-    if self.process == None:
-      self.kill_old_server()
-    else:
-      self.kill_server()
-
-    if self.gdb:
-      self.process.expect(pexpect.EOF, timeout = 1 << 30)
-    else:
-      self.process.expect(pexpect.EOF)
-    self.is_started = False
-    self.pid = None
-
-  def deploy(self, config=None, binary=None, vardir=None,
-             mem=None, start_and_exit=None, gdb=None, valgrind=None, silent=True):
-    if config != None: self.config = config
-    if binary != None: self.binary = binary
-    if vardir != None: self.vardir = vardir
-    if mem != None: self.mem = mem
-    if start_and_exit != None: self.start_and_exit = start_and_exit
-    if gdb != None: self.gdb = gdb
-    if valgrind != None: self.valgrind = valgrind
-
-    self.configure(self.config)
-    self.install(self.binary, self.vardir, self.mem, silent)
-    self.init()
-    self.start(self.start_and_exit, self.gdb, self.valgrind, silent)
-
-  def restart(self):
-    self.stop(silent=True)
-    self.start(silent=True)
-
-  def test_option(self, option_list_str):
-      args = [self.binary] + option_list_str.split()
-      print " ".join([os.path.basename(self.binary)] + args[1:])
-      output = subprocess.Popen(args,
-                                cwd = self.vardir,
-                                stdout = subprocess.PIPE,
-                                stderr = subprocess.STDOUT).stdout.read()
-      print output
-
-  def kill_server(self):
-    """Kill a server which was started correctly"""
-    try:
-      os.kill(self.pid, signal.SIGTERM)
-    except OSError as e:
-      print e
-      pass
-
-  def kill_old_server(self, silent=True):
-    """Kill old server instance if it exists."""
-    if os.access(self.pidfile, os.F_OK) == False:
-      return # Nothing to do
-    pid = 0
-    with open(self.pidfile) as f:
-      pid = int(f.read())
-    if not silent:
-      print "  Found old server, pid {0}, killing...".format(pid)
-    try:
-      os.kill(pid, signal.SIGTERM)
-      while os.kill(pid, 0) != -1:
-        time.sleep(0.001)
-    except OSError:
-      pass
+    """Server represents a single server instance. Normally, the
+    program operates with only one server, but in future we may add
+    replication slaves. The server is started once at the beginning
+    of each suite, and stopped at the end."""
+
+    def __new__(cls, core=None, module=None):
+        if core  == None:
+            return super(Server, cls).__new__(cls)
+        mdlname = "lib.{0}_server".format(core)
+        clsname = "{0}Server".format(core.title())
+        corecls = __import__(mdlname, fromlist=clsname).__dict__[clsname]
+        return corecls.__new__(corecls, core, module)
+
+    def __init__(self, core, module):
+        self.core = core
+        self.module = module
+        self.re_vardir_cleanup = ['*.core.*', 'core']
+        self.process = None
+        self.default_config_name = None
+        self.config = None
+        self.vardir = None
+        self.valgrind_log = "valgrind.log"
+        self.pidfile = None
+        self.port = None
+        self.binary = None
+        self.is_started = False
+        self.mem = False
+        self.start_and_exit = False
+        self.gdb = False
+        self.valgrind = False
+
+    def find_exe(self, builddir, silent=True):
+        "Locate server executable in the build dir or in the PATH."
+        exe_name = self.default_bin_name()
+        path = builddir + os.pathsep + os.environ["PATH"]
+
+        if not silent:
+            print "  Looking for server binary in {0} ...".format(path)
+
+        for dir in path.split(os.pathsep):
+            exe = os.path.join(dir, exe_name)
+            if os.access(exe, os.X_OK):
+                return exe
+
+        raise RuntimeError("Can't find server executable in " + path)
+
+    def cleanup(self, full=False):
+        trash = []
+
+        for re in self.re_vardir_cleanup:
+            trash += glob.glob(os.path.join(self.vardir, re))
+
+        for filename in trash:
+            os.remove(filename)
+
+        if full:
+            shutil.rmtree(self.vardir)
+
+    def configure(self, config):
+        self.config = os.path.abspath(config)
+
+    def install(self, binary=None, vardir=None, mem=None, silent=True):
+        """Install server instance: create necessary directories and files.
+        The server working directory is taken from 'vardir',
+        specified in the program options."""
+
+        if vardir != None: self.vardir = vardir
+        if binary != None: self.binary = os.path.abspath(binary)
+        if mem != None: self.mem = mem
+
+        self.pidfile = os.path.abspath(os.path.join(self.vardir, self.pidfile))
+        self.valgrind_log = os.path.abspath(os.path.join(self.vardir, self.valgrind_log))
+
+        if not silent:
+            print "Installing the server..."
+            print "  Found executable at " + self.binary
+            print "  Creating and populating working directory in " + self.vardir + "..."
+
+        if os.access(self.vardir, os.F_OK):
+            if not silent:
+                print "  Found old vardir, deleting..."
+            self.kill_old_server()
+            self.cleanup()
+        else:
+            if (self.mem == True and check_tmpfs_exists() and
+                os.path.basename(self.vardir) == self.vardir):
+                create_tmpfs_vardir(self.vardir)
+            else:
+                os.makedirs(self.vardir)
+
+        shutil.copy(self.config, os.path.join(self.vardir, self.default_config_name))
+
+    def init(self):
+        pass
+
+    def _start_and_exit(self, args, gdb=None, valgrind=None):
+        if gdb != None: self.gdb = gdb
+        if valgrind != None: self.valgrind = valgrind
+
+        if self.gdb == True:
+            raise RuntimeError("'--gdb' and '--start-and-exit' can't be defined together")
+        with daemon.DaemonContext(working_directory = self.vardir):
+            os.execvp(args[0], args)
+
+    def prepare_args(self):
+        return [self.binary]
+
+    def start(self, start_and_exit=None, gdb=None, valgrind=None, silent=True):
+        if start_and_exit != None: self.start_and_exit = start_and_exit
+        if gdb != None: self.gdb = gdb
+        if valgrind != None: self.valgrind = valgrind
+
+        if self.is_started:
+            if not silent:
+                print "The server is already started."
+            return
+
+        if not silent:
+            print "Starting the server..."
+            version = self.version()
+            print "Starting {0} {1}.".format(os.path.basename(self.binary), version)
+
+        check_port(self.port)
+        args = self.prepare_args()
+
+        if self.gdb:
+            args = prepare_gdb(args)
+        elif self.valgrind:
+            args = prepare_valgrind(args, self.valgrind_log)
+
+        if self.start_and_exit:
+            self._start_and_exit(args)
+        else:
+            self.process = pexpect.spawn(args[0], args[1:], cwd = self.vardir)
+
+        # Wait until the server is connected.
+        wait_until_connected(self.port)
+        # Set is_started flag, to nicely support cleanup during an exception.
+        self.is_started = True
+        with open(self.pidfile) as f:
+            self.pid = int(f.read())
+
+    def stop(self, silent=True):
+        """Stop server instance. Do nothing if the server is not started,
+        to properly shut down the server in case of an exception during
+        start up."""
+        if not self.is_started:
+            if not silent:
+                print "The server is not started."
+            return
+
+        if not silent:
+            print "Stopping the server..."
+
+        if self.process == None:
+            self.kill_old_server()
+        else:
+            self.kill_server()
+
+        if self.gdb:
+            self.process.expect(pexpect.EOF, timeout = 1 << 30)
+        else:
+            self.process.expect(pexpect.EOF)
+
+        self.is_started = False
+        self.pid = None
+
+    def deploy(self, config=None, binary=None, vardir=None,
+               mem=None, start_and_exit=None, gdb=None, valgrind=None, silent=True):
+        if config != None: self.config = config
+        if binary != None: self.binary = binary
+        if vardir != None: self.vardir = vardir
+        if mem != None: self.mem = mem
+        if start_and_exit != None: self.start_and_exit = start_and_exit
+        if gdb != None: self.gdb = gdb
+        if valgrind != None: self.valgrind = valgrind
+
+        self.configure(self.config)
+        self.install(self.binary, self.vardir, self.mem, silent)
+        self.init()
+        self.start(self.start_and_exit, self.gdb, self.valgrind, silent)
+
+    def restart(self):
+        self.stop(silent=True)
+        self.start(silent=True)
+
+    def test_option(self, option_list_str):
+        args = [self.binary] + option_list_str.split()
+        print " ".join([os.path.basename(self.binary)] + args[1:])
+        output = subprocess.Popen(args,
+                                  cwd = self.vardir,
+                                  stdout = subprocess.PIPE,
+                                  stderr = subprocess.STDOUT).stdout.read()
+        print output
+
+    def kill_server(self):
+        """Kill a server which was started correctly"""
+        try:
+            os.kill(self.pid, signal.SIGTERM)
+        except OSError as e:
+            print e
+            pass
+
+    def kill_old_server(self, silent=True):
+        """Kill old server instance if it exists."""
+        if os.access(self.pidfile, os.F_OK) == False:
+            return # Nothing to do
+
+        pid = 0
+        with open(self.pidfile) as f:
+            pid = int(f.read())
+
+        if not silent:
+            print "  Found old server, pid {0}, killing...".format(pid)
+
+        try:
+            os.kill(pid, signal.SIGTERM)
+            while os.kill(pid, 0) != -1:
+                time.sleep(0.001)
+        except OSError:
+            pass
 
diff --git a/test/lib/sql_ast.py b/test/lib/sql_ast.py
index 39d563f2c9..f76cc99516 100644
--- a/test/lib/sql_ast.py
+++ b/test/lib/sql_ast.py
@@ -59,238 +59,245 @@ ER = {
 
 
 def format_error(return_code, response):
-  return "An error occurred: {0}, \'{1}'".format(ER[return_code >> 8],
-                                                 response[4:])
+    return "An error occurred: {0}, \'{1}'".format(ER[return_code >> 8],
+                                                   response[4:])
 
 
 def save_varint32(value):
-  """Implement Perl pack's 'w' option, aka base 128 encoding."""
-  res = ''
-  if value >= 1 << 7:
-    if value >= 1 << 14:
-      if value >= 1 << 21:
-        if value >= 1 << 28:
-          res += chr(value >> 28 & 0xff | 0x80)
-        res += chr(value >> 21 & 0xff | 0x80)
-      res += chr(value >> 14 & 0xff | 0x80)
-    res += chr(value >> 7 & 0xff | 0x80)
-  res += chr(value & 0x7F)
-
-  return res
+    """Implement Perl pack's 'w' option, aka base 128 encoding."""
+    res = ''
+    if value >= 1 << 7:
+        if value >= 1 << 14:
+            if value >= 1 << 21:
+                if value >= 1 << 28:
+                    res += chr(value >> 28 & 0xff | 0x80)
+                res += chr(value >> 21 & 0xff | 0x80)
+            res += chr(value >> 14 & 0xff | 0x80)
+        res += chr(value >> 7 & 0xff | 0x80)
+    res += chr(value & 0x7F)
+
+    return res
+
 
 def read_varint32(varint, offset):
-  """Implement Perl unpack's 'w' option, aka base 128 decoding."""
-  res = ord(varint[offset])
-  if ord(varint[offset]) >= 0x80:
-    offset += 1
-    res = ((res - 0x80) << 7) + ord(varint[offset])
+    """Implement Perl unpack's 'w' option, aka base 128 decoding."""
+    res = ord(varint[offset])
     if ord(varint[offset]) >= 0x80:
-      offset += 1
-      res = ((res - 0x80) << 7) + ord(varint[offset])
-      if ord(varint[offset]) >= 0x80:
         offset += 1
         res = ((res - 0x80) << 7) + ord(varint[offset])
         if ord(varint[offset]) >= 0x80:
-          offset += 1
-          res = ((res - 0x80) << 7) + ord(varint[offset])
-  return res, offset + 1
+            offset += 1
+            res = ((res - 0x80) << 7) + ord(varint[offset])
+            if ord(varint[offset]) >= 0x80:
+                offset += 1
+                res = ((res - 0x80) << 7) + ord(varint[offset])
+                if ord(varint[offset]) >= 0x80:
+                    offset += 1
+                    res = ((res - 0x80) << 7) + ord(varint[offset])
+    return res, offset + 1
 
 
 def opt_resize_buf(buf, newsize):
-  if len(buf) < newsize:
-    return ctypes.create_string_buffer(buf.value, max(2*len, newsize))
-  return buf
+    if len(buf) < newsize:
+        return ctypes.create_string_buffer(buf.value, max(2*len(buf), newsize))
+    return buf
 
 
 def pack_field(value, buf, offset):
-  if type(value) is int or type(value) is long:
-    if value > 0xffffffff:
-      raise RuntimeError("Integer value is too big")
-    buf = opt_resize_buf(buf, offset + INT_FIELD_LEN)
-    struct.pack_into("<cL", buf, offset, chr(INT_FIELD_LEN), value)
-    offset += INT_FIELD_LEN + 1
-  elif type(value) is str:
-    opt_resize_buf(buf, offset + INT_BER_MAX_LEN + len(value))
-    value_len_ber = save_varint32(len(value))
-    struct.pack_into("{0}s{1}s".format(len(value_len_ber), len(value)),
-                     buf, offset, value_len_ber, value)
-    offset += len(value_len_ber) + len(value)
-  else:
-    raise RuntimeError("Unsupported value type in value list")
-  return (buf, offset)
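+    # A packed field is either an integer (a one-byte length marker
+    # followed by a 4-byte little-endian value) or a string (its
+    # BER-compressed length followed by the raw bytes).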
+    if type(value) is int or type(value) is long:
+        if value > 0xffffffff:
+            raise RuntimeError("Integer value is too big")
+        buf = opt_resize_buf(buf, offset + INT_FIELD_LEN)
+        struct.pack_into("<cL", buf, offset, chr(INT_FIELD_LEN), value)
+        offset += INT_FIELD_LEN + 1
+    elif type(value) is str:
+        opt_resize_buf(buf, offset + INT_BER_MAX_LEN + len(value))
+        value_len_ber = save_varint32(len(value))
+        struct.pack_into("{0}s{1}s".format(len(value_len_ber), len(value)),
+                         buf, offset, value_len_ber, value)
+        offset += len(value_len_ber) + len(value)
+    else:
+        raise RuntimeError("Unsupported value type in value list")
+    return (buf, offset)
 
 
 def pack_tuple(value_list, buf, offset):
-  """Represents <tuple> rule in tarantool protocol description.
-     Pack tuple into a binary representation.
-     buf and offset are in-out parameters, offset is advanced
-     to the amount of bytes that it took to pack the tuple"""
-  # length of int field: 1 byte - field len (is always 4), 4 bytes - data
-  # max length of compressed integer
-  cardinality = len(value_list)
-  struct.pack_into("<L", buf, offset, cardinality)
-  offset += INT_FIELD_LEN
-  for value in value_list:
-    (buf, offset) = pack_field(value, buf, offset)
-  return buf, offset
+    """Represents <tuple> rule in tarantool protocol description.
+    Pack tuple into a binary representation.
+    buf and offset are in-out parameters, offset is advanced
+    to the amount of bytes that it took to pack the tuple"""
+
+    # length of int field: 1 byte - field len (is always 4), 4 bytes - data
+    # max length of compressed integer
+    cardinality = len(value_list)
+    struct.pack_into("<L", buf, offset, cardinality)
+    offset += INT_FIELD_LEN
+    for value in value_list:
+        (buf, offset) = pack_field(value, buf, offset)
+
+    return buf, offset
+
 
 def pack_operation_list(update_list, buf, offset):
-  buf = opt_resize_buf(buf, offset + INT_FIELD_LEN)
-  struct.pack_into("<L", buf, offset, len(update_list))
-  offset += INT_FIELD_LEN
-  for update in update_list:
-    opt_resize_buf(buf, offset + INT_FIELD_LEN + 1)
-    struct.pack_into("<Lc", buf, offset,
-                     update[0],
-                     chr(UPDATE_SET_FIELD_OPCODE))
-    offset += INT_FIELD_LEN + 1
-    (buf, offset) = pack_field(update[1], buf, offset)
-  return (buf, offset)
+    buf = opt_resize_buf(buf, offset + INT_FIELD_LEN)
+    struct.pack_into("<L", buf, offset, len(update_list))
+    offset += INT_FIELD_LEN
+    for update in update_list:
+        opt_resize_buf(buf, offset + INT_FIELD_LEN + 1)
+        struct.pack_into("<Lc", buf, offset,
+                         update[0],
+                         chr(UPDATE_SET_FIELD_OPCODE))
+        offset += INT_FIELD_LEN + 1
+        (buf, offset) = pack_field(update[1], buf, offset)
+
+    return (buf, offset)
+
 
 def unpack_tuple(response, offset):
-  (size,cardinality) = struct.unpack("<LL", response[offset:offset + 8])
-  offset += 8
-  res = []
-  while len(res) < cardinality:
-    (data_len, offset) = read_varint32(response, offset)
-    data = response[offset:offset+data_len]
-    offset += data_len
-    if data_len == 4:
-      (data,) = struct.unpack("<L", data)
-      res.append((str(data)))
-    else:
-      res.append("'" + data + "'")
+    (size, cardinality) = struct.unpack("<LL", response[offset:offset + 8])
+    offset += 8
+    res = []
+    while len(res) < cardinality:
+        (data_len, offset) = read_varint32(response, offset)
+        data = response[offset:offset+data_len]
+        offset += data_len
+        if data_len == 4:
+            (data,) = struct.unpack("<L", data)
+            res.append((str(data)))
+        else:
+            res.append("'" + data + "'")
+
+    return '[' + ', '.join(res) + ']', offset
 
-  return '[' + ', '.join(res) + ']', offset
 
-   
 class StatementPing:
-  reqeust_type = PING_REQUEST_TYPE
-  def pack(self):
-    return ""
+    reqeust_type = PING_REQUEST_TYPE
+    def pack(self):
+        return ""
 
-  def unpack(self, response):
-    return "ok\n---"
+    def unpack(self, response):
+        return "ok\n---"
 
 class StatementInsert(StatementPing):
-  reqeust_type = INSERT_REQUEST_TYPE
+    reqeust_type = INSERT_REQUEST_TYPE
 
-  def __init__(self, table_name, value_list):
-    self.namespace_no = table_name
-    self.flags = 0
-    self.value_list = value_list
+    def __init__(self, table_name, value_list):
+        self.namespace_no = table_name
+        self.flags = 0
+        self.value_list = value_list
 
-  def pack(self):
-    buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
-    (buf, offset) = pack_tuple(self.value_list, buf, INSERT_REQUEST_FIXED_LEN)
-    struct.pack_into("<LL", buf, 0, self.namespace_no, self.flags)
-    return buf[:offset]
+    def pack(self):
+        buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
+        (buf, offset) = pack_tuple(self.value_list, buf, INSERT_REQUEST_FIXED_LEN)
+        struct.pack_into("<LL", buf, 0, self.namespace_no, self.flags)
+        return buf[:offset]
 
-  def unpack(self, response):
-    (return_code,) = struct.unpack("<L", response[:4])
-    if return_code:
-      return format_error(return_code, response)
-    (tuple_count,) = struct.unpack("<L", response[4:8])
-    return "Insert OK, {0} row affected".format(tuple_count)
+    def unpack(self, response):
+        (return_code,) = struct.unpack("<L", response[:4])
+        if return_code:
+            return format_error(return_code, response)
+        (tuple_count,) = struct.unpack("<L", response[4:8])
+        return "Insert OK, {0} row affected".format(tuple_count)
 
 
 class StatementUpdate(StatementPing):
-  reqeust_type = UPDATE_REQUEST_TYPE
-
-  def __init__(self, table_name, update_list, where):
-    self.namespace_no = table_name
-    self.flags = 0
-    key_no = where[0]
-    if key_no != 0:
-      raise RuntimeError("UPDATE can only be made by the primary key (#0)")
-    self.value_list = where[1:]
-    self.update_list = update_list
-
-  def pack(self):
-    buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
-    struct.pack_into("<LL", buf, 0, self.namespace_no, self.flags)
-    (buf, offset) = pack_tuple(self.value_list, buf, UPDATE_REQUEST_FIXED_LEN)
-    (buf, offset) = pack_operation_list(self.update_list, buf, offset)
-    return buf[:offset]
-
-  def unpack(self, response):
-    (return_code,) = struct.unpack("<L", response[:4])
-    if return_code:
-      return format_error(return_code, response)
-    (tuple_count,) = struct.unpack("<L", response[4:8])
-    return "Update OK, {0} row affected".format(tuple_count)
+    reqeust_type = UPDATE_REQUEST_TYPE
+
+    def __init__(self, table_name, update_list, where):
+        self.namespace_no = table_name
+        self.flags = 0
+        key_no = where[0]
+        if key_no != 0:
+            raise RuntimeError("UPDATE can only be made by the primary key (#0)")
+        self.value_list = where[1:]
+        self.update_list = update_list
+
+    def pack(self):
+        buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
+        struct.pack_into("<LL", buf, 0, self.namespace_no, self.flags)
+        (buf, offset) = pack_tuple(self.value_list, buf, UPDATE_REQUEST_FIXED_LEN)
+        (buf, offset) = pack_operation_list(self.update_list, buf, offset)
+        return buf[:offset]
+
+    def unpack(self, response):
+        (return_code,) = struct.unpack("<L", response[:4])
+        if return_code:
+            return format_error(return_code, response)
+        (tuple_count,) = struct.unpack("<L", response[4:8])
+        return "Update OK, {0} row affected".format(tuple_count)
 
 class StatementDelete(StatementPing):
-  reqeust_type = DELETE_REQUEST_TYPE
-
-  def __init__(self, table_name, where):
-    self.namespace_no = table_name
-    key_no = where[0]
-    if key_no != 0:
-      raise RuntimeError("DELETE can only be made by the primary key (#0)")
-    self.value_list = where[1:]
-
-  def pack(self):
-    buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
-    (buf, offset) = pack_tuple(self.value_list, buf, DELETE_REQUEST_FIXED_LEN)
-    struct.pack_into("<L", buf, 0, self.namespace_no)
-    return buf[:offset]
-
-  def unpack(self, response):
-    (return_code,) = struct.unpack("<L", response[:4])
-    if return_code:
-      return format_error(return_code, response)
-    (tuple_count,) = struct.unpack("<L", response[4:8])
-    return "Delete OK, {0} row affected".format(tuple_count)
+    reqeust_type = DELETE_REQUEST_TYPE
+
+    def __init__(self, table_name, where):
+        self.namespace_no = table_name
+        key_no = where[0]
+        if key_no != 0:
+            raise RuntimeError("DELETE can only be made by the primary key (#0)")
+        self.value_list = where[1:]
+
+    def pack(self):
+        buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
+        (buf, offset) = pack_tuple(self.value_list, buf, DELETE_REQUEST_FIXED_LEN)
+        struct.pack_into("<L", buf, 0, self.namespace_no)
+        return buf[:offset]
+
+    def unpack(self, response):
+        (return_code,) = struct.unpack("<L", response[:4])
+        if return_code:
+            return format_error(return_code, response)
+        (tuple_count,) = struct.unpack("<L", response[4:8])
+        return "Delete OK, {0} row affected".format(tuple_count)
 
 class StatementSelect(StatementPing):
-  reqeust_type = SELECT_REQUEST_TYPE
-
-  def __init__(self, table_name, where, limit):
-    self.namespace_no = table_name
-    self.index_no = None
-    self.key_list = []
-    if not where:
-      self.index_no = 0
-      self.key_list = ["",]
-    else:
-      for (index_no, key) in where:
-        self.key_list.append(key)
-        if self.index_no == None:
-          self.index_no = index_no
-        elif self.index_no != index_no:
-          raise RuntimeError("All key values in a disjunction must refer to the same index")
-    self.offset = 0
-    self.limit = limit
-
-  def pack(self):
-    buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
-    struct.pack_into("<LLLLL", buf, 0,
-                     self.namespace_no,
-                     self.index_no,
-                     self.offset,
-                     self.limit,
-                     len(self.key_list))
-    offset = SELECT_REQUEST_FIXED_LEN
-    for key in self.key_list:
-      (buf, offset) = pack_tuple([key], buf, offset)
-
-    return buf[:offset]
-
-  def unpack(self, response):
-    (return_code,) = struct.unpack("<L", response[:4])
-    if return_code:
-      return format_error(return_code, response)
-    (tuple_count,) = struct.unpack("<L", response[4:8])
-    tuples = []
-    offset = 8
-    while len(tuples) < tuple_count:
-      (next_tuple, offset) = unpack_tuple(response, offset)
-      tuples.append(next_tuple)
-    if tuple_count == 0:
-      return "No match"
-    elif tuple_count == 1:
-      return "Found 1 tuple:\n" + tuples[0]
-    else:
-      return "Found {0} tuples:\n".format(tuple_count) + "\n".join(tuples)
+    reqeust_type = SELECT_REQUEST_TYPE
+
+    def __init__(self, table_name, where, limit):
+        self.namespace_no = table_name
+        self.index_no = None
+        self.key_list = []
+        if not where:
+            self.index_no = 0
+            self.key_list = ["",]
+        else:
+            for (index_no, key) in where:
+                self.key_list.append(key)
+                if self.index_no is None:
+                    self.index_no = index_no
+                elif self.index_no != index_no:
+                    raise RuntimeError("All key values in a disjunction must refer to the same index")
+        self.offset = 0
+        self.limit = limit
+
+    def pack(self):
+        buf = ctypes.create_string_buffer(PACKET_BUF_LEN)
+        struct.pack_into("<LLLLL", buf, 0,
+                         self.namespace_no,
+                         self.index_no,
+                         self.offset,
+                         self.limit,
+                         len(self.key_list))
+        offset = SELECT_REQUEST_FIXED_LEN
+
+        for key in self.key_list:
+            (buf, offset) = pack_tuple([key], buf, offset)
+
+        return buf[:offset]
+
+    def unpack(self, response):
+        (return_code,) = struct.unpack("<L", response[:4])
+        if return_code:
+            return format_error(return_code, response)
+        (tuple_count,) = struct.unpack("<L", response[4:8])
+        tuples = []
+        offset = 8
+        while len(tuples) < tuple_count:
+            (next_tuple, offset) = unpack_tuple(response, offset)
+            tuples.append(next_tuple)
+        if tuple_count == 0:
+            return "No match"
+        elif tuple_count == 1:
+            return "Found 1 tuple:\n" + tuples[0]
+        else:
+            return "Found {0} tuples:\n".format(tuple_count) + "\n".join(tuples)
 
diff --git a/test/lib/tarantool_admin.py b/test/lib/tarantool_admin.py
index bce37e34e4..838fd56d12 100644
--- a/test/lib/tarantool_admin.py
+++ b/test/lib/tarantool_admin.py
@@ -29,25 +29,26 @@ from tarantool_connection import TarantoolConnection
 is_admin_re = re.compile("^\s*(show|save|exec|exit|reload|help)", re.I)
 
 class TarantoolAdmin(TarantoolConnection):
-  def execute_no_reconnect(self, command, silent):
-    self.socket.sendall(command)
-
-    bufsiz = 4096
-    res = ""
-
-    while True:
-      buf = self.socket.recv(bufsiz)
-      if not buf:
-        break
-      res = res + buf;
-      if (res.rfind("\r\n...\r\n") >= 0):
-        break
-
-    # validate yaml by parsing it
-    yaml.load(res)
-
-    if not silent:
-      print command.replace('\n', '')
-      print res[:-1]
-    return res
+    def execute_no_reconnect(self, command, silent):
+        self.socket.sendall(command)
+
+        bufsiz = 4096
+        res = ""
+
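+        # The admin console terminates every reply with "\r\n...\r\n"
+        # (a YAML end-of-document marker); keep reading until it arrives.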
+        while True:
+            buf = self.socket.recv(bufsiz)
+            if not buf:
+                break
+            res = res + buf
+            if res.rfind("\r\n...\r\n") >= 0:
+                break
+
+        # validate yaml by parsing it
+        yaml.load(res)
+
+        if not silent:
+            print command.replace('\n', '')
+            print res[:-1]
+
+        return res
 
diff --git a/test/lib/tarantool_box_server.py b/test/lib/tarantool_box_server.py
index 87b600e1e4..430e8a6add 100644
--- a/test/lib/tarantool_box_server.py
+++ b/test/lib/tarantool_box_server.py
@@ -8,36 +8,36 @@ from box import Box
 import time
 
 class TarantoolBoxServer(TarantoolServer):
-  def __new__(cls, core="tarantool", module="box"):
-    return TarantoolServer.__new__(cls)
+    def __new__(cls, core="tarantool", module="box"):
+        return TarantoolServer.__new__(cls)
 
-  def __init__(self, core="tarantool", module="box"):
-    TarantoolServer.__init__(self, core, module)
+    def __init__(self, core="tarantool", module="box"):
+        TarantoolServer.__init__(self, core, module)
 
-  def configure(self, config):
-    TarantoolServer.configure(self, config)
-    with open(self.config) as fp:
-      dummy_section_name = "tarantool"
-      config = ConfigParser.ConfigParser()
-      config.readfp(TarantoolConfigFile(fp, dummy_section_name))
-      self.primary_port = int(config.get(dummy_section_name, "primary_port"))
-      self.admin_port = int(config.get(dummy_section_name, "admin_port"))
-      self.port = self.admin_port
-      self.admin = TarantoolAdmin("localhost", self.admin_port)
-      self.sql = Box("localhost", self.primary_port)
+    def configure(self, config):
+        TarantoolServer.configure(self, config)
+        with open(self.config) as fp:
+            dummy_section_name = "tarantool"
+            config = ConfigParser.ConfigParser()
+            config.readfp(TarantoolConfigFile(fp, dummy_section_name))
+            self.primary_port = int(config.get(dummy_section_name, "primary_port"))
+            self.admin_port = int(config.get(dummy_section_name, "admin_port"))
+            self.port = self.admin_port
+            self.admin = TarantoolAdmin("localhost", self.admin_port)
+            self.sql = Box("localhost", self.primary_port)
 
-  def init(self):
-# init storage
-    subprocess.check_call([self.binary, "--init_storage"],
-                          cwd = self.vardir,
-# catch stdout/stderr to not clutter output
-                          stdout = subprocess.PIPE,
-                          stderr = subprocess.PIPE)
+    def init(self):
+        # init storage
+        subprocess.check_call([self.binary, "--init_storage"],
+                              cwd = self.vardir,
+                              # catch stdout/stderr to not clutter output
+                              stdout = subprocess.PIPE,
+                              stderr = subprocess.PIPE)
 
-  def wait_lsn(self, lsn):
-    while True:
-      data = self.admin.execute("show info\n", silent=True)
-      info = yaml.load(data)["info"]
-      if (int(info["lsn"]) >= lsn):
-        break
-      time.sleep(0.01)
+    def wait_lsn(self, lsn):
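+        # Poll "show info" until the server reports a log sequence number
+        # that has reached the one we are waiting for.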
+        while True:
+            data = self.admin.execute("show info\n", silent=True)
+            info = yaml.load(data)["info"]
+            if (int(info["lsn"]) >= lsn):
+                break
+            time.sleep(0.01)
diff --git a/test/lib/tarantool_connection.py b/test/lib/tarantool_connection.py
index 50653f8790..d5d62dab79 100644
--- a/test/lib/tarantool_connection.py
+++ b/test/lib/tarantool_connection.py
@@ -27,71 +27,71 @@ import cStringIO
 import errno
 
 class TarantoolConnection:
-  def __init__(self, host, port):
-    self.host = host
-    self.port = port
-    self.is_connected = False
-    self.stream = cStringIO.StringIO()
-    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
+    def __init__(self, host, port):
+        self.host = host
+        self.port = port
+        self.is_connected = False
+        self.stream = cStringIO.StringIO()
+        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
 
-  def connect(self):
-    self.socket.connect((self.host, self.port))
-    self.is_connected = True
+    def connect(self):
+        self.socket.connect((self.host, self.port))
+        self.is_connected = True
 
-  def disconnect(self):
-    if self.is_connected:
-      self.socket.close()
-      self.is_connected = False
+    def disconnect(self):
+        if self.is_connected:
+            self.socket.close()
+            self.is_connected = False
 
-  def reconnect(self):
-    self.disconnect()
-    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
-    self.connect()
+    def reconnect(self):
+        self.disconnect()
+        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
+        self.connect()
 
-  def opt_reconnect(self):
-    """ On a socket which was disconnected, recv of 0 bytes immediately
-        returns with no data. On a socket which is alive, it returns EAGAIN.
-        Make use of this property and detect whether or not the socket is
-        dead. Reconnect a dead socket, do nothing if the socket is good."""
-    try:
-      if self.socket.recv(0, socket.MSG_DONTWAIT) == '':
-        self.reconnect()
-    except socket.error as e:
-      if e.errno == errno.EAGAIN:
-        pass
-      else:
-        self.reconnect()
+    def opt_reconnect(self):
+        """ On a socket which was disconnected, recv of 0 bytes immediately
+            returns with no data. On a socket which is alive, it returns EAGAIN.
+            Make use of this property and detect whether or not the socket is
+            dead. Reconnect a dead socket, do nothing if the socket is good."""
+        try:
+            if self.socket.recv(0, socket.MSG_DONTWAIT) == '':
+                self.reconnect()
+        except socket.error as e:
+            if e.errno == errno.EAGAIN:
+                pass
+            else:
+                self.reconnect()
 
-  def execute(self, command, silent=True):
-    self.opt_reconnect()
-    return self.execute_no_reconnect(command, silent)
+    def execute(self, command, silent=True):
+        self.opt_reconnect()
+        return self.execute_no_reconnect(command, silent)
 
-  def write(self, fragment):
-    """This is to support print >> admin, "command" syntax.
-    For every print statement, write is invoked twice: one to
-    write the command itself, and another to write \n. We should
-    accumulate all writes until we receive \n. When we receive it,
-    we execute the command, and rewind the stream."""
+    def write(self, fragment):
+        """This is to support print >> admin, "command" syntax.
+        For every print statement, write is invoked twice: one to
+        write the command itself, and another to write \n. We should
+        accumulate all writes until we receive \n. When we receive it,
+        we execute the command, and rewind the stream."""
 
-    newline_pos = fragment.rfind("\n")
-    while newline_pos >= 0:
-      self.stream.write(fragment[:newline_pos+1])
-      statement = self.stream.getvalue()
-      sys.stdout.write(statement)
-      sys.stdout.write(self.execute(statement))
-      fragment = fragment[newline_pos+1:]
-      newline_pos = fragment.rfind("\n")
-      self.stream.seek(0)
-      self.stream.truncate()
+        newline_pos = fragment.rfind("\n")
+        while newline_pos >= 0:
+            self.stream.write(fragment[:newline_pos+1])
+            statement = self.stream.getvalue()
+            sys.stdout.write(statement)
+            sys.stdout.write(self.execute(statement))
+            fragment = fragment[newline_pos+1:]
+            newline_pos = fragment.rfind("\n")
+            self.stream.seek(0)
+            self.stream.truncate()
 
-    self.stream.write(fragment)
+        self.stream.write(fragment)
 
-  def __enter__(self):
-    self.connect()
-    return self
+    def __enter__(self):
+        self.connect()
+        return self
 
-  def __exit__(self, type, value, tb):
-    self.disconnect()
+    def __exit__(self, type, value, tb):
+        self.disconnect()
 
diff --git a/test/lib/tarantool_feeder_server.py b/test/lib/tarantool_feeder_server.py
index 8c9865ec1e..2a4b7979c5 100644
--- a/test/lib/tarantool_feeder_server.py
+++ b/test/lib/tarantool_feeder_server.py
@@ -2,16 +2,17 @@ import ConfigParser
 from tarantool_server import TarantoolServer, TarantoolConfigFile
 
 class TarantoolFeederServer(TarantoolServer):
-  def __new__(cls, core="tarantool", module="feeder"):
-    return TarantoolServer.__new__(cls)
+    def __new__(cls, core="tarantool", module="feeder"):
+        return TarantoolServer.__new__(cls)
 
-  def __init__(self, core="tarantool", module="feeder"):
-    TarantoolServer.__init__(self, core, module)
+    def __init__(self, core="tarantool", module="feeder"):
+        TarantoolServer.__init__(self, core, module)
+
+    def configure(self, config):
+        TarantoolServer.configure(self, config)
+        with open(self.config) as fp:
+            dummy_section_name = "tarantool"
+            config = ConfigParser.ConfigParser()
+            config.readfp(TarantoolConfigFile(fp, dummy_section_name))
+            self.port = int(config.get(dummy_section_name, "wal_feeder_bind_port"))
 
-  def configure(self, config):
-    TarantoolServer.configure(self, config)
-    with open(self.config) as fp:
-      dummy_section_name = "tarantool"
-      config = ConfigParser.ConfigParser()
-      config.readfp(TarantoolConfigFile(fp, dummy_section_name))
-      self.port = int(config.get(dummy_section_name, "wal_feeder_bind_port"))
diff --git a/test/lib/tarantool_preprocessor.py b/test/lib/tarantool_preprocessor.py
index 90ba09e87e..ed8c24f335 100644
--- a/test/lib/tarantool_preprocessor.py
+++ b/test/lib/tarantool_preprocessor.py
@@ -9,55 +9,58 @@ import sys
 
 
 def tarantool_translate(readline):
-  token_stream = tokenize.generate_tokens(readline)
-  for token in token_stream:
-    type, name = token[:2]
-    if type == tokenize.NAME and name == "exec":
-      next_token = next(token_stream)
-      type, name = next_token[:2]
-      if type == tokenize.NAME and name in [ "sql", "admin"]:
-        yield (tokenize.NAME, 'print') + token[2:]
-        yield (tokenize.OP, '>>') + token[2:]
-        yield next_token
-        yield (tokenize.OP, ',') + next_token[2:]
-      else:
-        yield token
-        yield next_token
-    else:
-      yield token
+    token_stream = tokenize.generate_tokens(readline)
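+    # Rewrite the test-language statement "exec admin <expr>" (or
+    # "exec sql <expr>") into plain Python 2: "print >> admin, <expr>".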
+    for token in token_stream:
+        type, name = token[:2]
+        if type == tokenize.NAME and name == "exec":
+            next_token = next(token_stream)
+            type, name = next_token[:2]
+            if type == tokenize.NAME and name in [ "sql", "admin"]:
+                yield (tokenize.NAME, 'print') + token[2:]
+                yield (tokenize.OP, '>>') + token[2:]
+                yield next_token
+                yield (tokenize.OP, ',') + next_token[2:]
+            else:
+                yield token
+                yield next_token
+        else:
+            yield token
+
 
 class TarantoolStreamReader(utf_8.StreamReader):
-  def __init__(self, *args, **kwargs):
-    utf_8.StreamReader.__init__(self, *args, **kwargs)
-    try:
-      data = tokenize.untokenize(tarantool_translate(self.stream.readline))
-      self.stream = cStringIO.StringIO(data)
-    except Exception:
-      self.stream.seek(0)
+    def __init__(self, *args, **kwargs):
+        utf_8.StreamReader.__init__(self, *args, **kwargs)
+        try:
+            data = tokenize.untokenize(tarantool_translate(self.stream.readline))
+            self.stream = cStringIO.StringIO(data)
+        except Exception:
+            self.stream.seek(0)
+
 
 def tarantool_encoding_builder(encoding_name):
-  """Return an encoding that pre-processes the input and
-  rewrites it to be pure python"""
-  if encoding_name == "tarantool":
-    utf8 = encodings.search_function("utf8")
-    return codecs.CodecInfo(name = "tarantool",
-                            encode = utf8.encode,
-                            decode = utf8.decode,
-                            incrementalencoder = utf8.incrementalencoder,
-                            incrementaldecoder = utf8.incrementaldecoder,
-                            streamreader = TarantoolStreamReader,
-                            streamwriter = utf8.streamwriter)
-  return None
+    """Return an encoding that pre-processes the input and
+    rewrites it to be pure python"""
+    if encoding_name == "tarantool":
+        utf8 = encodings.search_function("utf8")
+        return codecs.CodecInfo(name = "tarantool",
+                                encode = utf8.encode,
+                                decode = utf8.decode,
+                                incrementalencoder = utf8.incrementalencoder,
+                                incrementaldecoder = utf8.incrementaldecoder,
+                                streamreader = TarantoolStreamReader,
+                                streamwriter = utf8.streamwriter)
+    return None
 
 codecs.register(tarantool_encoding_builder)
 
+
 def main():
-  py_input = """exec admin 'show info'
+    py_input = """exec admin 'show info'
 print 'hello'
 exec sql 'select * from namespace1'\n"""
-  print py_input
-  py_stream = cStringIO.StringIO(py_input)
-  print tokenize.untokenize(tarantool_translate(py_stream.readline))
+    print py_input
+    py_stream = cStringIO.StringIO(py_input)
+    print tokenize.untokenize(tarantool_translate(py_stream.readline))
 
 if __name__ == "__main__":
-  main()
+    main()
diff --git a/test/lib/tarantool_server.py b/test/lib/tarantool_server.py
index 280d80d7ef..1d640352be 100644
--- a/test/lib/tarantool_server.py
+++ b/test/lib/tarantool_server.py
@@ -7,75 +7,78 @@ from server import Server
 from tarantool_admin import TarantoolAdmin
 
 class TarantoolConfigFile:
-  """ConfigParser can't read files without sections, work it around"""
-  def __init__(self, fp, section_name):
-    self.fp = fp
-    self.section_name = "[" + section_name + "]"
-  def readline(self):
-    if self.section_name:
-      section_name = self.section_name
-      self.section_name = None
-      return section_name
-    # tarantool.cfg puts string values in quote
-    return self.fp.readline().replace("\"", '')
+    """ConfigParser can't read files without sections, work it around"""
+    def __init__(self, fp, section_name):
+        self.fp = fp
+        self.section_name = "[" + section_name + "]"
+
+    def readline(self):
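+        # On the first call return a synthetic "[section]" header so that
+        # ConfigParser accepts the section-less config file; afterwards pass
+        # lines through with their double quotes stripped.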
+        if self.section_name:
+            section_name = self.section_name
+            self.section_name = None
+            return section_name
+        # tarantool.cfg puts string values in quote
+        return self.fp.readline().replace("\"", '')
+
 
 class TarantoolServer(Server):
-  def __new__(cls, core=None, module=None):
-    if module  == None:
-      return super(Server, cls).__new__(cls)
-    mdlname = "lib.{0}_{1}_server".format(core, module)
-    clsname = "{0}{1}Server".format(core.title(), module.title())
-    modulecls = __import__(mdlname, fromlist=clsname).__dict__[clsname]
-    return modulecls.__new__(modulecls, core, module)
+    def __new__(cls, core=None, module=None):
+        if module is None:
+            return super(Server, cls).__new__(cls)
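+        # Dispatch to the concrete server class, e.g. core="tarantool" and
+        # module="box" resolve to TarantoolBoxServer from
+        # lib.tarantool_box_server.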
+        mdlname = "lib.{0}_{1}_server".format(core, module)
+        clsname = "{0}{1}Server".format(core.title(), module.title())
+        modulecls = __import__(mdlname, fromlist=clsname).__dict__[clsname]
+
+        return modulecls.__new__(modulecls, core, module)
 
-  def __init__(self, core, module):
-    Server.__init__(self, core, module)
-    self.default_config_name = "tarantool.cfg"
-# append additional cleanup patterns
-    self.re_vardir_cleanup += ['*.snap', '*.xlog', '*.inprogress', '*.cfg']
+    def __init__(self, core, module):
+        Server.__init__(self, core, module)
+        self.default_config_name = "tarantool.cfg"
+        # append additional cleanup patterns
+        self.re_vardir_cleanup += ['*.snap', '*.xlog', '*.inprogress', '*.cfg']
 
-  def find_exe(self, builddir, silent=True):
-    return Server.find_exe(self, "{0}/mod/{1}".format(builddir, self.module), silent)
+    def find_exe(self, builddir, silent=True):
+        return Server.find_exe(self, "{0}/mod/{1}".format(builddir, self.module), silent)
 
-  def configure(self, config):
-    Server.configure(self, config)
-# now read the server config, we need some properties from it
-    with open(self.config) as fp:
-      dummy_section_name = "tarantool"
-      config = ConfigParser.ConfigParser()
-      config.readfp(TarantoolConfigFile(fp, dummy_section_name))
-      self.pidfile = config.get(dummy_section_name, "pid_file")
+    def configure(self, config):
+        Server.configure(self, config)
+        # now read the server config, we need some properties from it
+        with open(self.config) as fp:
+            dummy_section_name = "tarantool"
+            config = ConfigParser.ConfigParser()
+            config.readfp(TarantoolConfigFile(fp, dummy_section_name))
+            self.pidfile = config.get(dummy_section_name, "pid_file")
 
-  def reconfigure(self, config, silent=False):
-    if config == None:
-      os.unlink(os.path.join(self.vardir, self.default_config_name))
-    else:
-      self.config = os.path.abspath(config)
-      shutil.copy(self.config, os.path.join(self.vardir, self.default_config_name))
-    self.admin.execute("reload configuration\n", silent=silent)
+    def reconfigure(self, config, silent=False):
+        if config is None:
+            os.unlink(os.path.join(self.vardir, self.default_config_name))
+        else:
+            self.config = os.path.abspath(config)
+            shutil.copy(self.config, os.path.join(self.vardir, self.default_config_name))
+        self.admin.execute("reload configuration\n", silent=silent)
 
-  def version(self):
-    p = subprocess.Popen([self.binary, "--version"],
-                         cwd = self.vardir,
-                         stdout = subprocess.PIPE)
-    version = p.stdout.read().rstrip()
-    p.wait()
-    return version
+    def version(self):
+        p = subprocess.Popen([self.binary, "--version"],
+                             cwd = self.vardir,
+                             stdout = subprocess.PIPE)
+        version = p.stdout.read().rstrip()
+        p.wait()
+        return version
 
-  def _start_and_exit(self, args, gdb=None, valgrind=None):
-    if gdb != None: self.gdb = gdb
-    if valgrind != None: self.valgrind = valgrind
+    def _start_and_exit(self, args, gdb=None, valgrind=None):
+        if gdb is not None:
+            self.gdb = gdb
+        if valgrind is not None:
+            self.valgrind = valgrind
 
-    if self.valgrind:
-      Server._start_and_exit(self, args)
-    else:
-      if not self.gdb:
-        args.append("--background")
-      else:
-        raise RuntimeError("'--gdb' and '--start-and-exit' can't be defined together")
-      self.server = pexpect.spawn(args[0], args[1:], cwd = self.vardir)
-      self.server.wait()
+        if self.valgrind:
+            Server._start_and_exit(self, args)
+        else:
+            if not self.gdb:
+                args.append("--background")
+            else:
+                raise RuntimeError("'--gdb' and '--start-and-exit' can't be defined together")
+            self.server = pexpect.spawn(args[0], args[1:], cwd = self.vardir)
+            self.server.wait()
 
-  def default_bin_name(self):
-    return "{0}_{1}".format(self.core, self.module)
+    def default_bin_name(self):
+        return "{0}_{1}".format(self.core, self.module)
 
diff --git a/test/lib/test_suite.py b/test/lib/test_suite.py
index 2af4e06ebc..95838cf16f 100644
--- a/test/lib/test_suite.py
+++ b/test/lib/test_suite.py
@@ -18,248 +18,256 @@ import string
 import traceback
 
 class FilteredStream:
-  """Helper class to filter .result file output"""
-  def __init__(self, filename):
-    self.stream = open(filename, "w+")
-    self.filters = []
-  def write(self, fragment):
-    """Apply all filters, then write result to the undelrying stream.
-    Do line-oriented filtering: the fragment doesn't have to represent
-    just one line."""
-    fragment_stream = cStringIO.StringIO(fragment)
-    for line in fragment_stream:
-      original_len = len(line.strip())
-      for pattern, replacement in self.filters:
-        line = re.sub(pattern, replacement, line)
-# don't write lines that are completely filtered out:
-        if original_len and len(line.strip()) == 0:
-          return
-      self.stream.write(line)
-  def push_filter(self, pattern, replacement):
-    self.filters.append([pattern, replacement])
-  def pop_filter(self):
-    self.filters.pop()
-  def clear_all_filters(self):
-    filters = []
-  def close(self):
-    self.clear_all_filters()
-    self.stream.close()
+
+    """Helper class to filter .result file output"""
+    def __init__(self, filename):
+        self.stream = open(filename, "w+")
+        self.filters = []
+
+    def write(self, fragment):
+        """Apply all filters, then write result to the undelrying stream.
+        Do line-oriented filtering: the fragment doesn't have to represent
+        just one line."""
+        fragment_stream = cStringIO.StringIO(fragment)
+        for line in fragment_stream:
+            original_len = len(line.strip())
+            for pattern, replacement in self.filters:
+                line = re.sub(pattern, replacement, line)
+                # don't write lines that are completely filtered out:
+                if original_len and len(line.strip()) == 0:
+                    return
+            self.stream.write(line)
+
+    def push_filter(self, pattern, replacement):
+        self.filters.append([pattern, replacement])
+
+    def pop_filter(self):
+        self.filters.pop()
+
+    def clear_all_filters(self):
+        self.filters = []
+
+    def close(self):
+        self.clear_all_filters()
+        self.stream.close()
 
 
 def check_valgrind_log(path_to_log):
-  """ Check that there were no warnings in the log."""
-  return os.path.getsize(path_to_log) != 0
+    """ Check that there were no warnings in the log."""
+    return os.path.getsize(path_to_log) != 0
 
 
 def print_tail_n(filename, num_lines):
-  """Print N last lines of a file."""
-  with open(filename, "r+") as logfile:
-    tail_n = collections.deque(logfile, num_lines)
-    for line in tail_n:
-      sys.stdout.write(line)
+    """Print N last lines of a file."""
+    with open(filename, "r+") as logfile:
+        tail_n = collections.deque(logfile, num_lines)
+        for line in tail_n:
+            sys.stdout.write(line)
 
 
 class Test:
-  """An individual test file. A test object can run itself
-  and remembers completion state of the run."""
-  def __init__(self, name, args, suite_ini):
-    """Initialize test properties: path to test file, path to
-    temporary result file, path to the client program, test status."""
-    self.name = name
-    self.args = args
-    self.suite_ini = suite_ini
-    self.result = name.replace(".test", ".result")
-    self.tmp_result = os.path.join(self.args.vardir,
-                                   os.path.basename(self.result))
-    self.reject = name.replace(".test", ".reject")
-    self.is_executed = False
-    self.is_executed_ok = None
-    self.is_equal_result = None
-    self.is_valgrind_clean = True
-
-  def passed(self):
-    """Return true if this test was run successfully."""
-    return self.is_executed and self.is_executed_ok and self.is_equal_result
-
-  def run(self, server):
-    """Execute the test assuming it's a python program.
-    If the test aborts, print its output to stdout, and raise
-    an exception. Else, comprare result and reject files.
-    If there is a difference, print it to stdout and raise an
-    exception. The exception is raised only if is_force flag is
-    not set."""
-
-    diagnostics = "unknown"
-    save_stdout = sys.stdout
-    try:
-      sys.stdout = FilteredStream(self.tmp_result)
-      stdout_fileno = sys.stdout.stream.fileno()
-      execfile(self.name, dict(locals(), **server.__dict__))
-      self.is_executed_ok = True
-    except Exception as e:
-      traceback.print_exc(e)
-      diagnostics = str(e)
-    finally:
-      if sys.stdout and sys.stdout != save_stdout:
-        sys.stdout.close()
-      sys.stdout = save_stdout;
-
-    self.is_executed = True
-
-    if self.is_executed_ok and os.path.isfile(self.result):
-        self.is_equal_result = filecmp.cmp(self.result, self.tmp_result)
-
-    if self.args.valgrind:
-      self.is_valgrind_clean = \
-      check_valgrind_log(server.valgrind_log) == False
-
-    if self.is_executed_ok and self.is_equal_result and self.is_valgrind_clean:
-      print "[ pass ]"
-      os.remove(self.tmp_result)
-    elif (self.is_executed_ok and not self.is_equal_result and not
-        os.path.isfile(self.result)):
-      os.rename(self.tmp_result, self.result)
-      print "[ NEW ]"
-    else:
-      os.rename(self.tmp_result, self.reject)
-      print "[ fail ]"
-      where = ""
-      if not self.is_executed_ok:
-        self.print_diagnostics(self.reject,
-            "Test failed! Last 10 lines of the result file:")
-        where = ": test execution aborted, reason '{0}'".format(diagnostics)
-      elif not self.is_equal_result:
-        self.print_unidiff()
-        where = ": wrong test output"
-      elif not self.is_valgrind_clean:
-        os.remove(self.reject)
-        self.print_diagnostics(server.valgrind_log,
-                               "Test failed! Last 10 lines of valgrind.log:")
-        where = ": there were warnings in valgrind.log"
-
-      if not self.args.is_force:
-        raise RuntimeError("Failed to run test " + self.name + where)
-
-
-  def print_diagnostics(self, logfile, message):
-    """Print 10 lines of client program output leading to test
-    failure. Used to diagnose a failure of the client program"""
-
-    print message
-    print_tail_n(logfile, 10)
-
-  def print_unidiff(self):
-    """Print a unified diff between .test and .result files. Used
-    to establish the cause of a failure when .test differs
-    from .result."""
-
-    print "Test failed! Result content mismatch:"
-    with open(self.result, "r") as result:
-      with open(self.reject, "r") as reject:
-        result_time = time.ctime(os.stat(self.result).st_mtime)
-        reject_time = time.ctime(os.stat(self.reject).st_mtime)
-        diff = difflib.unified_diff(result.readlines(),
-                                    reject.readlines(),
-                                    self.result,
-                                    self.reject,
-                                    result_time,
-                                    reject_time)
-        for line in diff:
-          sys.stdout.write(line)
+    """An individual test file. A test object can run itself
+    and remembers completion state of the run."""
+
+    def __init__(self, name, args, suite_ini):
+        """Initialize test properties: path to test file, path to
+        temporary result file, path to the client program, test status."""
+
+        self.name = name
+        self.args = args
+        self.suite_ini = suite_ini
+        self.result = name.replace(".test", ".result")
+        self.tmp_result = os.path.join(self.args.vardir,
+                                       os.path.basename(self.result))
+        self.reject = name.replace(".test", ".reject")
+        self.is_executed = False
+        self.is_executed_ok = None
+        self.is_equal_result = None
+        self.is_valgrind_clean = True
+
+    def passed(self):
+        """Return true if this test was run successfully."""
+
+        return self.is_executed and self.is_executed_ok and self.is_equal_result
+
+    def run(self, server):
+        """Execute the test assuming it's a python program.
+        If the test aborts, print its output to stdout, and raise
+        an exception. Otherwise, compare the result and reject files.
+        If there is a difference, print it to stdout and raise an
+        exception. The exception is raised only if is_force flag is
+        not set."""
+
+        diagnostics = "unknown"
+        save_stdout = sys.stdout
+        try:
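+            # Redirect stdout into the filtered .result stream while the
+            # test file is executed; it is restored in the finally clause.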
+            sys.stdout = FilteredStream(self.tmp_result)
+            stdout_fileno = sys.stdout.stream.fileno()
+            execfile(self.name, dict(locals(), **server.__dict__))
+            self.is_executed_ok = True
+        except Exception as e:
+            traceback.print_exc(e)
+            diagnostics = str(e)
+        finally:
+            if sys.stdout and sys.stdout != save_stdout:
+                sys.stdout.close()
+            sys.stdout = save_stdout
+
+        self.is_executed = True
+
+        if self.is_executed_ok and os.path.isfile(self.result):
+            self.is_equal_result = filecmp.cmp(self.result, self.tmp_result)
+
+        if self.args.valgrind:
+            self.is_valgrind_clean = \
+                not check_valgrind_log(server.valgrind_log)
+
+        if self.is_executed_ok and self.is_equal_result and self.is_valgrind_clean:
+            print "[ pass ]"
+            os.remove(self.tmp_result)
+        elif (self.is_executed_ok and not self.is_equal_result and not
+              os.path.isfile(self.result)):
+            os.rename(self.tmp_result, self.result)
+            print "[ NEW ]"
+        else:
+            os.rename(self.tmp_result, self.reject)
+            print "[ fail ]"
+
+            where = ""
+            if not self.is_executed_ok:
+                self.print_diagnostics(self.reject, "Test failed! Last 10 lines of the result file:")
+                where = ": test execution aborted, reason '{0}'".format(diagnostics)
+            elif not self.is_equal_result:
+                self.print_unidiff()
+                where = ": wrong test output"
+            elif not self.is_valgrind_clean:
+                os.remove(self.reject)
+                self.print_diagnostics(server.valgrind_log, "Test failed! Last 10 lines of valgrind.log:")
+                where = ": there were warnings in valgrind.log"
+
+            if not self.args.is_force:
+                raise RuntimeError("Failed to run test " + self.name + where)
+
+    def print_diagnostics(self, logfile, message):
+        """Print 10 lines of client program output leading to test
+        failure. Used to diagnose a failure of the client program"""
+
+        print message
+        print_tail_n(logfile, 10)
+
+    def print_unidiff(self):
+        """Print a unified diff between .test and .result files. Used
+        to establish the cause of a failure when .test differs
+        from .result."""
+
+        print "Test failed! Result content mismatch:"
+        with open(self.result, "r") as result:
+            with open(self.reject, "r") as reject:
+                result_time = time.ctime(os.stat(self.result).st_mtime)
+                reject_time = time.ctime(os.stat(self.reject).st_mtime)
+                diff = difflib.unified_diff(result.readlines(),
+                                            reject.readlines(),
+                                            self.result,
+                                            self.reject,
+                                            result_time,
+                                            reject_time)
+                for line in diff:
+                    sys.stdout.write(line)
 
 class TestSuite:
-  """Each test suite contains a number of related tests files,
-  located in the same directory on disk. Each test file has
-  extention .test and contains a listing of server commands,
-  followed by their output. The commands are executed, and
-  obtained results are compared with pre-recorded output. In case
-  of a comparision difference, an exception is raised. A test suite
-  must also contain suite.ini, which describes how to start the
-  server for this suite, the client program to execute individual
-  tests and other suite properties. The server is started once per
-  suite."""
-
-  def __init__(self, suite_path, args):
-    """Initialize a test suite: check that it exists and contains
-    a syntactically correct configuration file. Then create
-    a test instance for each found test."""
-    self.args = args
-    self.tests = []
-    self.ini = {}
-
-    self.ini["core"] = "tarantool"
-    self.ini["module"] = "box"
-
-    if os.access(suite_path, os.F_OK) == False:
-      raise RuntimeError("Suite \"" + suite_path +\
-                         "\" doesn't exist")
-
-# read the suite config
-    config = ConfigParser.ConfigParser()
-    config.read(os.path.join(suite_path, "suite.ini"))
-    self.ini.update(dict(config.items("default")))
-    self.ini["config"] = os.path.join(suite_path, self.ini["config"])
-    if self.ini.has_key("disabled"):
-      self.ini["disabled"] = dict.fromkeys(self.ini["disabled"].split(" "))
-    else:
-      self.ini["disabled"] = dict()
-
-    print "Collecting tests in \"" + suite_path + "\": " +\
-      self.ini["description"] + "."
-
-    for test_name in glob.glob(os.path.join(suite_path, "*.test")):
-      for test_pattern in self.args.tests:
-        if test_name.find(test_pattern) != -1:
-          self.tests.append(Test(test_name, self.args, self.ini))
-    print "Found " + str(len(self.tests)) + " tests."
-
-  def run_all(self):
-    """For each file in the test suite, run client program
-    assuming each file represents an individual test."""
-    try:
-      server = Server(self.ini["core"], self.ini["module"])
-    except Exception as e:
-      print e
-      raise RuntimeError("Unknown server: core = {0}, module = {1}".format(
-        self.ini["core"], self.ini["module"]))
-    server.deploy(self.ini["config"],
-                  server.find_exe(self.args.builddir, silent=False),
-		  self.args.vardir,
-                  self.args.mem, self.args.start_and_exit, self.args.gdb, self.args.valgrind,
-		  silent=False)
-    if self.args.start_and_exit:
-      print "  Start and exit requested, exiting..."
-      exit(0)
-
-    longsep = "=============================================================================="
-    shortsep = "------------------------------------------------------------"
-    print longsep
-    print string.ljust("TEST", 31), "RESULT"
-    print shortsep
-    failed_tests = []
-    self.ini["server"] = server
-
-    for test in self.tests:
-      sys.stdout.write(string.ljust(test.name, 31))
-# for better diagnostics in case of a long-running test
-      sys.stdout.flush()
-
-      if os.path.basename(test.name) in self.ini["disabled"]:
-        print "[ skip ]"
-      else:
-        test.run(server)
-        if not test.passed():
-          failed_tests.append(test.name)
-
-    print shortsep
-    if len(failed_tests):
-      print "Failed {0} tests: {1}.".format(len(failed_tests),
-                                            ", ".join(failed_tests))
-    server.stop(silent=False)
-    server.cleanup()
-
-    if self.args.valgrind and check_valgrind_log(server.valgrind_log):
-      print "  Error! There were warnings/errors in valgrind log file:"
-      print_tail_n(server.valgrind_log, 20)
-      return 1
-    return len(failed_tests)
+    """Each test suite contains a number of related tests files,
+    located in the same directory on disk. Each test file has
+    extention .test and contains a listing of server commands,
+    followed by their output. The commands are executed, and
+    obtained results are compared with pre-recorded output. In case
+    of a comparision difference, an exception is raised. A test suite
+    must also contain suite.ini, which describes how to start the
+    server for this suite, the client program to execute individual
+    tests and other suite properties. The server is started once per
+    suite."""
+
+    def __init__(self, suite_path, args):
+        """Initialize a test suite: check that it exists and contains
+        a syntactically correct configuration file. Then create
+        a test instance for each found test."""
+        self.args = args
+        self.tests = []
+        self.ini = {}
+
+        self.ini["core"] = "tarantool"
+        self.ini["module"] = "box"
+
+        if not os.access(suite_path, os.F_OK):
+            raise RuntimeError("Suite \"" + suite_path + "\" doesn't exist")
+
+        # read the suite config
+        config = ConfigParser.ConfigParser()
+        config.read(os.path.join(suite_path, "suite.ini"))
+        self.ini.update(dict(config.items("default")))
+        self.ini["config"] = os.path.join(suite_path, self.ini["config"])
+        if self.ini.has_key("disabled"):
+            self.ini["disabled"] = dict.fromkeys(self.ini["disabled"].split(" "))
+        else:
+            self.ini["disabled"] = dict()
+
+        print "Collecting tests in \"" + suite_path + "\": " +\
+            self.ini["description"] + "."
+
+        for test_name in glob.glob(os.path.join(suite_path, "*.test")):
+            for test_pattern in self.args.tests:
+                if test_name.find(test_pattern) != -1:
+                    self.tests.append(Test(test_name, self.args, self.ini))
+        print "Found " + str(len(self.tests)) + " tests."
+
+    def run_all(self):
+        """For each file in the test suite, run client program
+        assuming each file represents an individual test."""
+        try:
+            server = Server(self.ini["core"], self.ini["module"])
+        except Exception as e:
+            print e
+            raise RuntimeError("Unknown server: core = {0}, module = {1}".format(
+                               self.ini["core"], self.ini["module"]))
+
+        server.deploy(self.ini["config"],
+                      server.find_exe(self.args.builddir, silent=False),
+                      self.args.vardir, self.args.mem, self.args.start_and_exit, self.args.gdb,
+                      self.args.valgrind, silent=False)
+        if self.args.start_and_exit:
+            print "  Start and exit requested, exiting..."
+            exit(0)
+
+        longsep = "=============================================================================="
+        shortsep = "------------------------------------------------------------"
+        print longsep
+        print string.ljust("TEST", 31), "RESULT"
+        print shortsep
+        failed_tests = []
+        self.ini["server"] = server
+
+        for test in self.tests:
+            sys.stdout.write(string.ljust(test.name, 31))
+            # for better diagnostics in case of a long-running test
+            sys.stdout.flush()
+
+            if os.path.basename(test.name) in self.ini["disabled"]:
+                print "[ skip ]"
+            else:
+                test.run(server)
+                if not test.passed():
+                    failed_tests.append(test.name)
+
+        print shortsep
+        if len(failed_tests):
+            print "Failed {0} tests: {1}.".format(len(failed_tests),
+                                                ", ".join(failed_tests))
+        server.stop(silent=False)
+        server.cleanup()
+
+        if self.args.valgrind and check_valgrind_log(server.valgrind_log):
+            print "  Error! There were warnings/errors in valgrind log file:"
+            print_tail_n(server.valgrind_log, 20)
+            return 1
+        return len(failed_tests)
+
-- 
GitLab