Move to 2-space indent: top-level mobly files. (#701)

Pure shortening of the indentation width, no actual code change; a minimal before/after sketch follows the file list.
* top level files in mobly/
* setup.py
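
For context, here is a minimal before/after sketch of the reindentation, using a hypothetical `greet` function that is not part of this diff:

4-space indentation (old):

    def greet(name):
        if name:
            print('Hello, %s' % name)

2-space indentation (new):

    def greet(name):
      if name:
        print('Hello, %s' % name)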
diff --git a/mobly/asserts.py b/mobly/asserts.py
index bcffb87..3a63ccc 100644
--- a/mobly/asserts.py
+++ b/mobly/asserts.py
@@ -24,269 +24,269 @@
 
 
 def assert_equal(first, second, msg=None, extras=None):
-    """Assert the equality of objects, otherwise fail the test.
+  """Assert the equality of objects, otherwise fail the test.
 
-    Error message is "first != second" by default. Additional explanation can
-    be supplied in the message.
+  Error message is "first != second" by default. Additional explanation can
+  be supplied in the message.
 
-    Args:
-        first: The first object to compare.
-        second: The second object to compare.
-        msg: A string that adds additional info about the failure.
-        extras: An optional field for extra information to be included in
-            test result.
-    """
-    my_msg = None
-    try:
-        _pyunit_proxy.assertEqual(first, second)
-    except AssertionError as e:
-        my_msg = str(e)
-        if msg:
-            my_msg = "%s %s" % (my_msg, msg)
+  Args:
+    first: The first object to compare.
+    second: The second object to compare.
+    msg: A string that adds additional info about the failure.
+    extras: An optional field for extra information to be included in
+      test result.
+  """
+  my_msg = None
+  try:
+    _pyunit_proxy.assertEqual(first, second)
+  except AssertionError as e:
+    my_msg = str(e)
+    if msg:
+      my_msg = "%s %s" % (my_msg, msg)
 
-    # This raise statement is outside of the above except statement to prevent
-    # Python3's exception message from having two tracebacks.
-    if my_msg is not None:
-        raise signals.TestFailure(my_msg, extras=extras)
+  # This raise statement is outside of the above except statement to prevent
+  # Python3's exception message from having two tracebacks.
+  if my_msg is not None:
+    raise signals.TestFailure(my_msg, extras=extras)
 
 
 def assert_raises(expected_exception, extras=None, *args, **kwargs):
-    """Assert that an exception is raised when a function is called.
+  """Assert that an exception is raised when a function is called.
 
-    If no exception is raised, test fail. If an exception is raised but not
-    of the expected type, the exception is let through.
+  If no exception is raised, the test fails. If an exception is raised but not
+  of the expected type, the exception is let through.
 
-    This should only be used as a context manager:
-        with assert_raises(Exception):
-            func()
+  This should only be used as a context manager:
+    with assert_raises(Exception):
+      func()
 
-    Args:
-        expected_exception: An exception class that is expected to be
-            raised.
-        extras: An optional field for extra information to be included in
-            test result.
-    """
-    context = _AssertRaisesContext(expected_exception, extras=extras)
-    return context
+  Args:
+    expected_exception: An exception class that is expected to be
+      raised.
+    extras: An optional field for extra information to be included in
+      test result.
+  """
+  context = _AssertRaisesContext(expected_exception, extras=extras)
+  return context
 
 
 def assert_raises_regex(expected_exception,
-                        expected_regex,
-                        extras=None,
-                        *args,
-                        **kwargs):
-    """Assert that an exception is raised when a function is called.
+            expected_regex,
+            extras=None,
+            *args,
+            **kwargs):
+  """Assert that an exception is raised when a function is called.
 
-    If no exception is raised, test fail. If an exception is raised but not
-    of the expected type, the exception is let through. If an exception of the
-    expected type is raised but the error message does not match the
-    expected_regex, test fail.
+  If no exception is raised, the test fails. If an exception is raised but
+  not of the expected type, the exception is let through. If an exception of
+  the expected type is raised but the error message does not match the
+  expected_regex, the test fails.
 
-    This should only be used as a context manager:
-        with assert_raises(Exception):
-            func()
+  This should only be used as a context manager:
+    with assert_raises_regex(Exception, 'some regex'):
+      func()
 
-    Args:
-        expected_exception: An exception class that is expected to be
-            raised.
-        extras: An optional field for extra information to be included in
-            test result.
-    """
-    context = _AssertRaisesContext(expected_exception,
-                                   expected_regex,
-                                   extras=extras)
-    return context
+  Args:
+    expected_exception: An exception class that is expected to be
+      raised.
+    extras: An optional field for extra information to be included in
+      test result.
+  """
+  context = _AssertRaisesContext(expected_exception,
+                   expected_regex,
+                   extras=extras)
+  return context
 
 
 def assert_true(expr, msg, extras=None):
-    """Assert an expression evaluates to true, otherwise fail the test.
+  """Assert an expression evaluates to true, otherwise fail the test.
 
-    Args:
-        expr: The expression that is evaluated.
-        msg: A string explaining the details in case of failure.
-        extras: An optional field for extra information to be included in
-            test result.
-    """
-    if not expr:
-        fail(msg, extras)
+  Args:
+    expr: The expression that is evaluated.
+    msg: A string explaining the details in case of failure.
+    extras: An optional field for extra information to be included in
+      test result.
+  """
+  if not expr:
+    fail(msg, extras)
 
 
 def assert_false(expr, msg, extras=None):
-    """Assert an expression evaluates to false, otherwise fail the test.
+  """Assert an expression evaluates to false, otherwise fail the test.
 
-    Args:
-        expr: The expression that is evaluated.
-        msg: A string explaining the details in case of failure.
-        extras: An optional field for extra information to be included in
-            test result.
-    """
-    if expr:
-        fail(msg, extras)
+  Args:
+    expr: The expression that is evaluated.
+    msg: A string explaining the details in case of failure.
+    extras: An optional field for extra information to be included in
+      test result.
+  """
+  if expr:
+    fail(msg, extras)
 
 
 def skip(reason, extras=None):
-    """Skip a test.
+  """Skip a test.
 
-    Args:
-        reason: The reason this test is skipped.
-        extras: An optional field for extra information to be included in
-            test result.
+  Args:
+    reason: The reason this test is skipped.
+    extras: An optional field for extra information to be included in
+      test result.
 
-    Raises:
-        signals.TestSkip: Mark a test as skipped.
-    """
-    raise signals.TestSkip(reason, extras)
+  Raises:
+    signals.TestSkip: Mark a test as skipped.
+  """
+  raise signals.TestSkip(reason, extras)
 
 
 def skip_if(expr, reason, extras=None):
-    """Skip a test if expression evaluates to True.
+  """Skip a test if expression evaluates to True.
 
-    Args:
-        expr: The expression that is evaluated.
-        reason: The reason this test is skipped.
-        extras: An optional field for extra information to be included in
-            test result.
-    """
-    if expr:
-        skip(reason, extras)
+  Args:
+    expr: The expression that is evaluated.
+    reason: The reason this test is skipped.
+    extras: An optional field for extra information to be included in
+      test result.
+  """
+  if expr:
+    skip(reason, extras)
 
 
 def abort_class(reason, extras=None):
-    """Abort all subsequent tests within the same test class in one iteration.
+  """Abort all subsequent tests within the same test class in one iteration.
 
-    If one test class is requested multiple times in a test run, this can
-    only abort one of the requested executions, NOT all.
+  If one test class is requested multiple times in a test run, this can
+  only abort one of the requested executions, NOT all.
 
-    Args:
-        reason: The reason to abort.
-        extras: An optional field for extra information to be included in
-            test result.
+  Args:
+    reason: The reason to abort.
+    extras: An optional field for extra information to be included in
+      test result.
 
-    Raises:
-        signals.TestAbortClass: Abort all subsequent tests in a test class.
-    """
-    raise signals.TestAbortClass(reason, extras)
+  Raises:
+    signals.TestAbortClass: Abort all subsequent tests in a test class.
+  """
+  raise signals.TestAbortClass(reason, extras)
 
 
 def abort_class_if(expr, reason, extras=None):
-    """Abort all subsequent tests within the same test class in one iteration,
-    if expression evaluates to True.
+  """Abort all subsequent tests within the same test class in one iteration,
+  if the expression evaluates to True.
 
-    If one test class is requested multiple times in a test run, this can
-    only abort one of the requested executions, NOT all.
+  If one test class is requested multiple times in a test run, this can
+  only abort one of the requested executions, NOT all.
 
-    Args:
-        expr: The expression that is evaluated.
-        reason: The reason to abort.
-        extras: An optional field for extra information to be included in
-            test result.
+  Args:
+    expr: The expression that is evaluated.
+    reason: The reason to abort.
+    extras: An optional field for extra information to be included in
+      test result.
 
-    Raises:
-        signals.TestAbortClass: Abort all subsequent tests in a test class.
-    """
-    if expr:
-        abort_class(reason, extras)
+  Raises:
+    signals.TestAbortClass: Abort all subsequent tests in a test class.
+  """
+  if expr:
+    abort_class(reason, extras)
 
 
 def abort_all(reason, extras=None):
-    """Abort all subsequent tests, including the ones not in this test class or
-    iteration.
+  """Abort all subsequent tests, including the ones not in this test class or
+  iteration.
 
-    Args:
-        reason: The reason to abort.
-        extras: An optional field for extra information to be included in
-            test result.
+  Args:
+    reason: The reason to abort.
+    extras: An optional field for extra information to be included in
+      test result.
 
-    Raises:
-        signals.TestAbortAll: Abort all subsequent tests.
-    """
-    raise signals.TestAbortAll(reason, extras)
+  Raises:
+    signals.TestAbortAll: Abort all subsequent tests.
+  """
+  raise signals.TestAbortAll(reason, extras)
 
 
 def abort_all_if(expr, reason, extras=None):
-    """Abort all subsequent tests, if the expression evaluates to True.
+  """Abort all subsequent tests, if the expression evaluates to True.
 
-    Args:
-        expr: The expression that is evaluated.
-        reason: The reason to abort.
-        extras: An optional field for extra information to be included in
-            test result.
+  Args:
+    expr: The expression that is evaluated.
+    reason: The reason to abort.
+    extras: An optional field for extra information to be included in
+      test result.
 
-    Raises:
-        signals.TestAbortAll: Abort all subsequent tests.
-    """
-    if expr:
-        abort_all(reason, extras)
+  Raises:
+    signals.TestAbortAll: Abort all subsequent tests.
+  """
+  if expr:
+    abort_all(reason, extras)
 
 
 def fail(msg, extras=None):
-    """Explicitly fail a test.
+  """Explicitly fail a test.
 
-    Args:
-        msg: A string explaining the details of the failure.
-        extras: An optional field for extra information to be included in
-            test result.
+  Args:
+    msg: A string explaining the details of the failure.
+    extras: An optional field for extra information to be included in
+      test result.
 
-    Raises:
-        signals.TestFailure: Mark a test as failed.
-    """
-    raise signals.TestFailure(msg, extras)
+  Raises:
+    signals.TestFailure: Mark a test as failed.
+  """
+  raise signals.TestFailure(msg, extras)
 
 
 def explicit_pass(msg, extras=None):
-    """Explicitly pass a test.
+  """Explicitly pass a test.
 
-    This will pass the test explicitly regardless of any other error happened
-    in the test body. E.g. even if errors have been recorded with `expects`,
-    the test will still be marked pass if this is called.
+  This will pass the test explicitly regardless of any other errors that
+  happened in the test body. E.g. even if errors have been recorded with
+  `expects`, the test will still be marked as passed if this is called.
 
-    A test without uncaught exception will pass implicitly so this should be
-    used scarcely.
+  A test without an uncaught exception will pass implicitly, so this should
+  be used sparingly.
 
-    Args:
-        msg: A string explaining the details of the passed test.
-        extras: An optional field for extra information to be included in
-            test result.
+  Args:
+    msg: A string explaining the details of the passed test.
+    extras: An optional field for extra information to be included in
+      test result.
 
-    Raises:
-        signals.TestPass: Mark a test as passed.
-    """
-    raise signals.TestPass(msg, extras)
+  Raises:
+    signals.TestPass: Mark a test as passed.
+  """
+  raise signals.TestPass(msg, extras)
 
 
 class _AssertRaisesContext(object):
-    """A context manager used to implement TestCase.assertRaises* methods."""
+  """A context manager used to implement TestCase.assertRaises* methods."""
 
-    def __init__(self, expected, expected_regexp=None, extras=None):
-        self.expected = expected
-        self.failureException = signals.TestFailure
-        self.expected_regexp = expected_regexp
-        self.extras = extras
+  def __init__(self, expected, expected_regexp=None, extras=None):
+    self.expected = expected
+    self.failureException = signals.TestFailure
+    self.expected_regexp = expected_regexp
+    self.extras = extras
 
-    def __enter__(self):
-        return self
+  def __enter__(self):
+    return self
 
-    def __exit__(self, exc_type, exc_value, tb):
-        if exc_type is None:
-            try:
-                exc_name = self.expected.__name__
-            except AttributeError:
-                exc_name = str(self.expected)
-            raise signals.TestFailure('%s not raised' % exc_name,
-                                      extras=self.extras)
-        if not issubclass(exc_type, self.expected):
-            # let unexpected exceptions pass through
-            return False
-        self.exception = exc_value  # store for later retrieval
-        if self.expected_regexp is None:
-            return True
+  def __exit__(self, exc_type, exc_value, tb):
+    if exc_type is None:
+      try:
+        exc_name = self.expected.__name__
+      except AttributeError:
+        exc_name = str(self.expected)
+      raise signals.TestFailure('%s not raised' % exc_name,
+                    extras=self.extras)
+    if not issubclass(exc_type, self.expected):
+      # let unexpected exceptions pass through
+      return False
+    self.exception = exc_value  # store for later retrieval
+    if self.expected_regexp is None:
+      return True
 
-        expected_regexp = self.expected_regexp
-        if isinstance(expected_regexp, str):
-            expected_regexp = re.compile(expected_regexp)
-        if not expected_regexp.search(str(exc_value)):
-            raise signals.TestFailure(
-                '"%s" does not match "%s"' %
-                (expected_regexp.pattern, str(exc_value)),
-                extras=self.extras)
-        return True
+    expected_regexp = self.expected_regexp
+    if isinstance(expected_regexp, str):
+      expected_regexp = re.compile(expected_regexp)
+    if not expected_regexp.search(str(exc_value)):
+      raise signals.TestFailure(
+        '"%s" does not match "%s"' %
+        (expected_regexp.pattern, str(exc_value)),
+        extras=self.extras)
+    return True
diff --git a/mobly/base_instrumentation_test.py b/mobly/base_instrumentation_test.py
index f8443dd..2561025 100644
--- a/mobly/base_instrumentation_test.py
+++ b/mobly/base_instrumentation_test.py
@@ -23,960 +23,960 @@
 
 
 class _InstrumentationStructurePrefixes(object):
-    """Class containing prefixes that structure insturmentation output.
+  """Class containing prefixes that structure insturmentation output.
 
-    Android instrumentation generally follows the following format:
+  Android instrumentation generally follows the following format:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        INSTRUMENTATION_STATUS: ...
-        ...
-        INSTRUMENTATION_STATUS: ...
-        INSTRUMENTATION_STATUS_CODE: ...
-        INSTRUMENTATION_STATUS: ...
-        ...
-        INSTRUMENTATION_STATUS: ...
-        INSTRUMENTATION_STATUS_CODE: ...
-        ...
-        INSTRUMENTATION_RESULT: ...
-        ...
-        INSTRUMENTATION_RESULT: ...
-        ...
-        INSTRUMENTATION_CODE: ...
+    INSTRUMENTATION_STATUS: ...
+    ...
+    INSTRUMENTATION_STATUS: ...
+    INSTRUMENTATION_STATUS_CODE: ...
+    INSTRUMENTATION_STATUS: ...
+    ...
+    INSTRUMENTATION_STATUS: ...
+    INSTRUMENTATION_STATUS_CODE: ...
+    ...
+    INSTRUMENTATION_RESULT: ...
+    ...
+    INSTRUMENTATION_RESULT: ...
+    ...
+    INSTRUMENTATION_CODE: ...
 
-    This means that these prefixes can be used to guide parsing
-    the output of the instrumentation command into the different
-    instrumetnation test methods.
+  This means that these prefixes can be used to guide parsing
+  the output of the instrumentation command into the different
+  instrumentation test methods.
 
-    Refer to the following Android Framework package for more details:
+  Refer to the following Android Framework package for more details:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        com.android.commands.am.AM
+    com.android.commands.am.AM
 
-    """
+  """
 
-    STATUS = 'INSTRUMENTATION_STATUS:'
-    STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE:'
-    RESULT = 'INSTRUMENTATION_RESULT:'
-    CODE = 'INSTRUMENTATION_CODE:'
-    FAILED = 'INSTRUMENTATION_FAILED:'
+  STATUS = 'INSTRUMENTATION_STATUS:'
+  STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE:'
+  RESULT = 'INSTRUMENTATION_RESULT:'
+  CODE = 'INSTRUMENTATION_CODE:'
+  FAILED = 'INSTRUMENTATION_FAILED:'
 
 
 class _InstrumentationKnownStatusKeys(object):
-    """Commonly used keys used in instrumentation output for listing
-    instrumentation test method result properties.
+  """Commonly used keys used in instrumentation output for listing
+  instrumentation test method result properties.
 
-    An instrumenation status line usually contains a key-value pair such as
-    the following:
+  An instrumentation status line usually contains a key-value pair such as
+  the following:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        INSTRUMENTATION_STATUS: <key>=<value>
+    INSTRUMENTATION_STATUS: <key>=<value>
 
-    Some of these key-value pairs are very common and represent test case
-    properties. This mapping is used to handle each of the corresponding
-    key-value pairs different than less important key-value pairs.
+  Some of these key-value pairs are very common and represent test case
+  properties. This mapping is used to handle each of the corresponding
+  key-value pairs differently from less important key-value pairs.
 
-    Refer to the following Android Framework packages for more details:
+  Refer to the following Android Framework packages for more details:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        android.app.Instrumentation
-        android.support.test.internal.runner.listener.InstrumentationResultPrinter
+    android.app.Instrumentation
+    android.support.test.internal.runner.listener.InstrumentationResultPrinter
 
-    TODO: Convert android.support.* to androidx.*,
-    (https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
-    """
+  TODO: Convert android.support.* to androidx.*,
+  (https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
+  """
 
-    CLASS = 'class'
-    ERROR = 'Error'
-    STACK = 'stack'
-    TEST = 'test'
-    STREAM = 'stream'
+  CLASS = 'class'
+  ERROR = 'Error'
+  STACK = 'stack'
+  TEST = 'test'
+  STREAM = 'stream'
 
 
 class _InstrumentationStatusCodes(object):
-    """A mapping of instrumentation status codes to test method results.
+  """A mapping of instrumentation status codes to test method results.
 
-    When instrumentation runs, at various points output is created in a series
-    of blocks that terminate as follows:
+  When instrumentation runs, at various points output is created in a series
+  of blocks that terminate as follows:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        INSTRUMENTATION_STATUS_CODE: 1
+    INSTRUMENTATION_STATUS_CODE: 1
 
-    These blocks typically have several status keys in them, and they indicate
-    the progression of a particular instrumentation test method. When the
-    corresponding instrumentation test method finishes, there is generally a
-    line which includes a status code that gives thes the test result.
+  These blocks typically have several status keys in them, and they indicate
+  the progression of a particular instrumentation test method. When the
+  corresponding instrumentation test method finishes, there is generally a
+  line which includes a status code that gives the test result.
 
-    The UNKNOWN status code is not an actual status code and is only used to
-    represent that a status code has not yet been read for an instrumentation
-    block.
+  The UNKNOWN status code is not an actual status code and is only used to
+  represent that a status code has not yet been read for an instrumentation
+  block.
 
-    Refer to the following Android Framework package for more details:
+  Refer to the following Android Framework package for more details:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        android.support.test.internal.runner.listener.InstrumentationResultPrinter
+    android.support.test.internal.runner.listener.InstrumentationResultPrinter
 
-    TODO: Convert android.support.* to androidx.*,
-    (https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
-    """
+  TODO: Convert android.support.* to androidx.*,
+  (https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
+  """
 
-    UNKNOWN = None
-    OK = '0'
-    START = '1'
-    IN_PROGRESS = '2'
-    ERROR = '-1'
-    FAILURE = '-2'
-    IGNORED = '-3'
-    ASSUMPTION_FAILURE = '-4'
+  UNKNOWN = None
+  OK = '0'
+  START = '1'
+  IN_PROGRESS = '2'
+  ERROR = '-1'
+  FAILURE = '-2'
+  IGNORED = '-3'
+  ASSUMPTION_FAILURE = '-4'
 
 
 class _InstrumentationStatusCodeCategories(object):
-    """A mapping of instrumentation test method results to categories.
+  """A mapping of instrumentation test method results to categories.
 
-    Aside from the TIMING category, these categories roughly map to Mobly
-    signals and are used for determining how a particular instrumentation test
-    method gets recorded.
-    """
+  Aside from the TIMING category, these categories roughly map to Mobly
+  signals and are used for determining how a particular instrumentation test
+  method gets recorded.
+  """
 
-    TIMING = [
-        _InstrumentationStatusCodes.START,
-        _InstrumentationStatusCodes.IN_PROGRESS,
-    ]
-    PASS = [
-        _InstrumentationStatusCodes.OK,
-    ]
-    FAIL = [
-        _InstrumentationStatusCodes.ERROR,
-        _InstrumentationStatusCodes.FAILURE,
-    ]
-    SKIPPED = [
-        _InstrumentationStatusCodes.IGNORED,
-        _InstrumentationStatusCodes.ASSUMPTION_FAILURE,
-    ]
+  TIMING = [
+    _InstrumentationStatusCodes.START,
+    _InstrumentationStatusCodes.IN_PROGRESS,
+  ]
+  PASS = [
+    _InstrumentationStatusCodes.OK,
+  ]
+  FAIL = [
+    _InstrumentationStatusCodes.ERROR,
+    _InstrumentationStatusCodes.FAILURE,
+  ]
+  SKIPPED = [
+    _InstrumentationStatusCodes.IGNORED,
+    _InstrumentationStatusCodes.ASSUMPTION_FAILURE,
+  ]
 
 
 class _InstrumentationKnownResultKeys(object):
-    """Commonly used keys for outputting instrumentation errors.
+  """Commonly used keys for outputting instrumentation errors.
 
-    When instrumentation finishes running all of the instrumentation test
-    methods, a result line will appear as follows:
+  When instrumentation finishes running all of the instrumentation test
+  methods, a result line will appear as follows:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        INSTRUMENTATION_RESULT:
+    INSTRUMENTATION_RESULT:
 
-    If something wrong happened during the instrumentation run such as an
-    application under test crash, the line will appear similarly as thus:
+  If something went wrong during the instrumentation run, such as an
+  application under test crash, the line will appear similar to this:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        INSTRUMENTATION_RESULT: shortMsg=Process crashed.
+    INSTRUMENTATION_RESULT: shortMsg=Process crashed.
 
-    Since these keys indicate that something wrong has happened to the
-    instrumentation run, they should be checked for explicitly.
+  Since these keys indicate that something wrong has happened to the
+  instrumentation run, they should be checked for explicitly.
 
-    Refer to the following documentation page for more information:
+  Refer to the following documentation page for more information:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        https://developer.android.com/reference/android/app/ActivityManager.ProcessErrorStateInfo.html
+    https://developer.android.com/reference/android/app/ActivityManager.ProcessErrorStateInfo.html
 
-    """
+  """
 
-    LONGMSG = 'longMsg'
-    SHORTMSG = 'shortMsg'
+  LONGMSG = 'longMsg'
+  SHORTMSG = 'shortMsg'
 
 
 class _InstrumentationResultSignals(object):
-    """Instrumenttion result block strings for signalling run completion.
+  """Instrumenttion result block strings for signalling run completion.
 
-    The final section of the instrumentation output generally follows this
-    format:
+  The final section of the instrumentation output generally follows this
+  format:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        INSTRUMENTATION_RESULT: stream=
-        ...
-        INSTRUMENTATION_CODE -1
+    INSTRUMENTATION_RESULT: stream=
+    ...
+    INSTRUMENTATION_CODE -1
 
-    Inside of the ellipsed section, one of these signaling strings should be
-    present. If they are not present, this usually means that the
-    instrumentation run has failed in someway such as a crash. Because the
-    final instrumentation block simply summarizes information, simply roughly
-    checking for a particilar string should be sufficient to check to a proper
-    run completion as the contents of the instrumentation result block don't
-    really matter.
+  Inside the elided section, one of these signaling strings should be
+  present. If they are not present, this usually means that the
+  instrumentation run has failed in some way, such as a crash. Because the
+  final instrumentation block simply summarizes information, roughly
+  checking for a particular string should be sufficient to confirm proper
+  run completion, as the contents of the instrumentation result block don't
+  really matter.
 
-    Refer to the following JUnit package for more details:
+  Refer to the following JUnit package for more details:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        junit.textui.ResultPrinter
+    junit.textui.ResultPrinter
 
-    """
+  """
 
-    FAIL = 'FAILURES!!!'
-    PASS = 'OK ('
+  FAIL = 'FAILURES!!!'
+  PASS = 'OK ('
 
 
 class _InstrumentationBlockStates(Enum):
-    """States used for determing what the parser is currently parsing.
+  """States used for determing what the parser is currently parsing.
 
-    The parse always starts and ends a block in the UNKNOWN state, which is
-    used to indicate that either a method or a result block (matching the
-    METHOD and RESULT states respectively) are valid follow ups, which means
-    that parser should be checking for a structure prefix that indicates which
-    of those two states it should transition to. If the parser is in the
-    METHOD state, then the parser will be parsing input into test methods.
-    Otherwise, the parse can simply concatenate all the input to check for
-    some final run completion signals.
-    """
+  The parser always starts and ends a block in the UNKNOWN state, which is
+  used to indicate that either a method or a result block (matching the
+  METHOD and RESULT states respectively) is a valid follow-up, which means
+  that the parser should be checking for a structure prefix that indicates
+  which of those two states it should transition to. If the parser is in the
+  METHOD state, then the parser will be parsing input into test methods.
+  Otherwise, the parser can simply concatenate all the input to check for
+  some final run completion signals.
+  """
 
-    UNKNOWN = 0
-    METHOD = 1
-    RESULT = 2
+  UNKNOWN = 0
+  METHOD = 1
+  RESULT = 2
 
 
 class _InstrumentationBlock(object):
-    """Container class for parsed instrumentation output for instrumentation
-    test methods.
+  """Container class for parsed instrumentation output for instrumentation
+  test methods.
 
-    Instrumentation test methods typically follow the follwoing format:
+  Instrumentation test methods typically follow the following format:
 
-    .. code-block:: none
+  .. code-block:: none
 
-        INSTRUMENTATION_STATUS: <key>=<value>
-        ...
-        INSTRUMENTATION_STATUS: <key>=<value>
-        INSTRUMENTATION_STATUS_CODE: <status code #>
+    INSTRUMENTATION_STATUS: <key>=<value>
+    ...
+    INSTRUMENTATION_STATUS: <key>=<value>
+    INSTRUMENTATION_STATUS_CODE: <status code #>
 
-    The main issue with parsing this however is that the key-value pairs can
-    span multiple lines such as this:
+  The main issue with parsing this, however, is that the key-value pairs can
+  span multiple lines such as this:
 
-    .. code-block:: none
+  .. code-block:: none
 
-      INSTRUMENTATION_STATUS: stream=
-      Error in ...
-      ...
+    INSTRUMENTATION_STATUS: stream=
+    Error in ...
+    ...
 
-    Or, such as this:
+  Or, such as this:
 
-    .. code-block:: none
+  .. code-block:: none
 
-      INSTRUMENTATION_STATUS: stack=...
-      ...
+    INSTRUMENTATION_STATUS: stack=...
+    ...
 
-    Because these keys are poentially very long, constant string contatention
-    is potentially inefficent. Instead, this class builds up a buffer to store
-    the raw output until it is processed into an actual test result by the
-    _InstrumentationBlockFormatter class.
+  Because these keys are potentially very long, constant string concatenation
+  is potentially inefficient. Instead, this class builds up a buffer to store
+  the raw output until it is processed into an actual test result by the
+  _InstrumentationBlockFormatter class.
 
-    Additionally, this class also serves to store the parser state, which
-    means that the BaseInstrumentationTestClass does not need to keep any
-    potentially volatile instrumentation related state, so multiple
-    instrumentation runs should have completely separate parsing states.
+  Additionally, this class also serves to store the parser state, which
+  means that the BaseInstrumentationTestClass does not need to keep any
+  potentially volatile instrumentation related state, so multiple
+  instrumentation runs should have completely separate parsing states.
 
-    This class is also used for storing result blocks although very little
-    needs to be done for those.
+  This class is also used for storing result blocks although very little
+  needs to be done for those.
 
-    Attributes:
-        begin_time: string, optional timestamp for when the test corresponding
-            to the instrumentation block began.
-        current_key: string, the current key that is being parsed, default to
-            _InstrumentationKnownStatusKeys.STREAM.
-        error_message: string, an error message indicating that something
-            unexpected happened during a instrumentatoin test method.
-        known_keys: dict, well known keys that are handled uniquely.
-        prefix: string, a prefix to add to the class name of the
-            instrumentation test methods.
-        previous_instrumentation_block: _InstrumentationBlock, the last parsed
-            instrumentation block.
-        state: _InstrumentationBlockStates, the current state of the parser.
-        status_code: string, the state code for an instrumentation method
-            block.
-        unknown_keys: dict, arbitrary keys that are handled generically.
+  Attributes:
+    begin_time: string, optional timestamp for when the test corresponding
+      to the instrumentation block began.
+    current_key: string, the current key that is being parsed, default to
+      _InstrumentationKnownStatusKeys.STREAM.
+    error_message: string, an error message indicating that something
+      unexpected happened during an instrumentation test method.
+    known_keys: dict, well known keys that are handled uniquely.
+    prefix: string, a prefix to add to the class name of the
+      instrumentation test methods.
+    previous_instrumentation_block: _InstrumentationBlock, the last parsed
+      instrumentation block.
+    state: _InstrumentationBlockStates, the current state of the parser.
+    status_code: string, the status code for an instrumentation method
+      block.
+    unknown_keys: dict, arbitrary keys that are handled generically.
+  """
+
+  def __init__(self,
+         state=_InstrumentationBlockStates.UNKNOWN,
+         prefix=None,
+         previous_instrumentation_block=None):
+    self.state = state
+    self.prefix = prefix
+    self.previous_instrumentation_block = previous_instrumentation_block
+    if previous_instrumentation_block:
+      # The parser never needs lookback for two previous blocks,
+      # so unset to allow previous blocks to get garbage collected.
+      previous_instrumentation_block.previous_instrumentation_block = None
+
+    self._empty = True
+    self.error_message = ''
+    self.status_code = _InstrumentationStatusCodes.UNKNOWN
+
+    self.current_key = _InstrumentationKnownStatusKeys.STREAM
+    self.known_keys = {
+      _InstrumentationKnownStatusKeys.STREAM: [],
+      _InstrumentationKnownStatusKeys.CLASS: [],
+      _InstrumentationKnownStatusKeys.ERROR: [],
+      _InstrumentationKnownStatusKeys.STACK: [],
+      _InstrumentationKnownStatusKeys.TEST: [],
+      _InstrumentationKnownResultKeys.LONGMSG: [],
+      _InstrumentationKnownResultKeys.SHORTMSG: [],
+    }
+    self.unknown_keys = defaultdict(list)
+
+    self.begin_time = None
+
+  @property
+  def is_empty(self):
+    """Deteremines whether or not anything has been parsed with this
+    instrumentation block.
+
+    Returns:
+      A boolean indicating whether or not this instrumentation block
+      has parsed and contains any output.
     """
+    return self._empty
 
-    def __init__(self,
-                 state=_InstrumentationBlockStates.UNKNOWN,
-                 prefix=None,
-                 previous_instrumentation_block=None):
-        self.state = state
-        self.prefix = prefix
-        self.previous_instrumentation_block = previous_instrumentation_block
-        if previous_instrumentation_block:
-            # The parser never needs lookback for two previous blocks,
-            # so unset to allow previous blocks to get garbage collected.
-            previous_instrumentation_block.previous_instrumentation_block = None
+  def set_error_message(self, error_message):
+    """Sets an error message on an instrumentation block.
 
-        self._empty = True
-        self.error_message = ''
-        self.status_code = _InstrumentationStatusCodes.UNKNOWN
+    This method is used exclusively to indicate that a test method failed
+    to complete, which is usually caused by a crash of some sort such that
+    the test method is marked as error instead of ignored.
 
-        self.current_key = _InstrumentationKnownStatusKeys.STREAM
-        self.known_keys = {
-            _InstrumentationKnownStatusKeys.STREAM: [],
-            _InstrumentationKnownStatusKeys.CLASS: [],
-            _InstrumentationKnownStatusKeys.ERROR: [],
-            _InstrumentationKnownStatusKeys.STACK: [],
-            _InstrumentationKnownStatusKeys.TEST: [],
-            _InstrumentationKnownResultKeys.LONGMSG: [],
-            _InstrumentationKnownResultKeys.SHORTMSG: [],
-        }
-        self.unknown_keys = defaultdict(list)
+    Args:
+      error_message: string, an error message to be added to the
+        TestResultRecord to explain that something wrong happened.
+    """
+    self._empty = False
+    self.error_message = error_message
 
-        self.begin_time = None
+  def _remove_structure_prefix(self, prefix, line):
+    """Helper function for removing the structure prefix for parsing.
 
-    @property
-    def is_empty(self):
-        """Deteremines whether or not anything has been parsed with this
-        instrumentation block.
+    Args:
+      prefix: string, a _InstrumentationStructurePrefixes to remove from
+        the raw output.
+      line: string, the raw line from the instrumentation output.
 
-        Returns:
-            A boolean indicating whether or not the this instrumentation block
-            has parsed and contains any output.
-        """
-        return self._empty
+    Returns:
+      A string containing a key-value pair describing some property
+      of the current instrumentation test method.
+    """
+    return line[len(prefix):].strip()
 
-    def set_error_message(self, error_message):
-        """Sets an error message on an instrumentation block.
+  def set_status_code(self, status_code_line):
+    """Sets the status code for the instrumentation test method, used in
+    determining the test result.
 
-        This method is used exclusively to indicate that a test method failed
-        to complete, which is usually cause by a crash of some sort such that
-        the test method is marked as error instead of ignored.
+    Args:
+      status_code_line: string, the raw instrumentation output line that
+        contains the status code of the instrumentation block.
+    """
+    self._empty = False
+    self.status_code = self._remove_structure_prefix(
+      _InstrumentationStructurePrefixes.STATUS_CODE,
+      status_code_line,
+    )
+    if self.status_code == _InstrumentationStatusCodes.START:
+      self.begin_time = utils.get_current_epoch_time()
 
-        Args:
-            error_message: string, an error message to be added to the
-                TestResultRecord to explain that something wrong happened.
-        """
-        self._empty = False
-        self.error_message = error_message
+  def set_key(self, structure_prefix, key_line):
+    """Sets the current key for the instrumentation block.
 
-    def _remove_structure_prefix(self, prefix, line):
-        """Helper function for removing the structure prefix for parsing.
+    For unknown keys, the key is added to the value list in order to
+    better contextualize the value in the output.
 
-        Args:
-            prefix: string, a _InstrumentationStructurePrefixes to remove from
-                the raw output.
-            line: string, the raw line from the instrumentation output.
+    Args:
+      structure_prefix: string, the structure prefix that was matched
+        and that needs to be removed.
+      key_line: string, the raw instrumentation output line that contains
+        the key-value pair.
+    """
+    self._empty = False
+    key_value = self._remove_structure_prefix(
+      structure_prefix,
+      key_line,
+    )
+    if '=' in key_value:
+      (key, value) = key_value.split('=', 1)
+      self.current_key = key
+      if key in self.known_keys:
+        self.known_keys[key].append(value)
+      else:
+        self.unknown_keys[key].append(key_value)
 
-        Returns:
-            A string containing a key value pair descripting some property
-            of the current instrumentation test method.
-        """
-        return line[len(prefix):].strip()
+  def add_value(self, line):
+    """Adds unstructured or multi-line value output to the current parsed
+    instrumentation block for outputting later.
 
-    def set_status_code(self, status_code_line):
-        """Sets the status code for the instrumentation test method, used in
-        determining the test result.
+    Usually, this will add extra lines to the value list for the current
+    key-value pair. However, sometimes, such as when instrumentation
+    failed to start, output does not follow the structured prefix format.
+    In this case, adding all of the output is still useful so that a user
+    can debug the issue.
 
-        Args:
-            status_code_line: string, the raw instrumentation output line that
-                contains the status code of the instrumentation block.
-        """
-        self._empty = False
-        self.status_code = self._remove_structure_prefix(
-            _InstrumentationStructurePrefixes.STATUS_CODE,
-            status_code_line,
-        )
-        if self.status_code == _InstrumentationStatusCodes.START:
-            self.begin_time = utils.get_current_epoch_time()
+    Args:
+      line: string, the raw instrumentation line to append to the value
+        list.
+    """
+    # Don't count whitespace only lines.
+    if line.strip():
+      self._empty = False
 
-    def set_key(self, structure_prefix, key_line):
-        """Sets the current key for the instrumentation block.
+    if self.current_key in self.known_keys:
+      self.known_keys[self.current_key].append(line)
+    else:
+      self.unknown_keys[self.current_key].append(line)
 
-        For unknown keys, the key is added to the value list in order to
-        better contextualize the value in the output.
+  def transition_state(self, new_state):
+    """Transitions or sets the current instrumentation block to the new
+    parser state.
 
-        Args:
-            structure_prefix: string, the structure prefix that was matched
-                and that needs to be removed.
-            key_line: string, the raw instrumentation output line that contains
-                the key-value pair.
-        """
-        self._empty = False
-        key_value = self._remove_structure_prefix(
-            structure_prefix,
-            key_line,
-        )
-        if '=' in key_value:
-            (key, value) = key_value.split('=', 1)
-            self.current_key = key
-            if key in self.known_keys:
-                self.known_keys[key].append(value)
-            else:
-                self.unknown_keys[key].append(key_value)
+    Args:
+      new_state: _InstrumentationBlockStates, the state that the parser
+        should transition to.
 
-    def add_value(self, line):
-        """Adds unstructured or multi-line value output to the current parsed
-        instrumentation block for outputting later.
-
-        Usually, this will add extra lines to the value list for the current
-        key-value pair. However, sometimes, such as when instrumentation
-        failed to start, output does not follow the structured prefix format.
-        In this case, adding all of the output is still useful so that a user
-        can debug the issue.
-
-        Args:
-            line: string, the raw instrumentation line to append to the value
-                list.
-        """
-        # Don't count whitespace only lines.
-        if line.strip():
-            self._empty = False
-
-        if self.current_key in self.known_keys:
-            self.known_keys[self.current_key].append(line)
-        else:
-            self.unknown_keys[self.current_key].append(line)
-
-    def transition_state(self, new_state):
-        """Transitions or sets the current instrumentation block to the new
-        parser state.
-
-        Args:
-            new_state: _InstrumentationBlockStates, the state that the parser
-                should transition to.
-
-        Returns:
-            A new instrumentation block set to the new state, representing
-            the start of parsing a new instrumentation test method.
-            Alternatively, if the current instrumentation block represents the
-            start of parsing a new instrumentation block (state UNKNOWN), then
-            this returns the current instrumentation block set to the now
-            known parsing state.
-        """
-        if self.state == _InstrumentationBlockStates.UNKNOWN:
-            self.state = new_state
-            return self
-        else:
-            next_block = _InstrumentationBlock(
-                state=new_state,
-                prefix=self.prefix,
-                previous_instrumentation_block=self,
-            )
-            if self.status_code in _InstrumentationStatusCodeCategories.TIMING:
-                next_block.begin_time = self.begin_time
-            return next_block
+    Returns:
+      A new instrumentation block set to the new state, representing
+      the start of parsing a new instrumentation test method.
+      Alternatively, if the current instrumentation block represents the
+      start of parsing a new instrumentation block (state UNKNOWN), then
+      this returns the current instrumentation block set to the now
+      known parsing state.
+    """
+    if self.state == _InstrumentationBlockStates.UNKNOWN:
+      self.state = new_state
+      return self
+    else:
+      next_block = _InstrumentationBlock(
+        state=new_state,
+        prefix=self.prefix,
+        previous_instrumentation_block=self,
+      )
+      if self.status_code in _InstrumentationStatusCodeCategories.TIMING:
+        next_block.begin_time = self.begin_time
+      return next_block
 
 
 class _InstrumentationBlockFormatter(object):
-    """Takes an instrumentation block and converts it into a Mobly test
-    result.
+  """Takes an instrumentation block and converts it into a Mobly test
+  result.
+  """
+
+  DEFAULT_INSTRUMENTATION_METHOD_NAME = 'instrumentation_method'
+
+  def __init__(self, instrumentation_block):
+    self._prefix = instrumentation_block.prefix
+    self._status_code = instrumentation_block.status_code
+    self._error_message = instrumentation_block.error_message
+    self._known_keys = {}
+    self._unknown_keys = {}
+    for key, value in instrumentation_block.known_keys.items():
+      self._known_keys[key] = '\n'.join(
+        instrumentation_block.known_keys[key]).rstrip()
+    for key, value in instrumentation_block.unknown_keys.items():
+      self._unknown_keys[key] = '\n'.join(
+        instrumentation_block.unknown_keys[key]).rstrip()
+    self._begin_time = instrumentation_block.begin_time
+
+  def _get_name(self):
+    """Gets the method name of the test method for the instrumentation
+    method block.
+
+    Returns:
+      A string containing the name of the instrumentation test method's
+      test or a default name if no name was parsed.
     """
+    if self._known_keys[_InstrumentationKnownStatusKeys.TEST]:
+      return self._known_keys[_InstrumentationKnownStatusKeys.TEST]
+    else:
+      return self.DEFAULT_INSTRUMENTATION_METHOD_NAME
 
-    DEFAULT_INSTRUMENTATION_METHOD_NAME = 'instrumentation_method'
+  def _get_class(self):
+    """Gets the class name of the test method for the instrumentation
+    method block.
 
-    def __init__(self, instrumentation_block):
-        self._prefix = instrumentation_block.prefix
-        self._status_code = instrumentation_block.status_code
-        self._error_message = instrumentation_block.error_message
-        self._known_keys = {}
-        self._unknown_keys = {}
-        for key, value in instrumentation_block.known_keys.items():
-            self._known_keys[key] = '\n'.join(
-                instrumentation_block.known_keys[key]).rstrip()
-        for key, value in instrumentation_block.unknown_keys.items():
-            self._unknown_keys[key] = '\n'.join(
-                instrumentation_block.unknown_keys[key]).rstrip()
-        self._begin_time = instrumentation_block.begin_time
+    Returns:
+      A string containing the class name of the instrumentation test
+      method's test or empty string if no name was parsed. If a prefix
+      was specified, then the prefix will be prepended to the class
+      name.
+    """
+    class_parts = [
+      self._prefix,
+      self._known_keys[_InstrumentationKnownStatusKeys.CLASS]
+    ]
+    return '.'.join(filter(None, class_parts))
 
-    def _get_name(self):
-        """Gets the method name of the test method for the instrumentation
-        method block.
+  def _get_full_name(self):
+    """Gets the qualified name of the test method corresponding to the
+    instrumentation block.
 
-        Returns:
-            A string containing the name of the instrumentation test method's
-            test or a default name if no name was parsed.
-        """
-        if self._known_keys[_InstrumentationKnownStatusKeys.TEST]:
-            return self._known_keys[_InstrumentationKnownStatusKeys.TEST]
-        else:
-            return self.DEFAULT_INSTRUMENTATION_METHOD_NAME
+    Returns:
+      A string containing the fully qualified name of the
+      instrumentation test method. If parts are missing, then degrades
+      steadily.
+    """
+    full_name_parts = [self._get_class(), self._get_name()]
+    return '#'.join(filter(None, full_name_parts))
 
-    def _get_class(self):
-        """Gets the class name of the test method for the instrumentation
-        method block.
+  def _get_details(self):
+    """Gets the output for the detail section of the TestResultRecord.
 
-        Returns:
-            A string containing the class name of the instrumentation test
-            method's test or empty string if no name was parsed. If a prefix
-            was specified, then the prefix will be prepended to the class
-            name.
-        """
-        class_parts = [
-            self._prefix,
-            self._known_keys[_InstrumentationKnownStatusKeys.CLASS]
-        ]
-        return '.'.join(filter(None, class_parts))
+    Returns:
+      A string to set for a TestResultRecord's details.
+    """
+    detail_parts = [self._get_full_name(), self._error_message]
+    return '\n'.join(filter(None, detail_parts))
 
-    def _get_full_name(self):
-        """Gets the qualified name of the test method corresponding to the
-        instrumentation block.
+  def _get_extras(self):
+    """Gets the output for the extras section of the TestResultRecord.
 
-        Returns:
-            A string containing the fully qualified name of the
-            instrumentation test method. If parts are missing, then degrades
-            steadily.
-        """
-        full_name_parts = [self._get_class(), self._get_name()]
-        return '#'.join(filter(None, full_name_parts))
+    Returns:
+      A string to set for a TestResultRecord's extras.
+    """
+    # Add empty line to start key-value pairs on a new line.
+    extra_parts = ['']
 
-    def _get_details(self):
-        """Gets the output for the detail section of the TestResultRecord.
+    for value in self._unknown_keys.values():
+      extra_parts.append(value)
 
-        Returns:
-            A string to set for a TestResultRecord's details.
-        """
-        detail_parts = [self._get_full_name(), self._error_message]
-        return '\n'.join(filter(None, detail_parts))
+    extra_parts.append(
+      self._known_keys[_InstrumentationKnownStatusKeys.STREAM])
+    extra_parts.append(
+      self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG])
+    extra_parts.append(
+      self._known_keys[_InstrumentationKnownResultKeys.LONGMSG])
+    extra_parts.append(
+      self._known_keys[_InstrumentationKnownStatusKeys.ERROR])
 
-    def _get_extras(self):
-        """Gets the output for the extras section of the TestResultRecord.
+    if self._known_keys[
+        _InstrumentationKnownStatusKeys.STACK] not in self._known_keys[
+          _InstrumentationKnownStatusKeys.STREAM]:
+      extra_parts.append(
+        self._known_keys[_InstrumentationKnownStatusKeys.STACK])
 
-        Returns:
-            A string to set for a TestResultRecord's extras.
-        """
-        # Add empty line to start key-value pairs on a new line.
-        extra_parts = ['']
+    return '\n'.join(filter(None, extra_parts))
 
-        for value in self._unknown_keys.values():
-            extra_parts.append(value)
+  def _is_failed(self):
+    """Determines if the test corresponding to the instrumentation block
+    failed.
 
-        extra_parts.append(
-            self._known_keys[_InstrumentationKnownStatusKeys.STREAM])
-        extra_parts.append(
-            self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG])
-        extra_parts.append(
-            self._known_keys[_InstrumentationKnownResultKeys.LONGMSG])
-        extra_parts.append(
-            self._known_keys[_InstrumentationKnownStatusKeys.ERROR])
+    This method can not be used to tell if a test method passed and
+    should not be used for such a purpose.
 
-        if self._known_keys[
-                _InstrumentationKnownStatusKeys.STACK] not in self._known_keys[
-                    _InstrumentationKnownStatusKeys.STREAM]:
-            extra_parts.append(
-                self._known_keys[_InstrumentationKnownStatusKeys.STACK])
+    Returns:
+      A boolean indicating if the test method failed.
+    """
+    if self._status_code in _InstrumentationStatusCodeCategories.FAIL:
+      return True
+    elif (self._known_keys[_InstrumentationKnownStatusKeys.STACK]
+        and self._status_code !=
+        _InstrumentationStatusCodes.ASSUMPTION_FAILURE):
+      return True
+    elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]:
+      return True
+    elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]:
+      return True
+    elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]:
+      return True
+    else:
+      return False
 
-        return '\n'.join(filter(None, extra_parts))
+  def create_test_record(self, mobly_test_class):
+    """Creates a TestResultRecord for the instrumentation block.
 
-    def _is_failed(self):
-        """Determines if the test corresponding to the instrumentation block
-        failed.
+    Args:
+      mobly_test_class: string, the name of the Mobly test case
+        executing the instrumentation run.
 
-        This method can not be used to tell if a test method passed and
-        should not be used for such a purpose.
+    Returns:
+      A TestResultRecord with an appropriate signals exception
+      representing the instrumentation test method's result status.
+    """
+    details = self._get_details()
+    extras = self._get_extras()
 
-        Returns:
-            A boolean indicating if the test method failed.
-        """
-        if self._status_code in _InstrumentationStatusCodeCategories.FAIL:
-            return True
-        elif (self._known_keys[_InstrumentationKnownStatusKeys.STACK]
-              and self._status_code !=
-              _InstrumentationStatusCodes.ASSUMPTION_FAILURE):
-            return True
-        elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]:
-            return True
-        elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]:
-            return True
-        elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]:
-            return True
-        else:
-            return False
+    tr_record = records.TestResultRecord(
+      t_name=self._get_full_name(),
+      t_class=mobly_test_class,
+    )
+    if self._begin_time:
+      tr_record.begin_time = self._begin_time
 
-    def create_test_record(self, mobly_test_class):
-        """Creates a TestResultRecord for the instrumentation block.
+    if self._is_failed():
+      tr_record.test_fail(
+        e=signals.TestFailure(details=details, extras=extras))
+    elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED:
+      tr_record.test_skip(
+        e=signals.TestSkip(details=details, extras=extras))
+    elif self._status_code in _InstrumentationStatusCodeCategories.PASS:
+      tr_record.test_pass(
+        e=signals.TestPass(details=details, extras=extras))
+    elif self._status_code in _InstrumentationStatusCodeCategories.TIMING:
+      if self._error_message:
+        tr_record.test_error(
+          e=signals.TestError(details=details, extras=extras))
+      else:
+        tr_record = None
+    else:
+      tr_record.test_error(
+        e=signals.TestError(details=details, extras=extras))
+    if self._known_keys[_InstrumentationKnownStatusKeys.STACK]:
+      tr_record.termination_signal.stacktrace = self._known_keys[
+        _InstrumentationKnownStatusKeys.STACK]
+    return tr_record
 
-        Args:
-            mobly_test_class: string, the name of the Mobly test case
-                executing the instrumentation run.
+  def has_completed_result_block_format(self, error_message):
+    """Checks the instrumentation result block for a signal indicating
+    normal completion.
 
-        Returns:
-            A TestResultRecord with an appropriate signals exception
-            representing the instrumentation test method's result status.
-        """
-        details = self._get_details()
-        extras = self._get_extras()
+    Args:
+      error_message: string, the error message to give if the
+        instrumentation run did not complete successfully.
 
-        tr_record = records.TestResultRecord(
-            t_name=self._get_full_name(),
-            t_class=mobly_test_class,
-        )
-        if self._begin_time:
-            tr_record.begin_time = self._begin_time
+    Returns:
+      A boolean indicating whether the instrumentation run passed or
+      failed overall.
 
-        if self._is_failed():
-            tr_record.test_fail(
-                e=signals.TestFailure(details=details, extras=extras))
-        elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED:
-            tr_record.test_skip(
-                e=signals.TestSkip(details=details, extras=extras))
-        elif self._status_code in _InstrumentationStatusCodeCategories.PASS:
-            tr_record.test_pass(
-                e=signals.TestPass(details=details, extras=extras))
-        elif self._status_code in _InstrumentationStatusCodeCategories.TIMING:
-            if self._error_message:
-                tr_record.test_error(
-                    e=signals.TestError(details=details, extras=extras))
-            else:
-                tr_record = None
-        else:
-            tr_record.test_error(
-                e=signals.TestError(details=details, extras=extras))
-        if self._known_keys[_InstrumentationKnownStatusKeys.STACK]:
-            tr_record.termination_signal.stacktrace = self._known_keys[
-                _InstrumentationKnownStatusKeys.STACK]
-        return tr_record
-
-    def has_completed_result_block_format(self, error_message):
-        """Checks the instrumentation result block for a signal indicating
-        normal completion.
-
-        Args:
-            error_message: string, the error message to give if the
-                instrumentation run did not complete successfully.-
-
-        Returns:
-            A boolean indicating whether or not the instrumentation run passed
-            or failed overall.
-
-        Raises:
-            signals.TestError: Error raised if the instrumentation run did not
-                complete because of a crash or some other issue.
-        """
-        extras = self._get_extras()
-        if _InstrumentationResultSignals.PASS in extras:
-            return True
-        elif _InstrumentationResultSignals.FAIL in extras:
-            return False
-        else:
-            raise signals.TestError(details=error_message, extras=extras)
+    Raises:
+      signals.TestError: Error raised if the instrumentation run did not
+        complete because of a crash or some other issue.
+    """
+    extras = self._get_extras()
+    if _InstrumentationResultSignals.PASS in extras:
+      return True
+    elif _InstrumentationResultSignals.FAIL in extras:
+      return False
+    else:
+      raise signals.TestError(details=error_message, extras=extras)
 
 
 class InstrumentationTestMixin(object):
-    """A mixin for Mobly test classes to inherit from for instrumentation tests.
+  """A mixin for Mobly test classes to inherit from for instrumentation tests.
 
-    This class should be used in a subclass of both BaseTestClass and this class
-    in order to provide instrumentation test capabilities. This mixin is
-    explicitly for the case where the underlying BaseTestClass cannot be
-    replaced with BaseInstrumentationTestClass. In general, prefer using
-    BaseInstrumentationTestClass instead.
+  This class should be used in a subclass of both BaseTestClass and this class
+  in order to provide instrumentation test capabilities. This mixin is
+  explicitly for the case where the underlying BaseTestClass cannot be
+  replaced with BaseInstrumentationTestClass. In general, prefer using
+  BaseInstrumentationTestClass instead.
 
-    Attributes:
-        DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
-            instrumentation params contained within user params.
-        DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
-            message to set if something has prevented something in the
-            instrumentation test run from completing properly.
+  Attributes:
+    DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
+      instrumentation params contained within user params.
+    DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
+      message to set if something has prevented the instrumentation
+      test run from completing properly.
+  """
+
+  DEFAULT_INSTRUMENTATION_OPTION_PREFIX = 'instrumentation_option_'
+  DEFAULT_INSTRUMENTATION_ERROR_MESSAGE = ('instrumentation run exited '
+                       'unexpectedly')
+
+  def _previous_block_never_completed(self, current_block, previous_block,
+                    new_state):
+    """Checks if the previous instrumentation method block completed.
+
+    Args:
+      current_block: _InstrumentationBlock, the current instrumentation
+        block to check for being a different instrumentation test
+        method.
+      previous_block: _InstrumentationBlock, the previous
+        instrumentation block to check for an incomplete status.
+      new_state: _InstrumentationBlockStates, the next state for the
+        parser, used to check for the instrumentation run ending
+        with an incomplete test.
+
+    Returns:
+      A boolean indicating whether the previous instrumentation block
+      completed executing.
     """
+    if previous_block:
+      previously_timing_block = (
+        previous_block.status_code in
+        _InstrumentationStatusCodeCategories.TIMING)
+      currently_new_block = (
+        current_block.status_code == _InstrumentationStatusCodes.START
+        or new_state == _InstrumentationBlockStates.RESULT)
+      return all([previously_timing_block, currently_new_block])
+    else:
+      return False
 
-    DEFAULT_INSTRUMENTATION_OPTION_PREFIX = 'instrumentation_option_'
-    DEFAULT_INSTRUMENTATION_ERROR_MESSAGE = ('instrumentation run exited '
-                                             'unexpectedly')
+  def _create_formatters(self, instrumentation_block, new_state):
+    """Creates the _InstrumentationBlockFormatters for outputting the
+    instrumentation method blocks that have finished parsing.
 
-    def _previous_block_never_completed(self, current_block, previous_block,
-                                        new_state):
-        """Checks if the previous instrumentation method block completed.
+    Args:
+      instrumentation_block: _InstrumentationBlock, the current
+        instrumentation method block to create formatters based upon.
+      new_state: _InstrumentationBlockState, the next state that the
+        parser will transition to.
 
-        Args:
-            current_block: _InstrumentationBlock, the current instrumentation
-                block to check for being a different instrumentation test
-                method.
-            previous_block: _InstrumentationBlock, rhe previous
-                instrumentation block to check for an incomplete status.
-            new_state: _InstrumentationBlockStates, the next state for the
-                parser, used to check for the instrumentation run ending
-                with an incomplete test.
+    Returns:
+      A list of the formatters that need to create and add
+      TestResultRecords to the test results.
+    """
+    formatters = []
+    if self._previous_block_never_completed(
+        current_block=instrumentation_block,
+        previous_block=instrumentation_block.
+        previous_instrumentation_block,
+        new_state=new_state):
+      instrumentation_block.previous_instrumentation_block.set_error_message(
+        self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
+      formatters.append(
+        _InstrumentationBlockFormatter(
+          instrumentation_block.previous_instrumentation_block))
 
-        Returns:
-            A boolean indicating whether the previous instrumentation block
-            completed executing.
-        """
-        if previous_block:
-            previously_timing_block = (
-                previous_block.status_code in
-                _InstrumentationStatusCodeCategories.TIMING)
-            currently_new_block = (
-                current_block.status_code == _InstrumentationStatusCodes.START
-                or new_state == _InstrumentationBlockStates.RESULT)
-            return all([previously_timing_block, currently_new_block])
-        else:
-            return False
+    if not instrumentation_block.is_empty:
+      formatters.append(
+        _InstrumentationBlockFormatter(instrumentation_block))
+    return formatters
 
-    def _create_formatters(self, instrumentation_block, new_state):
-        """Creates the _InstrumentationBlockFormatters for outputting the
-        instrumentation method block that have finished parsing.
+  def _transition_instrumentation_block(
+      self,
+      instrumentation_block,
+      new_state=_InstrumentationBlockStates.UNKNOWN):
+    """Transitions and finishes the current instrumentation block.
 
-        Args:
-            instrumentation_block: _InstrumentationBlock, the current
-                instrumentation method block to create formatters based upon.
-            new_state: _InstrumentationBlockState, the next state that the
-                parser will transition to.
+    Args:
+      instrumentation_block: _InstrumentationBlock, the current
+        instrumentation block to finish.
+      new_state: _InstrumentationBlockState, the next state for the
+        parser to transition to.
 
-        Returns:
-            A list of the formatters tha need to create and add
-            TestResultRecords to the test results.
-        """
-        formatters = []
-        if self._previous_block_never_completed(
-                current_block=instrumentation_block,
-                previous_block=instrumentation_block.
-                previous_instrumentation_block,
-                new_state=new_state):
-            instrumentation_block.previous_instrumentation_block.set_error_message(
-                self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
-            formatters.append(
-                _InstrumentationBlockFormatter(
-                    instrumentation_block.previous_instrumentation_block))
+    Returns:
+      The new instrumentation block to use for storing parsed
+      instrumentation output.
+    """
+    formatters = self._create_formatters(instrumentation_block, new_state)
+    for formatter in formatters:
+      test_record = formatter.create_test_record(self.TAG)
+      if test_record:
+        self.results.add_record(test_record)
+        self.summary_writer.dump(test_record.to_dict(),
+                     records.TestSummaryEntryType.RECORD)
+    return instrumentation_block.transition_state(new_state=new_state)
 
-        if not instrumentation_block.is_empty:
-            formatters.append(
-                _InstrumentationBlockFormatter(instrumentation_block))
-        return formatters
+  def _parse_method_block_line(self, instrumentation_block, line):
+    """Parses the instrumnetation method block's line.
 
-    def _transition_instrumentation_block(
-            self,
-            instrumentation_block,
-            new_state=_InstrumentationBlockStates.UNKNOWN):
-        """Transitions and finishes the current instrumentation block.
+    Args:
+      instrumentation_block: _InstrumentationBlock, the current
+        instrumentation method block.
+      line: string, the raw instrumentation output line to parse.
 
-        Args:
-            instrumentation_block: _InstrumentationBlock, the current
-                instrumentation block to finish.
-            new_state: _InstrumentationBlockState, the next state for the
-                parser to transition to.
+    Returns:
+      The next instrumentation block, which should be used to continue
+      parsing instrumentation output.
+    """
+    if line.startswith(_InstrumentationStructurePrefixes.STATUS):
+      instrumentation_block.set_key(
+        _InstrumentationStructurePrefixes.STATUS, line)
+      return instrumentation_block
+    elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE):
+      instrumentation_block.set_status_code(line)
+      return self._transition_instrumentation_block(
+        instrumentation_block)
+    elif line.startswith(_InstrumentationStructurePrefixes.RESULT):
+      # Unexpected transition from method block -> result block
+      instrumentation_block.set_key(
+        _InstrumentationStructurePrefixes.RESULT, line)
+      return self._parse_result_line(
+        self._transition_instrumentation_block(
+          instrumentation_block,
+          new_state=_InstrumentationBlockStates.RESULT,
+        ),
+        line,
+      )
+    else:
+      instrumentation_block.add_value(line)
+      return instrumentation_block
 
-        Returns:
-            The new instrumentation block to use for storing parsed
-            instrumentation output.
-        """
-        formatters = self._create_formatters(instrumentation_block, new_state)
-        for formatter in formatters:
-            test_record = formatter.create_test_record(self.TAG)
-            if test_record:
-                self.results.add_record(test_record)
-                self.summary_writer.dump(test_record.to_dict(),
-                                         records.TestSummaryEntryType.RECORD)
-        return instrumentation_block.transition_state(new_state=new_state)
+  def _parse_result_block_line(self, instrumentation_block, line):
+    """Parses the instrumentation result block's line.
 
-    def _parse_method_block_line(self, instrumentation_block, line):
-        """Parses the instrumnetation method block's line.
+    Args:
+      instrumentation_block: _InstrumentationBlock, the instrumentation
+        result block for the instrumentation run.
+      line: string, the raw instrumentation output to add to the
+        instrumentation result block's _InstrumentationResultBlock
+        object.
 
-        Args:
-            instrumentation_block: _InstrumentationBlock, the current
-                instrumentation method block.
-            line: string, the raw instrumentation output line to parse.
+    Returns:
+      The instrumentation result block for the instrumentation run.
+    """
+    instrumentation_block.add_value(line)
+    return instrumentation_block
 
-        Returns:
-            The next instrumentation block, which should be used to continue
-            parsing instrumentation output.
-        """
-        if line.startswith(_InstrumentationStructurePrefixes.STATUS):
-            instrumentation_block.set_key(
-                _InstrumentationStructurePrefixes.STATUS, line)
-            return instrumentation_block
-        elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE):
-            instrumentation_block.set_status_code(line)
-            return self._transition_instrumentation_block(
-                instrumentation_block)
-        elif line.startswith(_InstrumentationStructurePrefixes.RESULT):
-            # Unexpected transition from method block -> result block
-            instrumentation_block.set_key(
-                _InstrumentationStructurePrefixes.RESULT, line)
-            return self._parse_result_line(
-                self._transition_instrumentation_block(
-                    instrumentation_block,
-                    new_state=_InstrumentationBlockStates.RESULT,
-                ),
-                line,
-            )
-        else:
-            instrumentation_block.add_value(line)
-            return instrumentation_block
+  def _parse_unknown_block_line(self, instrumentation_block, line):
+    """Parses a line from the instrumentation output from the UNKNOWN
+    parser state.
 
-    def _parse_result_block_line(self, instrumentation_block, line):
-        """Parses the instrumentation result block's line.
+    Args:
+      instrumentation_block: _InstrumentationBlock, the current
+        instrumentation block, where the correct categorization is not
+        yet known.
+      line: string, the raw instrumentation output line to be used to
+        determine the correct categorization.
 
-        Args:
-            instrumentation_block: _InstrumentationBlock, the instrumentation
-                result block for the instrumentation run.
-            line: string, the raw instrumentation output to add to the
-                instrumenation result block's _InstrumentationResultBlocki
-                object.
+    Returns:
+      The next instrumentation block to continue parsing with. Usually,
+      this is the same instrumentation block but with the state
+      transitioned appropriately.
+    """
+    if line.startswith(_InstrumentationStructurePrefixes.STATUS):
+      return self._parse_method_block_line(
+        self._transition_instrumentation_block(
+          instrumentation_block,
+          new_state=_InstrumentationBlockStates.METHOD,
+        ),
+        line,
+      )
+    elif (line.startswith(_InstrumentationStructurePrefixes.RESULT)
+        or _InstrumentationStructurePrefixes.FAILED in line):
+      return self._parse_result_block_line(
+        self._transition_instrumentation_block(
+          instrumentation_block,
+          new_state=_InstrumentationBlockStates.RESULT,
+        ),
+        line,
+      )
+    else:
+      # This would only really execute if instrumentation failed to start.
+      instrumentation_block.add_value(line)
+      return instrumentation_block
 
-        Returns:
-            The instrumentation result block for the instrumentation run.
-        """
-        instrumentation_block.add_value(line)
-        return instrumentation_block
+  def _parse_line(self, instrumentation_block, line):
+    """Parses an arbitrary line from the instrumentation output based upon
+    the current parser state.
 
-    def _parse_unknown_block_line(self, instrumentation_block, line):
-        """Parses a line from the instrumentation output from the UNKNOWN
-        parser state.
+    Args:
+      instrumentation_block: _InstrumentationBlock, an instrumentation
+        block with any of the possible parser states.
+      line: string, the raw instrumentation output line to parse
+        appropriately.
 
-        Args:
-            instrumentation_block: _InstrumentationBlock, the current
-                instrumenation block, where the correct categorization it noti
-                yet known.
-            line: string, the raw instrumenation output line to be used to
-                deteremine the correct categorization.
+    Returns:
+      The next instrumentation block to continue parsing with.
+    """
+    if instrumentation_block.state == _InstrumentationBlockStates.METHOD:
+      return self._parse_method_block_line(instrumentation_block, line)
+    elif instrumentation_block.state == _InstrumentationBlockStates.RESULT:
+      return self._parse_result_block_line(instrumentation_block, line)
+    else:
+      return self._parse_unknown_block_line(instrumentation_block, line)
 
-        Returns:
-            The next instrumentation block to continue parsing with. Usually,
-            this is the same instrumentation block but with the state
-            transitioned appropriately.
-        """
-        if line.startswith(_InstrumentationStructurePrefixes.STATUS):
-            return self._parse_method_block_line(
-                self._transition_instrumentation_block(
-                    instrumentation_block,
-                    new_state=_InstrumentationBlockStates.METHOD,
-                ),
-                line,
-            )
-        elif (line.startswith(_InstrumentationStructurePrefixes.RESULT)
-              or _InstrumentationStructurePrefixes.FAILED in line):
-            return self._parse_result_block_line(
-                self._transition_instrumentation_block(
-                    instrumentation_block,
-                    new_state=_InstrumentationBlockStates.RESULT,
-                ),
-                line,
-            )
-        else:
-            # This would only really execute if instrumentation failed to start.
-            instrumentation_block.add_value(line)
-            return instrumentation_block
+  def _finish_parsing(self, instrumentation_block):
+    """Finishes parsing the instrumentation result block for the final
+    instrumentation run status.
 
-    def _parse_line(self, instrumentation_block, line):
-        """Parses an arbitrary line from the instrumentation output based upon
-        the current parser state.
+    Args:
+      instrumentation_block: _InstrumentationBlock, the instrumentation
+        result block for the instrumentation run. Potentially, this
+        could actually be a method block if the instrumentation output
+        is malformed.
 
-        Args:
-            instrumentation_block: _InstrumentationBlock, an instrumentation
-                block with any of the possible parser states.
-            line: string, the raw instrumentation output line to parse
-                appropriately.
+    Returns:
+      A boolean indicating whether the instrumentation run completed
+        with all the tests passing.
 
-        Returns:
-            The next instrumenation block to continue parsing with.
-        """
-        if instrumentation_block.state == _InstrumentationBlockStates.METHOD:
-            return self._parse_method_block_line(instrumentation_block, line)
-        elif instrumentation_block.state == _InstrumentationBlockStates.RESULT:
-            return self._parse_result_block_line(instrumentation_block, line)
-        else:
-            return self._parse_unknown_block_line(instrumentation_block, line)
+    Raises:
+      signals.TestError: Error raised if the instrumentation failed to
+        complete with either a pass or fail status.
+    """
+    formatter = _InstrumentationBlockFormatter(instrumentation_block)
+    return formatter.has_completed_result_block_format(
+      self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
 
-    def _finish_parsing(self, instrumentation_block):
-        """Finishes parsing the instrumentation result block for the final
-        instrumentation run status.
+  def parse_instrumentation_options(self, parameters=None):
+    """Returns the options for the instrumentation test from user_params.
 
-        Args:
-            instrumentation_block: _InstrumentationBlock, the instrumentation
-                result block for the instrumenation run. Potentially, thisi
-                could actually be method block if the instrumentation outputi
-                is malformed.
+    By default, this method assumes that the correct instrumentation options
+    all start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX.
 
-        Returns:
-            A boolean indicating whether the instrumentation run completed
-                with all the tests passing.
+    Args:
+      parameters: dict, the key value pairs representing an assortment
+        of parameters including instrumentation options. Usually,
+        this argument will be from self.user_params.
 
-        Raises:
-            signals.TestError: Error raised if the instrumentation failed to
-                complete with either a pass or fail status.
-        """
-        formatter = _InstrumentationBlockFormatter(instrumentation_block)
-        return formatter.has_completed_result_block_format(
-            self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
+    Returns:
+      A dictionary of options/parameters for the instrumentation test.
+    """
+    if parameters is None:
+      return {}
 
-    def parse_instrumentation_options(self, parameters=None):
-        """Returns the options for the instrumentation test from user_params.
+    filtered_parameters = {}
+    for parameter_key, parameter_value in parameters.items():
+      if parameter_key.startswith(
+          self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):
+        option_key = parameter_key[len(
+          self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):]
+        filtered_parameters[option_key] = parameter_value
+    return filtered_parameters
 
-        By default, this method assume that the correct instrumentation options
-        all start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX.
+  def run_instrumentation_test(self,
+                 device,
+                 package,
+                 options=None,
+                 prefix=None,
+                 runner=None):
+    """Runs instrumentation tests on a device and creates test records.
 
-        Args:
-            parameters: dict, the key value pairs representing an assortment
-                of parameters including instrumentation options. Usually,
-                this argument will be from self.user_params.
+    Args:
+      device: AndroidDevice, the device to run instrumentation tests on.
+      package: string, the package name of the instrumentation tests.
+      options: dict, Instrumentation options for the instrumentation
+        tests.
+      prefix: string, an optional prefix for parser output for
+        distinguishing between instrumentation test runs.
+      runner: string, the runner to use for the instrumentation package,
+        defaults to DEFAULT_INSTRUMENTATION_RUNNER.
 
-        Returns:
-            A dictionary of options/parameters for the instrumentation tst.
-        """
-        if parameters is None:
-            return {}
+    Returns:
+      A boolean indicating whether or not all the instrumentation test
+        methods passed.
 
-        filtered_parameters = {}
-        for parameter_key, parameter_value in parameters.items():
-            if parameter_key.startswith(
-                    self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):
-                option_key = parameter_key[len(
-                    self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX):]
-                filtered_parameters[option_key] = parameter_value
-        return filtered_parameters
+    Raises:
+      TestError if the instrumentation run crashed or if parsing the
+        output failed.
+    """
+    # Dictionary hack to allow overwriting the instrumentation_block in the
+    # parse_instrumentation closure
+    instrumentation_block = [_InstrumentationBlock(prefix=prefix)]
 
-    def run_instrumentation_test(self,
-                                 device,
-                                 package,
-                                 options=None,
-                                 prefix=None,
-                                 runner=None):
-        """Runs instrumentation tests on a device and creates test records.
+    def parse_instrumentation(raw_line):
+      line = raw_line.rstrip().decode('utf-8')
+      logging.info(line)
+      instrumentation_block[0] = self._parse_line(
+        instrumentation_block[0], line)
 
-        Args:
-            device: AndroidDevice, the device to run instrumentation tests on.
-            package: string, the package name of the instrumentation tests.
-            options: dict, Instrumentation options for the instrumentation
-                tests.
-            prefix: string, an optional prefix for parser output for
-                distinguishing between instrumentation test runs.
-            runner: string, the runner to use for the instrumentation package,
-                default to DEFAULT_INSTRUMENTATION_RUNNER.
+    device.adb.instrument(package=package,
+                options=options,
+                runner=runner,
+                handler=parse_instrumentation)
 
-        Returns:
-            A boolean indicating whether or not all the instrumentation test
-                methods passed.
-
-        Raises:
-            TestError if the instrumentation run crashed or if parsing the
-                output failed.
-        """
-        # Dictionary hack to allow overwriting the instrumentation_block in the
-        # parse_instrumentation closure
-        instrumentation_block = [_InstrumentationBlock(prefix=prefix)]
-
-        def parse_instrumentation(raw_line):
-            line = raw_line.rstrip().decode('utf-8')
-            logging.info(line)
-            instrumentation_block[0] = self._parse_line(
-                instrumentation_block[0], line)
-
-        device.adb.instrument(package=package,
-                              options=options,
-                              runner=runner,
-                              handler=parse_instrumentation)
-
-        return self._finish_parsing(instrumentation_block[0])
+    return self._finish_parsing(instrumentation_block[0])
 
 
 class BaseInstrumentationTestClass(InstrumentationTestMixin,
-                                   base_test.BaseTestClass):
-    """Base class for all instrumentation test classes to inherit from.
+                   base_test.BaseTestClass):
+  """Base class for all instrumentation test classes to inherit from.
 
-    This class extends the BaseTestClass to add functionality to run and parse
-    the output of instrumentation runs.
+  This class extends the BaseTestClass to add functionality to run and parse
+  the output of instrumentation runs.
 
-    Attributes:
-        DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
-            instrumentation params contained within user params.
-        DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
-            message to set if something has prevented something in the
-            instrumentation test run from completing properly.
-    """
+  Attributes:
+    DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
+      instrumentation params contained within user params.
+    DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
+      message to set if something has prevented the instrumentation
+      test run from completing properly.
+  """
diff --git a/mobly/base_test.py b/mobly/base_test.py
index 90ff75a..c9b9fab 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -46,831 +46,831 @@
 
 
 class Error(Exception):
-    """Raised for exceptions that occurred in BaseTestClass."""
+  """Raised for exceptions that occurred in BaseTestClass."""
 
 
 class BaseTestClass(object):
-    """Base class for all test classes to inherit from.
+  """Base class for all test classes to inherit from.
 
-    This class gets all the controller objects from test_runner and executes
-    the tests requested within itself.
+  This class gets all the controller objects from test_runner and executes
+  the tests requested within itself.
 
-    Most attributes of this class are set at runtime based on the configuration
-    provided.
+  Most attributes of this class are set at runtime based on the configuration
+  provided.
 
-    The default logger in logging module is set up for each test run. If you
-    want to log info to the test run output file, use `logging` directly, like
-    `logging.info`.
+  The default logger in logging module is set up for each test run. If you
+  want to log info to the test run output file, use `logging` directly, like
+  `logging.info`.
 
-    Attributes:
-        tests: A list of strings, each representing a test method name.
-        TAG: A string used to refer to a test class. Default is the test class
-            name.
-        results: A records.TestResult object for aggregating test results from
-            the execution of tests.
-        controller_configs: dict, controller configs provided by the user via
-            test bed config.
-        current_test_info: RuntimeTestInfo, runtime information on the test
-            currently being executed.
-        root_output_path: string, storage path for output files associated with
-            the entire test run. A test run can have multiple test class
-            executions. This includes the test summary and Mobly log files.
-        log_path: string, storage path for files specific to a single test
-            class execution.
-        test_bed_name: [Deprecated, use 'testbed_name' instead]
-            string, the name of the test bed used by a test run.
-        testbed_name: string, the name of the test bed used by a test run.
-        user_params: dict, custom parameters from user, to be consumed by
-            the test logic.
+  Attributes:
+    tests: A list of strings, each representing a test method name.
+    TAG: A string used to refer to a test class. Default is the test class
+      name.
+    results: A records.TestResult object for aggregating test results from
+      the execution of tests.
+    controller_configs: dict, controller configs provided by the user via
+      test bed config.
+    current_test_info: RuntimeTestInfo, runtime information on the test
+      currently being executed.
+    root_output_path: string, storage path for output files associated with
+      the entire test run. A test run can have multiple test class
+      executions. This includes the test summary and Mobly log files.
+    log_path: string, storage path for files specific to a single test
+      class execution.
+    test_bed_name: [Deprecated, use 'testbed_name' instead]
+      string, the name of the test bed used by a test run.
+    testbed_name: string, the name of the test bed used by a test run.
+    user_params: dict, custom parameters from user, to be consumed by
+      the test logic.
+  """
+
+  TAG = None
+
+  def __init__(self, configs):
+    """Constructor of BaseTestClass.
+
+    The constructor takes a config_parser.TestRunConfig object, which has
+    all the information needed to execute this test class, like log_path
+    and controller configurations. For details, see the definition of class
+    config_parser.TestRunConfig.
+
+    Args:
+      configs: A config_parser.TestRunConfig object.
+    """
+    self.tests = []
+    class_identifier = self.__class__.__name__
+    if configs.test_class_name_suffix:
+      class_identifier = '%s_%s' % (class_identifier,
+                      configs.test_class_name_suffix)
+    if self.TAG is None:
+      self.TAG = class_identifier
+    # Set params.
+    self.root_output_path = configs.log_path
+    self.log_path = os.path.join(self.root_output_path, class_identifier)
+    utils.create_dir(self.log_path)
+    # Deprecated, use 'testbed_name'
+    self.test_bed_name = configs.test_bed_name
+    self.testbed_name = configs.testbed_name
+    self.user_params = configs.user_params
+    self.results = records.TestResult()
+    self.summary_writer = configs.summary_writer
+    self._generated_test_table = collections.OrderedDict()
+    self._controller_manager = controller_manager.ControllerManager(
+      class_name=self.TAG, controller_configs=configs.controller_configs)
+    self.controller_configs = self._controller_manager.controller_configs
+
+  def unpack_userparams(self,
+              req_param_names=None,
+              opt_param_names=None,
+              **kwargs):
+    """An optional function that unpacks user defined parameters into
+    individual variables.
+
+    After unpacking, the params can be directly accessed with self.xxx.
+
+    If a required param is not provided, an exception is raised. If an
+    optional param is not provided, a warning line will be logged.
+
+    To provide a param, add it in the config file or pass it in as a kwarg.
+    If a param appears in both the config file and kwarg, the value in the
+    config file is used.
+
+    User params from the config file can also be directly accessed in
+    self.user_params.
+
+    Args:
+      req_param_names: A list of names of the required user params.
+      opt_param_names: A list of names of the optional user params.
+      **kwargs: Arguments that provide default values.
+        e.g. unpack_userparams(required_list, opt_list, arg_a='hello')
+        self.arg_a will be 'hello' unless it is specified again in
+        required_list or opt_list.
+
+    Raises:
+      Error: A required user param is not provided.
+    """
+    req_param_names = req_param_names or []
+    opt_param_names = opt_param_names or []
+    for k, v in kwargs.items():
+      if k in self.user_params:
+        v = self.user_params[k]
+      setattr(self, k, v)
+    for name in req_param_names:
+      if hasattr(self, name):
+        continue
+      if name not in self.user_params:
+        raise Error('Missing required user param "%s" in test '
+              'configuration.' % name)
+      setattr(self, name, self.user_params[name])
+    for name in opt_param_names:
+      if hasattr(self, name):
+        continue
+      if name in self.user_params:
+        setattr(self, name, self.user_params[name])
+      else:
+        logging.warning(
+          'Missing optional user param "%s" in '
+          'configuration, continue.', name)
+
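
To make the unpack_userparams contract above concrete, here is a short
illustrative sketch; the parameter names 'favorite_food' and 'retries' are
invented for the example.

    import logging

    from mobly import base_test


    class ExampleTest(base_test.BaseTestClass):

      def setup_class(self):
        # 'favorite_food' must be present in the testbed config; 'retries'
        # falls back to the keyword default of 3 when not configured.
        self.unpack_userparams(req_param_names=['favorite_food'], retries=3)
        logging.info('Using %s with %d retries.', self.favorite_food,
                     self.retries)
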
+  def register_controller(self, module, required=True, min_number=1):
+    """Loads a controller module and returns its loaded devices.
+
+    A Mobly controller module is a Python lib that can be used to control
+    a device, service, or equipment. To be Mobly compatible, a controller
+    module needs to have the following members:
+
+    .. code-block:: python
+
+      def create(configs):
+        [Required] Creates controller objects from configurations.
+
+        Args:
+          configs: A list of serialized data like string/dict. Each
+            element of the list is a configuration for a controller
+            object.
+
+        Returns:
+          A list of objects.
+
+      def destroy(objects):
+        [Required] Destroys controller objects created by the create
+        function. Each controller object shall be properly cleaned up
+        and all the resources held should be released, e.g. memory
+        allocation, sockets, file handlers etc.
+
+        Args:
+          objects: A list of controller objects created by the create
+            function.
+
+      def get_info(objects):
+        [Optional] Gets info from the controller objects used in a test
+        run. The info will be included in test_summary.yaml under
+        the key 'ControllerInfo'. Such information could include unique
+        ID, version, or anything that could be useful for describing the
+        test bed and debugging.
+
+        Args:
+          objects: A list of controller objects created by the create
+            function.
+
+        Returns:
+          A list of json serializable objects: each represents the
+            info of a controller object. The order of the info
+            objects should follow that of the input objects.
+
+    Registering a controller module declares a test class's dependency on the
+    controller. If the module config exists and the module matches the
+    controller interface, controller objects will be instantiated with
+    corresponding configs. The module should be imported first.
+
+    Args:
+      module: A module that follows the controller module interface.
+      required: A bool. If True, failing to register the specified
+        controller module raises exceptions. If False, the objects
+        failed to instantiate will be skipped.
+      min_number: An integer that is the minimum number of controller
+        objects to be created. Default is one, since you should not
+        register a controller module without expecting at least one
+        object.
+
+    Returns:
+      A list of controller objects instantiated from controller_module, or
+      None if no config existed for this controller and it was not a
+      required controller.
+
+    Raises:
+      ControllerError:
+        * The controller module has already been registered.
+        * The actual number of objects instantiated is less than
+          `min_number`.
+        * `required` is True and no corresponding config can be found.
+        * Any other error occurred in the registration process.
+    """
+    return self._controller_manager.register_controller(
+      module, required, min_number)
+
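
As a rough sketch of the controller interface documented above, a minimal
controller module could look like the following; the module, class, and config
names are made up, and Mobly controller modules also define
MOBLY_CONTROLLER_CONFIG_NAME to name their config section.

    # my_controller.py - a hypothetical controller module.
    MOBLY_CONTROLLER_CONFIG_NAME = 'MyController'


    class MyController(object):

      def __init__(self, config):
        self.config = config

      def close(self):
        # Release any held resources (sockets, file handles, etc.).
        pass


    def create(configs):
      # Required: build one controller object per config entry.
      return [MyController(config) for config in configs]


    def destroy(objects):
      # Required: clean up every object returned by create().
      for controller in objects:
        controller.close()


    def get_info(objects):
      # Optional: per-object info recorded under 'ControllerInfo'.
      return [{'config': controller.config} for controller in objects]

A test class that has imported such a module would then obtain its objects with
self.register_controller(my_controller).
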
+  def _record_controller_info(self):
+    # Collect controller information and write to test result.
+    for record in self._controller_manager.get_controller_info_records():
+      self.results.add_controller_info_record(record)
+      self.summary_writer.dump(
+        record.to_dict(), records.TestSummaryEntryType.CONTROLLER_INFO)
+
+  def _setup_generated_tests(self):
+    """Proxy function to guarantee the base implementation of
+    setup_generated_tests is called.
+
+    Returns:
+      True if setup is successful, False otherwise.
+    """
+    stage_name = STAGE_NAME_SETUP_GENERATED_TESTS
+    record = records.TestResultRecord(stage_name, self.TAG)
+    record.test_begin()
+    self.current_test_info = runtime_test_info.RuntimeTestInfo(
+      stage_name, self.log_path, record)
+    try:
+      with self._log_test_stage(stage_name):
+        self.setup_generated_tests()
+        return True
+    except Exception as e:
+      logging.exception('%s failed for %s.', stage_name, self.TAG)
+      record.test_error(e)
+      self.results.add_class_error(record)
+      self.summary_writer.dump(record.to_dict(),
+                   records.TestSummaryEntryType.RECORD)
+      return False
+
+  def setup_generated_tests(self):
+    """Preprocesses that need to be done before setup_class.
+
+    This phase is used to do pre-test processes like generating tests.
+    This is the only place `self.generate_tests` should be called.
+
+    If this function throws an error, the test class will be marked as failed
+    and the "Requested" field will be 0 because the number of tests
+    requested is unknown at this point.
     """
 
-    TAG = None
+  def _setup_class(self):
+    """Proxy function to guarantee the base implementation of setup_class
+    is called.
 
-    def __init__(self, configs):
-        """Constructor of BaseTestClass.
+    Returns:
+      If `self.results` is returned instead of None, this means something
+      has gone wrong, and the rest of the test class should not execute.
+    """
+    # Setup for the class.
+    class_record = records.TestResultRecord(STAGE_NAME_SETUP_CLASS,
+                        self.TAG)
+    class_record.test_begin()
+    self.current_test_info = runtime_test_info.RuntimeTestInfo(
+      STAGE_NAME_SETUP_CLASS, self.log_path, class_record)
+    expects.recorder.reset_internal_states(class_record)
+    try:
+      with self._log_test_stage(STAGE_NAME_SETUP_CLASS):
+        self.setup_class()
+    except signals.TestAbortSignal:
+      # Throw abort signals to outer try block for handling.
+      raise
+    except Exception as e:
+      # Setup class failed for unknown reasons.
+      # Fail the class and skip all tests.
+      logging.exception('Error in %s#setup_class.', self.TAG)
+      class_record.test_error(e)
+      self.results.add_class_error(class_record)
+      self._exec_procedure_func(self._on_fail, class_record)
+      class_record.update_record()
+      self.summary_writer.dump(class_record.to_dict(),
+                   records.TestSummaryEntryType.RECORD)
+      self._skip_remaining_tests(e)
+      return self.results
+    if expects.recorder.has_error:
+      self._exec_procedure_func(self._on_fail, class_record)
+      class_record.test_error()
+      class_record.update_record()
+      self.summary_writer.dump(class_record.to_dict(),
+                   records.TestSummaryEntryType.RECORD)
+      self.results.add_class_error(class_record)
+      self._skip_remaining_tests(
+        class_record.termination_signal.exception)
+      return self.results
 
-        The constructor takes a config_parser.TestRunConfig object and which has
-        all the information needed to execute this test class, like log_path
-        and controller configurations. For details, see the definition of class
-        config_parser.TestRunConfig.
+  def setup_class(self):
+    """Setup function that will be called before executing any test in the
+    class.
 
-        Args:
-            configs: A config_parser.TestRunConfig object.
-        """
-        self.tests = []
-        class_identifier = self.__class__.__name__
-        if configs.test_class_name_suffix:
-            class_identifier = '%s_%s' % (class_identifier,
-                                          configs.test_class_name_suffix)
-        if self.TAG is None:
-            self.TAG = class_identifier
-        # Set params.
-        self.root_output_path = configs.log_path
-        self.log_path = os.path.join(self.root_output_path, class_identifier)
-        utils.create_dir(self.log_path)
-        # Deprecated, use 'testbed_name'
-        self.test_bed_name = configs.test_bed_name
-        self.testbed_name = configs.testbed_name
-        self.user_params = configs.user_params
-        self.results = records.TestResult()
-        self.summary_writer = configs.summary_writer
-        self._generated_test_table = collections.OrderedDict()
-        self._controller_manager = controller_manager.ControllerManager(
-            class_name=self.TAG, controller_configs=configs.controller_configs)
-        self.controller_configs = self._controller_manager.controller_configs
+    To signal setup failure, use asserts or raise your own exception.
 
-    def unpack_userparams(self,
-                          req_param_names=None,
-                          opt_param_names=None,
-                          **kwargs):
-        """An optional function that unpacks user defined parameters into
-        individual variables.
+    Errors raised from `setup_class` will trigger `on_fail`.
 
-        After unpacking, the params can be directly accessed with self.xxx.
+    Implementation is optional.
+    """
 
-        If a required param is not provided, an exception is raised. If an
-        optional param is not provided, a warning line will be logged.
+  def _teardown_class(self):
+    """Proxy function to guarantee the base implementation of
+    teardown_class is called.
+    """
+    stage_name = STAGE_NAME_TEARDOWN_CLASS
+    record = records.TestResultRecord(stage_name, self.TAG)
+    record.test_begin()
+    self.current_test_info = runtime_test_info.RuntimeTestInfo(
+      stage_name, self.log_path, record)
+    expects.recorder.reset_internal_states(record)
+    try:
+      with self._log_test_stage(stage_name):
+        self.teardown_class()
+    except signals.TestAbortAll as e:
+      setattr(e, 'results', self.results)
+      raise
+    except Exception as e:
+      logging.exception('Error encountered in %s.', stage_name)
+      record.test_error(e)
+      record.update_record()
+      self.results.add_class_error(record)
+      self.summary_writer.dump(record.to_dict(),
+                   records.TestSummaryEntryType.RECORD)
+    else:
+      if expects.recorder.has_error:
+        record.update_record()
+        self.results.add_class_error(record)
+        self.summary_writer.dump(record.to_dict(),
+                     records.TestSummaryEntryType.RECORD)
+    finally:
+      self._clean_up()
 
-        To provide a param, add it in the config file or pass it in as a kwarg.
-        If a param appears in both the config file and kwarg, the value in the
-        config file is used.
+  def teardown_class(self):
+    """Teardown function that will be called after all the selected tests in
+    the test class have been executed.
 
-        User params from the config file can also be directly accessed in
-        self.user_params.
+    Errors raised from `teardown_class` do not trigger `on_fail`.
 
-        Args:
-            req_param_names: A list of names of the required user params.
-            opt_param_names: A list of names of the optional user params.
-            **kwargs: Arguments that provide default values.
-                e.g. unpack_userparams(required_list, opt_list, arg_a='hello')
-                self.arg_a will be 'hello' unless it is specified again in
-                required_list or opt_list.
+    Implementation is optional.
+    """
 
-        Raises:
-            Error: A required user params is not provided.
-        """
-        req_param_names = req_param_names or []
-        opt_param_names = opt_param_names or []
-        for k, v in kwargs.items():
-            if k in self.user_params:
-                v = self.user_params[k]
-            setattr(self, k, v)
-        for name in req_param_names:
-            if hasattr(self, name):
-                continue
-            if name not in self.user_params:
-                raise Error('Missing required user param "%s" in test '
-                            'configuration.' % name)
-            setattr(self, name, self.user_params[name])
-        for name in opt_param_names:
-            if hasattr(self, name):
-                continue
-            if name in self.user_params:
-                setattr(self, name, self.user_params[name])
-            else:
-                logging.warning(
-                    'Missing optional user param "%s" in '
-                    'configuration, continue.', name)
+  @contextlib.contextmanager
+  def _log_test_stage(self, stage_name):
+    """Logs the begin and end of a test stage.
 
-    def register_controller(self, module, required=True, min_number=1):
-        """Loads a controller module and returns its loaded devices.
+    This context adds two log lines meant for clarifying the boundary of
+    each execution stage in Mobly log.
 
-        A Mobly controller module is a Python lib that can be used to control
-        a device, service, or equipment. To be Mobly compatible, a controller
-        module needs to have the following members:
+    Args:
+      stage_name: string, name of the stage to log.
+    """
+    parent_token = self.current_test_info.name
+    # If the name of the stage is the same as the test name, the stage is
+    # class-level instead of test-level, so use the class's reference tag
+    # as the parent token instead.
+    if parent_token == stage_name:
+      parent_token = self.TAG
+    logging.debug(
+      TEST_STAGE_BEGIN_LOG_TEMPLATE.format(parent_token=parent_token,
+                         child_token=stage_name))
+    try:
+      yield
+    finally:
+      logging.debug(
+        TEST_STAGE_END_LOG_TEMPLATE.format(parent_token=parent_token,
+                           child_token=stage_name))
 
-        .. code-block:: python
+  def _setup_test(self, test_name):
+    """Proxy function to guarantee the base implementation of setup_test is
+    called.
+    """
+    with self._log_test_stage(STAGE_NAME_SETUP_TEST):
+      self.setup_test()
 
-            def create(configs):
-                [Required] Creates controller objects from configurations.
+  def setup_test(self):
+    """Setup function that will be called every time before executing each
+    test method in the test class.
 
-                Args:
-                    configs: A list of serialized data like string/dict. Each
-                        element of the list is a configuration for a controller
-                        object.
+    To signal setup failure, use asserts or raise your own exception.
 
-                Returns:
-                    A list of objects.
+    Implementation is optional.
+    """
 
-            def destroy(objects):
-                [Required] Destroys controller objects created by the create
-                function. Each controller object shall be properly cleaned up
-                and all the resources held should be released, e.g. memory
-                allocation, sockets, file handlers etc.
+  def _teardown_test(self, test_name):
+    """Proxy function to guarantee the base implementation of teardown_test
+    is called.
+    """
+    with self._log_test_stage(STAGE_NAME_TEARDOWN_TEST):
+      self.teardown_test()
 
-                Args:
-                    A list of controller objects created by the create function.
+  def teardown_test(self):
+    """Teardown function that will be called every time a test method has
+    been executed.
 
-            def get_info(objects):
-                [Optional] Gets info from the controller objects used in a test
-                run. The info will be included in test_summary.yaml under
-                the key 'ControllerInfo'. Such information could include unique
-                ID, version, or anything that could be useful for describing the
-                test bed and debugging.
+    Implementation is optional.
+    """
 
-                Args:
-                    objects: A list of controller objects created by the create
-                        function.
+  def _on_fail(self, record):
+    """Proxy function to guarantee the base implementation of on_fail is
+    called.
 
-                Returns:
-                    A list of json serializable objects: each represents the
-                        info of a controller object. The order of the info
-                        object should follow that of the input objects.
+    Args:
+      record: records.TestResultRecord, a copy of the test record for
+        this test, containing all information of the test execution
+        including exception objects.
+    """
+    self.on_fail(record)
 
-        Registering a controller module declares a test class's dependency the
-        controller. If the module config exists and the module matches the
-        controller interface, controller objects will be instantiated with
-        corresponding configs. The module should be imported first.
+  def on_fail(self, record):
+    """A function that is executed upon a test failure.
 
-        Args:
-            module: A module that follows the controller module interface.
-            required: A bool. If True, failing to register the specified
-                controller module raises exceptions. If False, the objects
-                failed to instantiate will be skipped.
-            min_number: An integer that is the minimum number of controller
-                objects to be created. Default is one, since you should not
-                register a controller module without expecting at least one
-                object.
+    User implementation is optional.
 
-        Returns:
-            A list of controller objects instantiated from controller_module, or
-            None if no config existed for this controller and it was not a
-            required controller.
+    Args:
+      record: records.TestResultRecord, a copy of the test record for
+        this test, containing all information of the test execution
+        including exception objects.
+    """
 
-        Raises:
-            ControllerError:
-                * The controller module has already been registered.
-                * The actual number of objects instantiated is less than the
-                * `min_number`.
-                * `required` is True and no corresponding config can be found.
-                * Any other error occurred in the registration process.
-        """
-        return self._controller_manager.register_controller(
-            module, required, min_number)
+  def _on_pass(self, record):
+    """Proxy function to guarantee the base implementation of on_pass is
+    called.
 
-    def _record_controller_info(self):
-        # Collect controller information and write to test result.
-        for record in self._controller_manager.get_controller_info_records():
-            self.results.add_controller_info_record(record)
-            self.summary_writer.dump(
-                record.to_dict(), records.TestSummaryEntryType.CONTROLLER_INFO)
+    Args:
+      record: records.TestResultRecord, a copy of the test record for
+        this test, containing all information of the test execution
+        including exception objects.
+    """
+    msg = record.details
+    if msg:
+      logging.info(msg)
+    self.on_pass(record)
 
-    def _setup_generated_tests(self):
-        """Proxy function to guarantee the base implementation of
-        setup_generated_tests is called.
+  def on_pass(self, record):
+    """A function that is executed upon a test passing.
 
-        Returns:
-            True if setup is successful, False otherwise.
-        """
-        stage_name = STAGE_NAME_SETUP_GENERATED_TESTS
-        record = records.TestResultRecord(stage_name, self.TAG)
-        record.test_begin()
-        self.current_test_info = runtime_test_info.RuntimeTestInfo(
-            stage_name, self.log_path, record)
+    Implementation is optional.
+
+    Args:
+      record: records.TestResultRecord, a copy of the test record for
+        this test, containing all information of the test execution
+        including exception objects.
+    """
+
+  def _on_skip(self, record):
+    """Proxy function to guarantee the base implementation of on_skip is
+    called.
+
+    Args:
+      record: records.TestResultRecord, a copy of the test record for
+        this test, containing all information of the test execution
+        including exception objects.
+    """
+    logging.info('Reason to skip: %s', record.details)
+    logging.info(RESULT_LINE_TEMPLATE, record.test_name, record.result)
+    self.on_skip(record)
+
+  def on_skip(self, record):
+    """A function that is executed upon a test being skipped.
+
+    Implementation is optional.
+
+    Args:
+      record: records.TestResultRecord, a copy of the test record for
+        this test, containing all information of the test execution
+        including exception objects.
+    """
+
+  def _exec_procedure_func(self, func, tr_record):
+    """Executes a procedure function like on_pass, on_fail etc.
+
+    This function will alter the 'Result' of the test's record if
+    exceptions happened when executing the procedure function, but
+    prevents procedure functions from altering test records themselves
+    by only passing in a copy.
+
+    This will let signals.TestAbortAll through so abort_all works in all
+    procedure functions.
+
+    Args:
+      func: The procedure function to be executed.
+      tr_record: The TestResultRecord object associated with the test
+        executed.
+    """
+    func_name = func.__name__
+    procedure_name = func_name[1:] if func_name[0] == '_' else func_name
+    with self._log_test_stage(procedure_name):
+      try:
+        # Pass a copy of the record instead of the actual object so that it
+        # will not be modified.
+        func(copy.deepcopy(tr_record))
+      except signals.TestAbortSignal:
+        raise
+      except Exception as e:
+        logging.exception(
+          'Exception happened when executing %s for %s.',
+          procedure_name, self.current_test_info.name)
+        tr_record.add_error(procedure_name, e)
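
The deep copy passed to each hook is what keeps user hooks from rewriting results; a standalone illustration of that behavior, using a made-up record class:

import copy


class FakeRecord(object):

  def __init__(self):
    self.result = 'FAIL'


stored = FakeRecord()
hook_arg = copy.deepcopy(stored)
hook_arg.result = 'PASS'        # what a misbehaving hook might attempt
assert stored.result == 'FAIL'  # the stored record is unaffected
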
+
+  def record_data(self, content):
+    """Record an entry in test summary file.
+
+    Sometimes additional data need to be recorded in the summary file for
+    debugging or post-test analysis.
+
+    Each call adds a new entry to the summary file, with no guarantee of
+    its position among the summary file entries.
+
+    The content should be a dict. If a 'timestamp' field is absent, one is
+    added for ease of parsing later.
+
+    Args:
+      content: dict, the data to add to summary file.
+    """
+    if 'timestamp' not in content:
+      content['timestamp'] = utils.get_current_epoch_time()
+    self.summary_writer.dump(content,
+                 records.TestSummaryEntryType.USER_DATA)
+
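
A usage sketch for `record_data`; the class name, metric name, and the stand-in measurement are illustrative:

from mobly import base_test
from mobly import test_runner


class ThroughputTest(base_test.BaseTestClass):

  def test_throughput(self):
    throughput_mbps = 42.0  # stand-in for a real measurement
    # Adds one USER_DATA entry to test_summary.yaml; a 'timestamp' field is
    # filled in automatically because none is provided here.
    self.record_data({
        'test_name': 'test_throughput',
        'throughput_mbps': throughput_mbps,
    })


if __name__ == '__main__':
  test_runner.main()
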
+  def exec_one_test(self, test_name, test_method):
+    """Executes one test and update test results.
+
+    Executes setup_test, the test method, and teardown_test; then creates a
+    records.TestResultRecord object with the execution information and adds
+    the record to the test class's test results.
+
+    Args:
+      test_name: string, Name of the test.
+      test_method: function, The test method to execute.
+    """
+    tr_record = records.TestResultRecord(test_name, self.TAG)
+    tr_record.uid = getattr(test_method, 'uid', None)
+    tr_record.test_begin()
+    self.current_test_info = runtime_test_info.RuntimeTestInfo(
+      test_name, self.log_path, tr_record)
+    expects.recorder.reset_internal_states(tr_record)
+    logging.info('%s %s', TEST_CASE_TOKEN, test_name)
+    # Did teardown_test throw an error.
+    teardown_test_failed = False
+    try:
+      try:
         try:
-            with self._log_test_stage(stage_name):
-                self.setup_generated_tests()
-                return True
-        except Exception as e:
-            logging.exception('%s failed for %s.', stage_name, self.TAG)
-            record.test_error(e)
-            self.results.add_class_error(record)
-            self.summary_writer.dump(record.to_dict(),
-                                     records.TestSummaryEntryType.RECORD)
-            return False
-
-    def setup_generated_tests(self):
-        """Preprocesses that need to be done before setup_class.
-
-        This phase is used to do pre-test processes like generating tests.
-        This is the only place `self.generate_tests` should be called.
-
-        If this function throws an error, the test class will be marked failure
-        and the "Requested" field will be 0 because the number of tests
-        requested is unknown at this point.
-        """
-
-    def _setup_class(self):
-        """Proxy function to guarantee the base implementation of setup_class
-        is called.
-
-        Returns:
-            If `self.results` is returned instead of None, this means something
-            has gone wrong, and the rest of the test class should not execute.
-        """
-        # Setup for the class.
-        class_record = records.TestResultRecord(STAGE_NAME_SETUP_CLASS,
-                                                self.TAG)
-        class_record.test_begin()
-        self.current_test_info = runtime_test_info.RuntimeTestInfo(
-            STAGE_NAME_SETUP_CLASS, self.log_path, class_record)
-        expects.recorder.reset_internal_states(class_record)
+          self._setup_test(test_name)
+        except signals.TestFailure as e:
+          raise_with_traceback(signals.TestError(
+            e.details, e.extras))
+        test_method()
+      except (signals.TestPass, signals.TestAbortSignal):
+        raise
+      except Exception:
+        logging.exception('Exception occurred in %s.',
+                  self.current_test_info.name)
+        raise
+      finally:
+        before_count = expects.recorder.error_count
         try:
-            with self._log_test_stage(STAGE_NAME_SETUP_CLASS):
-                self.setup_class()
+          self._teardown_test(test_name)
         except signals.TestAbortSignal:
-            # Throw abort signals to outer try block for handling.
-            raise
+          raise
         except Exception as e:
-            # Setup class failed for unknown reasons.
-            # Fail the class and skip all tests.
-            logging.exception('Error in %s#setup_class.', self.TAG)
-            class_record.test_error(e)
-            self.results.add_class_error(class_record)
-            self._exec_procedure_func(self._on_fail, class_record)
-            class_record.update_record()
-            self.summary_writer.dump(class_record.to_dict(),
-                                     records.TestSummaryEntryType.RECORD)
-            self._skip_remaining_tests(e)
-            return self.results
-        if expects.recorder.has_error:
-            self._exec_procedure_func(self._on_fail, class_record)
-            class_record.test_error()
-            class_record.update_record()
-            self.summary_writer.dump(class_record.to_dict(),
-                                     records.TestSummaryEntryType.RECORD)
-            self.results.add_class_error(class_record)
-            self._skip_remaining_tests(
-                class_record.termination_signal.exception)
-            return self.results
-
-    def setup_class(self):
-        """Setup function that will be called before executing any test in the
-        class.
-
-        To signal setup failure, use asserts or raise your own exception.
-
-        Errors raised from `setup_class` will trigger `on_fail`.
-
-        Implementation is optional.
-        """
-
-    def _teardown_class(self):
-        """Proxy function to guarantee the base implementation of
-        teardown_class is called.
-        """
-        stage_name = STAGE_NAME_TEARDOWN_CLASS
-        record = records.TestResultRecord(stage_name, self.TAG)
-        record.test_begin()
-        self.current_test_info = runtime_test_info.RuntimeTestInfo(
-            stage_name, self.log_path, record)
-        expects.recorder.reset_internal_states(record)
-        try:
-            with self._log_test_stage(stage_name):
-                self.teardown_class()
-        except signals.TestAbortAll as e:
-            setattr(e, 'results', self.results)
-            raise
-        except Exception as e:
-            logging.exception('Error encountered in %s.', stage_name)
-            record.test_error(e)
-            record.update_record()
-            self.results.add_class_error(record)
-            self.summary_writer.dump(record.to_dict(),
-                                     records.TestSummaryEntryType.RECORD)
+          logging.exception(e)
+          tr_record.test_error()
+          tr_record.add_error(STAGE_NAME_TEARDOWN_TEST, e)
+          teardown_test_failed = True
         else:
-            if expects.recorder.has_error:
-                record.update_record()
-                self.results.add_class_error(record)
-                self.summary_writer.dump(record.to_dict(),
-                                         records.TestSummaryEntryType.RECORD)
-        finally:
-            self._clean_up()
+          # Check if anything failed by `expects`.
+          if before_count < expects.recorder.error_count:
+            teardown_test_failed = True
+    except (signals.TestFailure, AssertionError) as e:
+      tr_record.test_fail(e)
+    except signals.TestSkip as e:
+      # Test skipped.
+      tr_record.test_skip(e)
+    except signals.TestAbortSignal as e:
+      # Abort signals, pass along.
+      tr_record.test_fail(e)
+      raise
+    except signals.TestPass as e:
+      # Explicit test pass.
+      tr_record.test_pass(e)
+    except Exception as e:
+      # Exception happened during test.
+      tr_record.test_error(e)
+    else:
+      # No exception is thrown from test and teardown, if `expects` has
+      # error, the test should fail with the first error in `expects`.
+      if expects.recorder.has_error and not teardown_test_failed:
+        tr_record.test_fail()
+      # Otherwise the test passed.
+      elif not teardown_test_failed:
+        tr_record.test_pass()
+    finally:
+      tr_record.update_record()
+      try:
+        if tr_record.result in (
+            records.TestResultEnums.TEST_RESULT_ERROR,
+            records.TestResultEnums.TEST_RESULT_FAIL):
+          self._exec_procedure_func(self._on_fail, tr_record)
+        elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
+          self._exec_procedure_func(self._on_pass, tr_record)
+        elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
+          self._exec_procedure_func(self._on_skip, tr_record)
+      finally:
+        logging.info(RESULT_LINE_TEMPLATE, tr_record.test_name,
+               tr_record.result)
+        self.results.add_record(tr_record)
+        self.summary_writer.dump(tr_record.to_dict(),
+                     records.TestSummaryEntryType.RECORD)
+        self.current_test_info = None
 
-    def teardown_class(self):
-        """Teardown function that will be called after all the selected tests in
-        the test class have been executed.
+  def _assert_function_name_in_stack(self, expected_func_name):
+    """Asserts that the current stack contains the given function name."""
+    current_frame = inspect.currentframe()
+    caller_frames = inspect.getouterframes(current_frame, 2)
+    for caller_frame in caller_frames[2:]:
+      if caller_frame[3] == expected_func_name:
+        return
+    raise Error('"%s" cannot be called outside of %s' %
+          (caller_frames[1][3], expected_func_name))
 
-        Errors raised from `teardown_class` do not trigger `on_fail`.
+  def generate_tests(self, test_logic, name_func, arg_sets, uid_func=None):
+    """Generates tests in the test class.
 
-        Implementation is optional.
-        """
+    This function has to be called inside a test class's
+    `self.setup_generated_tests` function.
 
-    @contextlib.contextmanager
-    def _log_test_stage(self, stage_name):
-        """Logs the begin and end of a test stage.
+    Generated tests are not written down as methods, but as a list of
+    parameter sets. This way we reduce code repetition and improve test
+    scalability.
 
-        This context adds two log lines meant for clarifying the boundary of
-        each execution stage in Mobly log.
+    Users can provide an optional function to specify the UID of each test.
+    Not all generated tests are required to have a UID.
 
-        Args:
-            stage_name: string, name of the stage to log.
-        """
-        parent_token = self.current_test_info.name
-        # If the name of the stage is the same as the test name, in which case
-        # the stage is class-level instead of test-level, use the class's
-        # reference tag as the parent token instead.
-        if parent_token == stage_name:
-            parent_token = self.TAG
-        logging.debug(
-            TEST_STAGE_BEGIN_LOG_TEMPLATE.format(parent_token=parent_token,
-                                                 child_token=stage_name))
-        try:
-            yield
-        finally:
-            logging.debug(
-                TEST_STAGE_END_LOG_TEMPLATE.format(parent_token=parent_token,
-                                                   child_token=stage_name))
-
-    def _setup_test(self, test_name):
-        """Proxy function to guarantee the base implementation of setup_test is
-        called.
-        """
-        with self._log_test_stage(STAGE_NAME_SETUP_TEST):
-            self.setup_test()
-
-    def setup_test(self):
-        """Setup function that will be called every time before executing each
-        test method in the test class.
-
-        To signal setup failure, use asserts or raise your own exception.
-
-        Implementation is optional.
-        """
-
-    def _teardown_test(self, test_name):
-        """Proxy function to guarantee the base implementation of teardown_test
-        is called.
-        """
-        with self._log_test_stage(STAGE_NAME_TEARDOWN_TEST):
-            self.teardown_test()
-
-    def teardown_test(self):
-        """Teardown function that will be called every time a test method has
-        been executed.
-
-        Implementation is optional.
-        """
-
-    def _on_fail(self, record):
-        """Proxy function to guarantee the base implementation of on_fail is
-        called.
-
-        Args:
-            record: records.TestResultRecord, a copy of the test record for
-                    this test, containing all information of the test execution
-                    including exception objects.
-        """
-        self.on_fail(record)
-
-    def on_fail(self, record):
-        """A function that is executed upon a test failure.
-
-        User implementation is optional.
-
-        Args:
-            record: records.TestResultRecord, a copy of the test record for
-                this test, containing all information of the test execution
-                including exception objects.
-        """
-
-    def _on_pass(self, record):
-        """Proxy function to guarantee the base implementation of on_pass is
-        called.
-
-        Args:
-            record: records.TestResultRecord, a copy of the test record for
-                this test, containing all information of the test execution
-                including exception objects.
-        """
-        msg = record.details
-        if msg:
-            logging.info(msg)
-        self.on_pass(record)
-
-    def on_pass(self, record):
-        """A function that is executed upon a test passing.
-
-        Implementation is optional.
-
-        Args:
-            record: records.TestResultRecord, a copy of the test record for
-                this test, containing all information of the test execution
-                including exception objects.
-        """
-
-    def _on_skip(self, record):
-        """Proxy function to guarantee the base implementation of on_skip is
-        called.
-
-        Args:
-            record: records.TestResultRecord, a copy of the test record for
-                this test, containing all information of the test execution
-                including exception objects.
-        """
-        logging.info('Reason to skip: %s', record.details)
-        logging.info(RESULT_LINE_TEMPLATE, record.test_name, record.result)
-        self.on_skip(record)
-
-    def on_skip(self, record):
-        """A function that is executed upon a test being skipped.
-
-        Implementation is optional.
-
-        Args:
-            record: records.TestResultRecord, a copy of the test record for
-                this test, containing all information of the test execution
-                including exception objects.
-        """
-
-    def _exec_procedure_func(self, func, tr_record):
-        """Executes a procedure function like on_pass, on_fail etc.
-
-        This function will alter the 'Result' of the test's record if
-        exceptions happened when executing the procedure function, but
-        prevents procedure functions from altering test records themselves
-        by only passing in a copy.
-
-        This will let signals.TestAbortAll through so abort_all works in all
-        procedure functions.
-
-        Args:
-            func: The procedure function to be executed.
-            tr_record: The TestResultRecord object associated with the test
-                executed.
-        """
-        func_name = func.__name__
-        procedure_name = func_name[1:] if func_name[0] == '_' else func_name
-        with self._log_test_stage(procedure_name):
-            try:
-                # Pass a copy of the record instead of the actual object so that it
-                # will not be modified.
-                func(copy.deepcopy(tr_record))
-            except signals.TestAbortSignal:
-                raise
-            except Exception as e:
-                logging.exception(
-                    'Exception happened when executing %s for %s.',
-                    procedure_name, self.current_test_info.name)
-                tr_record.add_error(procedure_name, e)
-
-    def record_data(self, content):
-        """Record an entry in test summary file.
-
-        Sometimes additional data need to be recorded in the summary file for
-        debugging or post-test analysis.
-
-        Each call adds a new entry to the summary file, with no guarantee of
-        its position among the summary file entries.
-
-        The content should be a dict. If a 'timestamp' field is absent, one is
-        added for ease of parsing later.
-
-        Args:
-            content: dict, the data to add to summary file.
-        """
-        if 'timestamp' not in content:
-            content['timestamp'] = utils.get_current_epoch_time()
-        self.summary_writer.dump(content,
-                                 records.TestSummaryEntryType.USER_DATA)
-
-    def exec_one_test(self, test_name, test_method):
-        """Executes one test and update test results.
-
-        Executes setup_test, the test method, and teardown_test; then creates a
-        records.TestResultRecord object with the execution information and adds
-        the record to the test class's test results.
-
-        Args:
-            test_name: string, Name of the test.
-            test_method: function, The test method to execute.
-        """
-        tr_record = records.TestResultRecord(test_name, self.TAG)
-        tr_record.uid = getattr(test_method, 'uid', None)
-        tr_record.test_begin()
-        self.current_test_info = runtime_test_info.RuntimeTestInfo(
-            test_name, self.log_path, tr_record)
-        expects.recorder.reset_internal_states(tr_record)
-        logging.info('%s %s', TEST_CASE_TOKEN, test_name)
-        # Did teardown_test throw an error.
-        teardown_test_failed = False
-        try:
-            try:
-                try:
-                    self._setup_test(test_name)
-                except signals.TestFailure as e:
-                    raise_with_traceback(signals.TestError(
-                        e.details, e.extras))
-                test_method()
-            except (signals.TestPass, signals.TestAbortSignal):
-                raise
-            except Exception:
-                logging.exception('Exception occurred in %s.',
-                                  self.current_test_info.name)
-                raise
-            finally:
-                before_count = expects.recorder.error_count
-                try:
-                    self._teardown_test(test_name)
-                except signals.TestAbortSignal:
-                    raise
-                except Exception as e:
-                    logging.exception(e)
-                    tr_record.test_error()
-                    tr_record.add_error(STAGE_NAME_TEARDOWN_TEST, e)
-                    teardown_test_failed = True
-                else:
-                    # Check if anything failed by `expects`.
-                    if before_count < expects.recorder.error_count:
-                        teardown_test_failed = True
-        except (signals.TestFailure, AssertionError) as e:
-            tr_record.test_fail(e)
-        except signals.TestSkip as e:
-            # Test skipped.
-            tr_record.test_skip(e)
-        except signals.TestAbortSignal as e:
-            # Abort signals, pass along.
-            tr_record.test_fail(e)
-            raise
-        except signals.TestPass as e:
-            # Explicit test pass.
-            tr_record.test_pass(e)
-        except Exception as e:
-            # Exception happened during test.
-            tr_record.test_error(e)
+    Args:
+      test_logic: function, the common logic shared by all the generated
+        tests.
+      name_func: function, generates a test name according to a set of
+        test arguments. This function should take the same arguments as
+        the test logic function.
+      arg_sets: a list of tuples, each tuple is a set of arguments to be
+        passed to the test logic function and name function.
+      uid_func: function, an optional function that takes the same
+        arguments as the test logic function and returns a string that
+        is the corresponding UID.
+    """
+    self._assert_function_name_in_stack(STAGE_NAME_SETUP_GENERATED_TESTS)
+    root_msg = 'During test generation of "%s":' % test_logic.__name__
+    for args in arg_sets:
+      test_name = name_func(*args)
+      if test_name in self.get_existing_test_names():
+        raise Error(
+          '%s Test name "%s" already exists, cannot be duplicated!' %
+          (root_msg, test_name))
+      test_func = functools.partial(test_logic, *args)
+      if uid_func is not None:
+        uid = uid_func(*args)
+        if uid is None:
+          logging.warning('%s UID for arg set %s is None.', root_msg,
+                  args)
         else:
-            # No exception is thrown from test and teardown, if `expects` has
-            # error, the test should fail with the first error in `expects`.
-            if expects.recorder.has_error and not teardown_test_failed:
-                tr_record.test_fail()
-            # Otherwise the test passed.
-            elif not teardown_test_failed:
-                tr_record.test_pass()
-        finally:
-            tr_record.update_record()
-            try:
-                if tr_record.result in (
-                        records.TestResultEnums.TEST_RESULT_ERROR,
-                        records.TestResultEnums.TEST_RESULT_FAIL):
-                    self._exec_procedure_func(self._on_fail, tr_record)
-                elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
-                    self._exec_procedure_func(self._on_pass, tr_record)
-                elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
-                    self._exec_procedure_func(self._on_skip, tr_record)
-            finally:
-                logging.info(RESULT_LINE_TEMPLATE, tr_record.test_name,
-                             tr_record.result)
-                self.results.add_record(tr_record)
-                self.summary_writer.dump(tr_record.to_dict(),
-                                         records.TestSummaryEntryType.RECORD)
-                self.current_test_info = None
+          setattr(test_func, 'uid', uid)
+      self._generated_test_table[test_name] = test_func
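
A usage sketch of `generate_tests` called from `setup_generated_tests`; the test logic, name pattern, and arg sets are illustrative:

from mobly import base_test
from mobly import test_runner


class PairingTest(base_test.BaseTestClass):

  def setup_generated_tests(self):
    self.generate_tests(
        test_logic=self._check_pairing,
        name_func=lambda mode, ch: 'test_pairing_%s_ch%d' % (mode, ch),
        arg_sets=[('ble', 1), ('ble', 6), ('classic', 1)])

  def _check_pairing(self, mode, ch):
    # Common logic shared by all three generated tests.
    pass


if __name__ == '__main__':
  test_runner.main()
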
 
-    def _assert_function_name_in_stack(self, expected_func_name):
-        """Asserts that the current stack contains the given function name."""
-        current_frame = inspect.currentframe()
-        caller_frames = inspect.getouterframes(current_frame, 2)
-        for caller_frame in caller_frames[2:]:
-            if caller_frame[3] == expected_func_name:
-                return
-        raise Error('"%s" cannot be called outside of %s' %
-                    (caller_frames[1][3], expected_func_name))
+  def _safe_exec_func(self, func, *args):
+    """Executes a function with exception safeguard.
 
-    def generate_tests(self, test_logic, name_func, arg_sets, uid_func=None):
-        """Generates tests in the test class.
+    This will let signals.TestAbortAll through so abort_all works in all
+    procedure functions.
 
-        This function has to be called inside a test class's
-        `self.setup_generated_tests` function.
+    Args:
+      func: Function to be executed.
+      args: Arguments to be passed to the function.
 
-        Generated tests are not written down as methods, but as a list of
-        parameter sets. This way we reduce code repetition and improve test
-        scalability.
+    Returns:
+      Whatever the function returns.
+    """
+    try:
+      return func(*args)
+    except signals.TestAbortAll:
+      raise
+    except:
+      logging.exception('Exception happened when executing %s in %s.',
+                func.__name__, self.TAG)
 
-        Users can provide an optional function to specify the UID of each test.
-        Not all generated tests are required to have a UID.
+  def get_existing_test_names(self):
+    """Gets the names of existing tests in the class.
 
-        Args:
-            test_logic: function, the common logic shared by all the generated
-                tests.
-            name_func: function, generates a test name according to a set of
-                test arguments. This function should take the same arguments as
-                the test logic function.
-            arg_sets: a list of tuples, each tuple is a set of arguments to be
-                passed to the test logic function and name function.
-            uid_func: function, an optional function that takes the same
-                arguments as the test logic function and returns a string that
-                is the corresponding UID.
-        """
-        self._assert_function_name_in_stack(STAGE_NAME_SETUP_GENERATED_TESTS)
-        root_msg = 'During test generation of "%s":' % test_logic.__name__
-        for args in arg_sets:
-            test_name = name_func(*args)
-            if test_name in self.get_existing_test_names():
-                raise Error(
-                    '%s Test name "%s" already exists, cannot be duplicated!' %
-                    (root_msg, test_name))
-            test_func = functools.partial(test_logic, *args)
-            if uid_func is not None:
-                uid = uid_func(*args)
-                if uid is None:
-                    logging.warning('%s UID for arg set %s is None.', root_msg,
-                                    args)
-                else:
-                    setattr(test_func, 'uid', uid)
-            self._generated_test_table[test_name] = test_func
+    A method in the class is considered a test if its name starts with
+    'test_'.
 
-    def _safe_exec_func(self, func, *args):
-        """Executes a function with exception safeguard.
+    Note this only gets the names of tests that already exist. If
+    `setup_generated_tests` has not happened when this is called, the
+    generated tests won't be listed.
 
-        This will let signals.TestAbortAll through so abort_all works in all
-        procedure functions.
+    Returns:
+      A list of strings, each is a test method name.
+    """
+    test_names = []
+    for name, _ in inspect.getmembers(self, callable):
+      if name.startswith('test_'):
+        test_names.append(name)
+    return test_names + list(self._generated_test_table.keys())
 
-        Args:
-            func: Function to be executed.
-            args: Arguments to be passed to the function.
+  def _get_test_methods(self, test_names):
+    """Resolves test method names to bound test methods.
 
-        Returns:
-            Whatever the function returns.
-        """
-        try:
-            return func(*args)
-        except signals.TestAbortAll:
-            raise
-        except:
-            logging.exception('Exception happened when executing %s in %s.',
-                              func.__name__, self.TAG)
+    Args:
+      test_names: A list of strings, each string is a test method name.
 
-    def get_existing_test_names(self):
-        """Gets the names of existing tests in the class.
+    Returns:
+      A list of tuples of (string, function). String is the test method
+      name, function is the actual python method implementing its logic.
 
-        A method in the class is considered a test if its name starts with
-        'test_'.
+    Raises:
+      Error: The test name does not follow naming convention 'test_*'.
+        This can only be caused by user input.
+    """
+    test_methods = []
+    for test_name in test_names:
+      if not test_name.startswith('test_'):
+        raise Error('Test method name %s does not follow naming '
+              'convention test_*, abort.' % test_name)
+      if hasattr(self, test_name):
+        test_method = getattr(self, test_name)
+      elif test_name in self._generated_test_table:
+        test_method = self._generated_test_table[test_name]
+      else:
+        raise Error('%s does not have test method %s.' %
+              (self.TAG, test_name))
+      test_methods.append((test_name, test_method))
+    return test_methods
 
-        Note this only gets the names of tests that already exist. If
-        `setup_generated_tests` has not happened when this is called, the
-        generated tests won't be listed.
+  def _skip_remaining_tests(self, exception):
+    """Marks any requested test that has not been executed in a class as
+    skipped.
 
-        Returns:
-            A list of strings, each is a test method name.
-        """
-        test_names = []
-        for name, _ in inspect.getmembers(self, callable):
-            if name.startswith('test_'):
-                test_names.append(name)
-        return test_names + list(self._generated_test_table.keys())
+    This is useful for handling the abort class signal.
 
-    def _get_test_methods(self, test_names):
-        """Resolves test method names to bound test methods.
+    Args:
+      exception: The exception object that was thrown to trigger the
+        skip.
+    """
+    for test_name in self.results.requested:
+      if not self.results.is_test_executed(test_name):
+        test_record = records.TestResultRecord(test_name, self.TAG)
+        test_record.test_skip(exception)
+        self.results.add_record(test_record)
+        self.summary_writer.dump(test_record.to_dict(),
+                     records.TestSummaryEntryType.RECORD)
 
-        Args:
-            test_names: A list of strings, each string is a test method name.
+  def run(self, test_names=None):
+    """Runs tests within a test class.
 
-        Returns:
-            A list of tuples of (string, function). String is the test method
-            name, function is the actual python method implementing its logic.
+    One of these test method lists will be executed, shown here in priority
+    order:
 
-        Raises:
-            Error: The test name does not follow naming convention 'test_*'.
-                This can only be caused by user input.
-        """
-        test_methods = []
-        for test_name in test_names:
-            if not test_name.startswith('test_'):
-                raise Error('Test method name %s does not follow naming '
-                            'convention test_*, abort.' % test_name)
-            if hasattr(self, test_name):
-                test_method = getattr(self, test_name)
-            elif test_name in self._generated_test_table:
-                test_method = self._generated_test_table[test_name]
-            else:
-                raise Error('%s does not have test method %s.' %
-                            (self.TAG, test_name))
-            test_methods.append((test_name, test_method))
-        return test_methods
+    1. The test_names list, which is passed from the cmd line. Invalid
+       names are guarded by cmd line arg parsing.
+    2. The self.tests list defined in the test class. Invalid names are
+       ignored.
+    3. All functions that match the test method naming convention in the
+       test class.
 
-    def _skip_remaining_tests(self, exception):
-        """Marks any requested test that has not been executed in a class as
-        skipped.
+    Args:
+      test_names: A list of strings that are test method names requested
+        on the cmd line.
 
-        This is useful for handling the abort class signal.
+    Returns:
+      The test results object of this class.
+    """
+    logging.log_path = self.log_path
+    # Executes pre-setup procedures, like generating test methods.
+    if not self._setup_generated_tests():
+      return self.results
+    logging.info('==========> %s <==========', self.TAG)
+    # Devise the actual test methods to run in the test class.
+    if not test_names:
+      if self.tests:
+        # Specified by run list in class.
+        test_names = list(self.tests)
+      else:
+        # No test method specified by user, execute all in test class.
+        test_names = self.get_existing_test_names()
+    self.results.requested = test_names
+    self.summary_writer.dump(self.results.requested_test_names_dict(),
+                 records.TestSummaryEntryType.TEST_NAME_LIST)
+    tests = self._get_test_methods(test_names)
+    try:
+      setup_class_result = self._setup_class()
+      if setup_class_result:
+        return setup_class_result
+      # Run tests in order.
+      for test_name, test_method in tests:
+        self.exec_one_test(test_name, test_method)
+      return self.results
+    except signals.TestAbortClass as e:
+      e.details = 'Test class aborted due to: %s' % e.details
+      self._skip_remaining_tests(e)
+      return self.results
+    except signals.TestAbortAll as e:
+      e.details = 'All remaining tests aborted due to: %s' % e.details
+      self._skip_remaining_tests(e)
+      # Piggy-back test results on this exception object so we don't lose
+      # results from this test class.
+      setattr(e, 'results', self.results)
+      raise e
+    finally:
+      self._teardown_class()
+      logging.info('Summary for test class %s: %s', self.TAG,
+             self.results.summary_str())
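
A sketch of the second selection priority, `self.tests`, populated in the class constructor; the class and test names are illustrative:

from mobly import base_test
from mobly import test_runner


class SelectedTest(base_test.BaseTestClass):

  def __init__(self, configs):
    super(SelectedTest, self).__init__(configs)
    # With no test names from the cmd line, only these run, in this order.
    self.tests = ['test_b', 'test_a']

  def test_a(self):
    pass

  def test_b(self):
    pass

  def test_c(self):
    # Runs only when requested explicitly from the cmd line.
    pass


if __name__ == '__main__':
  test_runner.main()
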
 
-        Args:
-            exception: The exception object that was thrown to trigger the
-                skip.
-        """
-        for test_name in self.results.requested:
-            if not self.results.is_test_executed(test_name):
-                test_record = records.TestResultRecord(test_name, self.TAG)
-                test_record.test_skip(exception)
-                self.results.add_record(test_record)
-                self.summary_writer.dump(test_record.to_dict(),
-                                         records.TestSummaryEntryType.RECORD)
-
-    def run(self, test_names=None):
-        """Runs tests within a test class.
-
-        One of these test method lists will be executed, shown here in priority
-        order:
-
-        1. The test_names list, which is passed from the cmd line. Invalid
-           names are guarded by cmd line arg parsing.
-        2. The self.tests list defined in the test class. Invalid names are
-           ignored.
-        3. All functions that match the test method naming convention in the
-           test class.
-
-        Args:
-            test_names: A list of strings that are test method names requested
-                on the cmd line.
-
-        Returns:
-            The test results object of this class.
-        """
-        logging.log_path = self.log_path
-        # Executes pre-setup procedures, like generating test methods.
-        if not self._setup_generated_tests():
-            return self.results
-        logging.info('==========> %s <==========', self.TAG)
-        # Devise the actual test methods to run in the test class.
-        if not test_names:
-            if self.tests:
-                # Specified by run list in class.
-                test_names = list(self.tests)
-            else:
-                # No test method specified by user, execute all in test class.
-                test_names = self.get_existing_test_names()
-        self.results.requested = test_names
-        self.summary_writer.dump(self.results.requested_test_names_dict(),
-                                 records.TestSummaryEntryType.TEST_NAME_LIST)
-        tests = self._get_test_methods(test_names)
-        try:
-            setup_class_result = self._setup_class()
-            if setup_class_result:
-                return setup_class_result
-            # Run tests in order.
-            for test_name, test_method in tests:
-                self.exec_one_test(test_name, test_method)
-            return self.results
-        except signals.TestAbortClass as e:
-            e.details = 'Test class aborted due to: %s' % e.details
-            self._skip_remaining_tests(e)
-            return self.results
-        except signals.TestAbortAll as e:
-            e.details = 'All remaining tests aborted due to: %s' % e.details
-            self._skip_remaining_tests(e)
-            # Piggy-back test results on this exception object so we don't lose
-            # results from this test class.
-            setattr(e, 'results', self.results)
-            raise e
-        finally:
-            self._teardown_class()
-            logging.info('Summary for test class %s: %s', self.TAG,
-                         self.results.summary_str())
-
-    def _clean_up(self):
-        """The final stage of a test class execution."""
-        stage_name = STAGE_NAME_CLEAN_UP
-        record = records.TestResultRecord(stage_name, self.TAG)
-        record.test_begin()
-        self.current_test_info = runtime_test_info.RuntimeTestInfo(
-            stage_name, self.log_path, record)
-        expects.recorder.reset_internal_states(record)
-        with self._log_test_stage(stage_name):
-            # Write controller info and summary to summary file.
-            self._record_controller_info()
-            self._controller_manager.unregister_controllers()
-            if expects.recorder.has_error:
-                record.test_error()
-                record.update_record()
-                self.results.add_class_error(record)
-                self.summary_writer.dump(record.to_dict(),
-                                         records.TestSummaryEntryType.RECORD)
+  def _clean_up(self):
+    """The final stage of a test class execution."""
+    stage_name = STAGE_NAME_CLEAN_UP
+    record = records.TestResultRecord(stage_name, self.TAG)
+    record.test_begin()
+    self.current_test_info = runtime_test_info.RuntimeTestInfo(
+      stage_name, self.log_path, record)
+    expects.recorder.reset_internal_states(record)
+    with self._log_test_stage(stage_name):
+      # Write controller info and summary to summary file.
+      self._record_controller_info()
+      self._controller_manager.unregister_controllers()
+      if expects.recorder.has_error:
+        record.test_error()
+        record.update_record()
+        self.results.add_class_error(record)
+        self.summary_writer.dump(record.to_dict(),
+                     records.TestSummaryEntryType.RECORD)
diff --git a/mobly/config_parser.py b/mobly/config_parser.py
index 3372d7c..477cb3d 100644
--- a/mobly/config_parser.py
+++ b/mobly/config_parser.py
@@ -29,171 +29,171 @@
 
 
 class MoblyConfigError(Exception):
-    """Raised when there is a problem in test configuration file."""
+  """Raised when there is a problem in test configuration file."""
 
 
 def _validate_test_config(test_config):
-    """Validates the raw configuration loaded from the config file.
+  """Validates the raw configuration loaded from the config file.
 
-    Making sure the required key 'TestBeds' is present.
-    """
-    required_key = keys.Config.key_testbed.value
-    if required_key not in test_config:
-        raise MoblyConfigError('Required key %s missing in test config.' %
-                               required_key)
+  Making sure the required key 'TestBeds' is present.
+  """
+  required_key = keys.Config.key_testbed.value
+  if required_key not in test_config:
+    raise MoblyConfigError('Required key %s missing in test config.' %
+                 required_key)
 
 
 def _validate_testbed_name(name):
-    """Validates the name of a test bed.
+  """Validates the name of a test bed.
 
-    Since test bed names are used as part of the test run id, they need to meet
-    certain requirements.
+  Since test bed names are used as part of the test run id, they need to meet
+  certain requirements.
 
-    Args:
-        name: The test bed's name specified in config file.
+  Args:
+    name: The test bed's name specified in config file.
 
-    Raises:
-        MoblyConfigError: The name does not meet the requirements.
-    """
-    if not name:
-        raise MoblyConfigError("Test bed names can't be empty.")
-    name = str(name)
-    for char in name:
-        if char not in utils.valid_filename_chars:
-            raise MoblyConfigError(
-                'Char "%s" is not allowed in test bed names.' % char)
+  Raises:
+    MoblyConfigError: The name does not meet the requirements.
+  """
+  if not name:
+    raise MoblyConfigError("Test bed names can't be empty.")
+  name = str(name)
+  for char in name:
+    if char not in utils.valid_filename_chars:
+      raise MoblyConfigError(
+        'Char "%s" is not allowed in test bed names.' % char)
 
 
 def _validate_testbed_configs(testbed_configs):
-    """Validates the testbed configurations.
+  """Validates the testbed configurations.
 
-    Args:
-        testbed_configs: A list of testbed configuration dicts.
+  Args:
+    testbed_configs: A list of testbed configuration dicts.
 
-    Raises:
-        MoblyConfigError: Some parts of the configuration are invalid.
-    """
-    seen_names = set()
-    # Cross checks testbed configs for resource conflicts.
-    for config in testbed_configs:
-        # Check for conflicts between multiple concurrent testbed configs.
-        # No need to call it if there's only one testbed config.
-        name = config[keys.Config.key_testbed_name.value]
-        _validate_testbed_name(name)
-        # Test bed names should be unique.
-        if name in seen_names:
-            raise MoblyConfigError('Duplicate testbed name %s found.' % name)
-        seen_names.add(name)
+  Raises:
+    MoblyConfigError: Some parts of the configuration are invalid.
+  """
+  seen_names = set()
+  # Cross checks testbed configs for resource conflicts.
+  for config in testbed_configs:
+    # Check for conflicts between multiple concurrent testbed configs.
+    # No need to call it if there's only one testbed config.
+    name = config[keys.Config.key_testbed_name.value]
+    _validate_testbed_name(name)
+    # Test bed names should be unique.
+    if name in seen_names:
+      raise MoblyConfigError('Duplicate testbed name %s found.' % name)
+    seen_names.add(name)
 
 
 def load_test_config_file(test_config_path, tb_filters=None):
-    """Processes the test configuration file provied by user.
+  """Processes the test configuration file provied by user.
 
-    Loads the configuration file into a dict, unpacks each testbed
-    config into its own dict, and validates the configuration in the
-    process.
+  Loads the configuration file into a dict, unpacks each testbed
+  config into its own dict, and validates the configuration in the
+  process.
 
-    Args:
-        test_config_path: Path to the test configuration file.
-        tb_filters: A subset of test bed names to be pulled from the config
-            file. If None, then all test beds will be selected.
+  Args:
+    test_config_path: Path to the test configuration file.
+    tb_filters: A subset of test bed names to be pulled from the config
+      file. If None, then all test beds will be selected.
 
-    Returns:
-        A list of test configuration dicts to be passed to
-        test_runner.TestRunner.
-    """
-    configs = _load_config_file(test_config_path)
-    if tb_filters:
-        tbs = []
-        for tb in configs[keys.Config.key_testbed.value]:
-            if tb[keys.Config.key_testbed_name.value] in tb_filters:
-                tbs.append(tb)
-        if len(tbs) != len(tb_filters):
-            raise MoblyConfigError(
-                'Expect to find %d test bed configs, found %d. Check if'
-                ' you have the correct test bed names.' %
-                (len(tb_filters), len(tbs)))
-        configs[keys.Config.key_testbed.value] = tbs
-    mobly_params = configs.get(keys.Config.key_mobly_params.value, {})
-    # Decide log path.
-    log_path = mobly_params.get(keys.Config.key_log_path.value,
-                                _DEFAULT_LOG_PATH)
-    if ENV_MOBLY_LOGPATH in os.environ:
-        log_path = os.environ[ENV_MOBLY_LOGPATH]
-    log_path = utils.abs_path(log_path)
-    # Validate configs
-    _validate_test_config(configs)
-    _validate_testbed_configs(configs[keys.Config.key_testbed.value])
-    # Transform config dict from user-facing key mapping to internal config object.
-    test_configs = []
-    for original_bed_config in configs[keys.Config.key_testbed.value]:
-        test_run_config = TestRunConfig()
-        test_run_config.testbed_name = original_bed_config[
-            keys.Config.key_testbed_name.value]
-        # Deprecated, use testbed_name
-        test_run_config.test_bed_name = test_run_config.testbed_name
-        test_run_config.log_path = log_path
-        test_run_config.controller_configs = original_bed_config.get(
-            keys.Config.key_testbed_controllers.value, {})
-        test_run_config.user_params = original_bed_config.get(
-            keys.Config.key_testbed_test_params.value, {})
-        test_configs.append(test_run_config)
-    return test_configs
+  Returns:
+    A list of test configuration dicts to be passed to
+    test_runner.TestRunner.
+  """
+  configs = _load_config_file(test_config_path)
+  if tb_filters:
+    tbs = []
+    for tb in configs[keys.Config.key_testbed.value]:
+      if tb[keys.Config.key_testbed_name.value] in tb_filters:
+        tbs.append(tb)
+    if len(tbs) != len(tb_filters):
+      raise MoblyConfigError(
+        'Expect to find %d test bed configs, found %d. Check if'
+        ' you have the correct test bed names.' %
+        (len(tb_filters), len(tbs)))
+    configs[keys.Config.key_testbed.value] = tbs
+  mobly_params = configs.get(keys.Config.key_mobly_params.value, {})
+  # Decide log path.
+  log_path = mobly_params.get(keys.Config.key_log_path.value,
+                _DEFAULT_LOG_PATH)
+  if ENV_MOBLY_LOGPATH in os.environ:
+    log_path = os.environ[ENV_MOBLY_LOGPATH]
+  log_path = utils.abs_path(log_path)
+  # Validate configs
+  _validate_test_config(configs)
+  _validate_testbed_configs(configs[keys.Config.key_testbed.value])
+  # Transform config dict from user-facing key mapping to internal config object.
+  test_configs = []
+  for original_bed_config in configs[keys.Config.key_testbed.value]:
+    test_run_config = TestRunConfig()
+    test_run_config.testbed_name = original_bed_config[
+      keys.Config.key_testbed_name.value]
+    # Deprecated, use testbed_name
+    test_run_config.test_bed_name = test_run_config.testbed_name
+    test_run_config.log_path = log_path
+    test_run_config.controller_configs = original_bed_config.get(
+      keys.Config.key_testbed_controllers.value, {})
+    test_run_config.user_params = original_bed_config.get(
+      keys.Config.key_testbed_test_params.value, {})
+    test_configs.append(test_run_config)
+  return test_configs
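
A usage sketch, assuming an illustrative `sample_config.yml` that uses the keys this parser expects:

from mobly import config_parser

# sample_config.yml (illustrative):
#
#   MoblyParams:
#     LogPath: /tmp/mobly_logs
#   TestBeds:
#     - Name: SampleTestBed
#       Controllers:
#         AndroidDevice: '*'
#       TestParams:
#         favorite_food: pizza

configs = config_parser.load_test_config_file('sample_config.yml',
                                              tb_filters=['SampleTestBed'])
print(configs[0].testbed_name)  # 'SampleTestBed'
print(configs[0].user_params)   # {'favorite_food': 'pizza'}
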
 
 
 def _load_config_file(path):
-    """Loads a test config file.
+  """Loads a test config file.
 
-    The test config file has to be in YAML format.
+  The test config file has to be in YAML format.
 
-    Args:
-        path: A string that is the full path to the config file, including the
-            file name.
+  Args:
+    path: A string that is the full path to the config file, including the
+      file name.
 
-    Returns:
-        A dict that represents info in the config file.
-    """
-    with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:
-        conf = yaml.safe_load(f)
-        return conf
+  Returns:
+    A dict that represents info in the config file.
+  """
+  with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:
+    conf = yaml.safe_load(f)
+    return conf
 
 
 class TestRunConfig(object):
-    """The data class that holds all the information needed for a test run.
+  """The data class that holds all the information needed for a test run.
 
-    Attributes:
-        log_path: string, specifies the root directory for all logs written by
-            a test run.
-        test_bed_name: [Deprecated, use 'testbed_name' instead]
-            string, the name of the test bed used by a test run.
-        testbed_name: string, the name of the test bed used by a test run.
-        controller_configs: dict, configs used for instantiating controller
-            objects.
-        user_params: dict, all the parameters to be consumed by the test logic.
-        summary_writer: records.TestSummaryWriter, used to write elements to
-            the test result summary file.
-        test_class_name_suffix: string, suffix to append to the class name for
-                reporting. This is used for differentiating the same class
-                executed with different parameters in a suite.
+  Attributes:
+    log_path: string, specifies the root directory for all logs written by
+      a test run.
+    test_bed_name: [Deprecated, use 'testbed_name' instead]
+      string, the name of the test bed used by a test run.
+    testbed_name: string, the name of the test bed used by a test run.
+    controller_configs: dict, configs used for instantiating controller
+      objects.
+    user_params: dict, all the parameters to be consumed by the test logic.
+    summary_writer: records.TestSummaryWriter, used to write elements to
+      the test result summary file.
+    test_class_name_suffix: string, suffix to append to the class name for
+        reporting. This is used for differentiating the same class
+        executed with different parameters in a suite.
+  """
+
+  def __init__(self):
+    # Init value is an empty string to avoid string joining errors.
+    self.log_path = ''
+    # Deprecated, use 'testbed_name'
+    self.test_bed_name = None
+    self.testbed_name = None
+    self.controller_configs = {}
+    self.user_params = {}
+    self.summary_writer = None
+    self.test_class_name_suffix = None
+
+  def copy(self):
+    """Returns a deep copy of the current config.
     """
+    return copy.deepcopy(self)
 
-    def __init__(self):
-        # Init value is an empty string to avoid string joining errors.
-        self.log_path = ''
-        # Deprecated, use 'testbed_name'
-        self.test_bed_name = None
-        self.testbed_name = None
-        self.controller_configs = {}
-        self.user_params = {}
-        self.summary_writer = None
-        self.test_class_name_suffix = None
-
-    def copy(self):
-        """Returns a deep copy of the current config.
-        """
-        return copy.deepcopy(self)
-
-    def __str__(self):
-        content = dict(self.__dict__)
-        content.pop('summary_writer')
-        return pprint.pformat(content)
+  def __str__(self):
+    content = dict(self.__dict__)
+    content.pop('summary_writer')
+    return pprint.pformat(content)
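
A small sketch of building and copying a `TestRunConfig` by hand; the values are illustrative:

from mobly import config_parser

config = config_parser.TestRunConfig()
config.testbed_name = 'SampleTestBed'
config.log_path = '/tmp/mobly_logs'
config.user_params = {'favorite_food': 'pizza'}

# copy() is a deep copy, so per-class tweaks such as a suite's name suffix
# do not leak back into the original config.
per_class_config = config.copy()
per_class_config.test_class_name_suffix = 'flavor_a'
assert config.test_class_name_suffix is None
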
diff --git a/mobly/controller_manager.py b/mobly/controller_manager.py
index acba8db..2325d27 100644
--- a/mobly/controller_manager.py
+++ b/mobly/controller_manager.py
@@ -23,194 +23,194 @@
 
 
 def verify_controller_module(module):
-    """Verifies a module object follows the required interface for
-    controllers.
+  """Verifies a module object follows the required interface for
+  controllers.
 
-    The interface is explained in the docstring of
-    `base_test.BaseTestClass.register_controller`.
+  The interface is explained in the docstring of
+  `base_test.BaseTestClass.register_controller`.
 
-    Args:
-        module: An object that is a controller module. This is usually
-            imported with import statements or loaded by importlib.
+  Args:
+    module: An object that is a controller module. This is usually
+      imported with import statements or loaded by importlib.
 
-    Raises:
-        ControllerError: if the module does not match the Mobly controller
-            interface, or one of the required members is null.
-    """
-    required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')
-    for attr in required_attributes:
-        if not hasattr(module, attr):
-            raise signals.ControllerError(
-                'Module %s missing required controller module attribute'
-                ' %s.' % (module.__name__, attr))
-        if not getattr(module, attr):
-            raise signals.ControllerError(
-                'Controller interface %s in %s cannot be null.' %
-                (attr, module.__name__))
+  Raises:
+    ControllerError: if the module does not match the Mobly controller
+      interface, or one of the required members is null.
+  """
+  required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')
+  for attr in required_attributes:
+    if not hasattr(module, attr):
+      raise signals.ControllerError(
+        'Module %s missing required controller module attribute'
+        ' %s.' % (module.__name__, attr))
+    if not getattr(module, attr):
+      raise signals.ControllerError(
+        'Controller interface %s in %s cannot be null.' %
+        (attr, module.__name__))
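
A minimal controller module sketch that satisfies this check; the module, class, and config field names are hypothetical:

# my_widget.py
MOBLY_CONTROLLER_CONFIG_NAME = 'MyWidget'


class MyWidget(object):

  def __init__(self, serial):
    self.serial = serial


def create(configs):
  # `configs` is the (deep-copied) value under 'MyWidget' in the testbed
  # config, assumed here to be a list of dicts; return controller objects.
  return [MyWidget(c['serial']) for c in configs]


def destroy(objects):
  # Release whatever resources the controller objects hold.
  del objects


def get_info(objects):
  # Optional: per-object info recorded under 'ControllerInfo' in the summary.
  return [{'serial': obj.serial} for obj in objects]
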
 
 
 class ControllerManager(object):
-    """Manages the controller objects for Mobly tests.
+  """Manages the controller objects for Mobly tests.
 
-    This manages the life cycles and info retrieval of all controller objects
-    used in a test.
+  This manages the life cycles and info retrieval of all controller objects
+  used in a test.
 
-    Attributes:
-        controller_configs: dict, controller configs provided by the user via
-            test bed config.
+  Attributes:
+    controller_configs: dict, controller configs provided by the user via
+      test bed config.
+  """
+
+  def __init__(self, class_name, controller_configs):
+    # Controller object management.
+    self._controller_objects = collections.OrderedDict(
+    )  # controller_name: objects
+    self._controller_modules = {}  # controller_name: module
+    self._class_name = class_name
+    self.controller_configs = controller_configs
+
+  def register_controller(self, module, required=True, min_number=1):
+    """Loads a controller module and returns its loaded devices.
+
+    This is to be used in a mobly test class.
+
+    Args:
+      module: A module that follows the controller module interface.
+      required: A bool. If True, failing to register the specified
+        controller module raises exceptions. If False, the objects
+        failed to instantiate will be skipped.
+      min_number: An integer that is the minimum number of controller
+        objects to be created. Default is one, since you should not
+        register a controller module without expecting at least one
+        object.
+
+    Returns:
+      A list of controller objects instantiated from controller_module, or
+      None if no config existed for this controller and it was not a
+      required controller.
+
+    Raises:
+      ControllerError:
+        * The controller module has already been registered.
+        * The actual number of objects instantiated is less than
+          `min_number`.
+        * `required` is True and no corresponding config can be found.
+        * Any other error occurred in the registration process.
     """
+    verify_controller_module(module)
+    # Use the module's name as the ref name
+    module_ref_name = module.__name__.split('.')[-1]
+    if module_ref_name in self._controller_objects:
+      raise signals.ControllerError(
+        'Controller module %s has already been registered. It cannot '
+        'be registered again.' % module_ref_name)
+    # Create controller objects.
+    module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME
+    if module_config_name not in self.controller_configs:
+      if required:
+        raise signals.ControllerError(
+          'No corresponding config found for %s' %
+          module_config_name)
+      logging.warning(
+        'No corresponding config found for optional controller %s',
+        module_config_name)
+      return None
+    try:
+      # Make a deep copy of the config to pass to the controller module,
+      # in case the controller module modifies the config internally.
+      original_config = self.controller_configs[module_config_name]
+      controller_config = copy.deepcopy(original_config)
+      objects = module.create(controller_config)
+    except:
+      logging.exception(
+        'Failed to initialize objects for controller %s, abort!',
+        module_config_name)
+      raise
+    if not isinstance(objects, list):
+      raise signals.ControllerError(
+        'Controller module %s did not return a list of objects, abort.'
+        % module_ref_name)
+    # Check we got enough controller objects to continue.
+    actual_number = len(objects)
+    if actual_number < min_number:
+      module.destroy(objects)
+      raise signals.ControllerError(
+        'Expected to get at least %d controller objects, got %d.' %
+        (min_number, actual_number))
+    # Save a shallow copy of the list for internal usage, so tests can't
+    # affect internal registry by manipulating the object list.
+    self._controller_objects[module_ref_name] = copy.copy(objects)
+    logging.debug('Found %d objects for controller %s', len(objects),
+            module_config_name)
+    self._controller_modules[module_ref_name] = module
+    return objects
 
-    def __init__(self, class_name, controller_configs):
-        # Controller object management.
-        self._controller_objects = collections.OrderedDict(
-        )  # controller_name: objects
-        self._controller_modules = {}  # controller_name: module
-        self._class_name = class_name
-        self.controller_configs = controller_configs
+  def unregister_controllers(self):
+    """Destroy controller objects and clear internal registry.
 
-    def register_controller(self, module, required=True, min_number=1):
-        """Loads a controller module and returns its loaded devices.
+    This will be called after each test class.
+    """
+    # TODO(xpconanfan): actually record these errors instead of just
+    # logging them.
+    for name, module in self._controller_modules.items():
+      logging.debug('Destroying %s.', name)
+      with expects.expect_no_raises('Exception occurred destroying %s.' %
+                      name):
+        module.destroy(self._controller_objects[name])
+    self._controller_objects = collections.OrderedDict()
+    self._controller_modules = {}
 
-        This is to be used in a mobly test class.
+  def _create_controller_info_record(self, controller_module_name):
+    """Creates controller info record for a particular controller type.
 
-        Args:
-            module: A module that follows the controller module interface.
-            required: A bool. If True, failing to register the specified
-                controller module raises exceptions. If False, the objects
-                failed to instantiate will be skipped.
-            min_number: An integer that is the minimum number of controller
-                objects to be created. Default is one, since you should not
-                register a controller module without expecting at least one
-                object.
+    Info is retrieved from all the controller objects spawned from the
+    specified module, using the controller module's `get_info` function.
 
-        Returns:
-            A list of controller objects instantiated from controller_module, or
-            None if no config existed for this controller and it was not a
-            required controller.
+    Args:
+      controller_module_name: string, the name of the controller module
+        to retrieve info from.
 
-        Raises:
-            ControllerError:
-                * The controller module has already been registered.
-                * The actual number of objects instantiated is less than the
-                * `min_number`.
-                * `required` is True and no corresponding config can be found.
-                * Any other error occurred in the registration process.
-        """
-        verify_controller_module(module)
-        # Use the module's name as the ref name
-        module_ref_name = module.__name__.split('.')[-1]
-        if module_ref_name in self._controller_objects:
-            raise signals.ControllerError(
-                'Controller module %s has already been registered. It cannot '
-                'be registered again.' % module_ref_name)
-        # Create controller objects.
-        module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME
-        if module_config_name not in self.controller_configs:
-            if required:
-                raise signals.ControllerError(
-                    'No corresponding config found for %s' %
-                    module_config_name)
-            logging.warning(
-                'No corresponding config found for optional controller %s',
-                module_config_name)
-            return None
-        try:
-            # Make a deep copy of the config to pass to the controller module,
-            # in case the controller module modifies the config internally.
-            original_config = self.controller_configs[module_config_name]
-            controller_config = copy.deepcopy(original_config)
-            objects = module.create(controller_config)
-        except:
-            logging.exception(
-                'Failed to initialize objects for controller %s, abort!',
-                module_config_name)
-            raise
-        if not isinstance(objects, list):
-            raise signals.ControllerError(
-                'Controller module %s did not return a list of objects, abort.'
-                % module_ref_name)
-        # Check we got enough controller objects to continue.
-        actual_number = len(objects)
-        if actual_number < min_number:
-            module.destroy(objects)
-            raise signals.ControllerError(
-                'Expected to get at least %d controller objects, got %d.' %
-                (min_number, actual_number))
-        # Save a shallow copy of the list for internal usage, so tests can't
-        # affect internal registry by manipulating the object list.
-        self._controller_objects[module_ref_name] = copy.copy(objects)
-        logging.debug('Found %d objects for controller %s', len(objects),
-                      module_config_name)
-        self._controller_modules[module_ref_name] = module
-        return objects
+    Returns:
+      A records.ControllerInfoRecord object.
+    """
+    module = self._controller_modules[controller_module_name]
+    controller_info = None
+    try:
+      controller_info = module.get_info(
+        copy.copy(self._controller_objects[controller_module_name]))
+    except AttributeError:
+      logging.warning(
+        'No optional debug info found for controller '
+        '%s. To provide it, implement `get_info`.',
+        controller_module_name)
+    try:
+      yaml.dump(controller_info)
+    except TypeError:
+      logging.warning(
+        'The info of controller %s in class "%s" is not '
+        'YAML serializable! Coercing it to string.',
+        controller_module_name, self._class_name)
+      controller_info = str(controller_info)
+    return records.ControllerInfoRecord(
+      self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,
+      controller_info)
 
-    def unregister_controllers(self):
-        """Destroy controller objects and clear internal registry.
+  def get_controller_info_records(self):
+    """Get the info records for all the controller objects in the manager.
 
-        This will be called after each test class.
-        """
-        # TODO(xpconanfan): actually record these errors instead of just
-        # logging them.
-        for name, module in self._controller_modules.items():
-            logging.debug('Destroying %s.', name)
-            with expects.expect_no_raises('Exception occurred destroying %s.' %
-                                          name):
-                module.destroy(self._controller_objects[name])
-        self._controller_objects = collections.OrderedDict()
-        self._controller_modules = {}
+    New info records for each controller object are created for every call
+    so the latest info is included.
 
-    def _create_controller_info_record(self, controller_module_name):
-        """Creates controller info record for a particular controller type.
-
-        Info is retrieved from all the controller objects spawned from the
-        specified module, using the controller module's `get_info` function.
-
-        Args:
-            controller_module_name: string, the name of the controller module
-                to retrieve info from.
-
-        Returns:
-            A records.ControllerInfoRecord object.
-        """
-        module = self._controller_modules[controller_module_name]
-        controller_info = None
-        try:
-            controller_info = module.get_info(
-                copy.copy(self._controller_objects[controller_module_name]))
-        except AttributeError:
-            logging.warning(
-                'No optional debug info found for controller '
-                '%s. To provide it, implement `get_info`.',
-                controller_module_name)
-        try:
-            yaml.dump(controller_info)
-        except TypeError:
-            logging.warning(
-                'The info of controller %s in class "%s" is not '
-                'YAML serializable! Coercing it to string.',
-                controller_module_name, self._class_name)
-            controller_info = str(controller_info)
-        return records.ControllerInfoRecord(
-            self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,
-            controller_info)
-
-    def get_controller_info_records(self):
-        """Get the info records for all the controller objects in the manager.
-
-        New info records for each controller object are created for every call
-        so the latest info is included.
-
-        Returns:
-            List of records.ControllerInfoRecord objects. Each opject conatins
-            the info of a type of controller
-        """
-        info_records = []
-        for controller_module_name in self._controller_objects.keys():
-            with expects.expect_no_raises(
-                    'Failed to collect controller info from %s' %
-                    controller_module_name):
-                record = self._create_controller_info_record(
-                    controller_module_name)
-                if record:
-                    info_records.append(record)
-        return info_records
+    Returns:
+      List of records.ControllerInfoRecord objects. Each object contains
+      the info of a type of controller.
+    """
+    info_records = []
+    for controller_module_name in self._controller_objects.keys():
+      with expects.expect_no_raises(
+          'Failed to collect controller info from %s' %
+          controller_module_name):
+        record = self._create_controller_info_record(
+          controller_module_name)
+        if record:
+          info_records.append(record)
+    return info_records
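
For reference, a minimal controller module satisfying the interface checked by
verify_controller_module and consumed by register_controller might look like
the sketch below. The module name, the SampleDevice class, and the assumption
that the config section is a list of dicts with a 'serial' field are
illustrative only; just `create`, `destroy`, `MOBLY_CONTROLLER_CONFIG_NAME`,
and the optional `get_info` hook come from the code above.

  MOBLY_CONTROLLER_CONFIG_NAME = 'SampleDevice'


  class SampleDevice(object):
    """Illustrative controller object built from one config entry."""

    def __init__(self, config):
      self.serial = config.get('serial')


  def create(configs):
    # Receives a deep copy of the matching test bed config section and
    # must return a list of controller objects.
    return [SampleDevice(c) for c in configs]


  def destroy(objects):
    # Called by ControllerManager.unregister_controllers after each test
    # class; release any resources held by the objects here.
    pass


  def get_info(objects):
    # Optional hook used by _create_controller_info_record; the return
    # value should be YAML serializable.
    return [{'serial': device.serial} for device in objects]
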
diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
index 6cecaa8..bb8f639 100644
--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py
+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
@@ -17,26 +17,26 @@
 
 .. code-block:: json
 
-    Request:
-    {
-        "id": <monotonically increasing integer containing the ID of
-               this request>
-        "method": <string containing the name of the method to execute>
-        "params": <JSON array containing the arguments to the method>
-    }
+  Request:
+  {
+    "id": <monotonically increasing integer containing the ID of
+         this request>
+    "method": <string containing the name of the method to execute>
+    "params": <JSON array containing the arguments to the method>
+  }
 
-    Response:
-    {
-        "id": <int id of request that this response maps to>,
-        "result": <Arbitrary JSON object containing the result of
-                   executing the method. If the method could not be
-                   executed or returned void, contains 'null'.>,
-        "error": <String containing the error thrown by executing the
-                  method. If no error occurred, contains 'null'.>
-        "callback": <String that represents a callback ID used to
-                     identify events associated with a particular
-                     CallbackHandler object.>
-    }
+  Response:
+  {
+    "id": <int id of request that this response maps to>,
+    "result": <Arbitrary JSON object containing the result of
+           executing the method. If the method could not be
+           executed or returned void, contains 'null'.>,
+    "error": <String containing the error thrown by executing the
+          method. If no error occurred, contains 'null'.>
+    "callback": <String that represents a callback ID used to
+           identify events associated with a particular
+           CallbackHandler object.>
+  }
 """
 
 from builtins import str
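
As a concrete illustration of the format above, one request/response exchange
could look like the following (the 'makeToast' method and its argument are
placeholders, not the API of any particular server app):

  import json

  # A request as the client would serialize it.
  request = json.dumps({'id': 0, 'method': 'makeToast', 'params': ['Hello']})

  # A matching successful response carrying no callback.
  response = json.loads(
    '{"id": 0, "result": null, "error": null, "callback": null}')
  assert response['id'] == 0  # request and response ids must match
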
@@ -47,11 +47,11 @@
 # embedded Python environments. So, pre-emptively import and cache the encoder.
 # See https://bugs.python.org/issue17305 for more details.
 try:
-    import encodings.idna
+  import encodings.idna
 except ImportError:
-    # Some implementations of Python (e.g. IronPython) do not support the`idna`
-    # encoding, so ignore import failures based on that.
-    pass
+  # Some implementations of Python (e.g. IronPython) do not support the
+  # `idna` encoding, so ignore import failures based on that.
+  pass
 
 import json
 import socket
@@ -76,328 +76,328 @@
 
 
 class Error(errors.DeviceError):
-    pass
+  pass
 
 
 class AppStartError(Error):
-    """Raised when the app is not able to be started."""
+  """Raised when the app is not able to be started."""
 
 
 class AppRestoreConnectionError(Error):
-    """Raised when failed to restore app from disconnection."""
+  """Raised when failed to restore app from disconnection."""
 
 
 class ApiError(Error):
-    """Raised when remote API reports an error."""
+  """Raised when remote API reports an error."""
 
 
 class ProtocolError(Error):
-    """Raised when there is some error in exchanging data with server."""
-    NO_RESPONSE_FROM_HANDSHAKE = 'No response from handshake.'
-    NO_RESPONSE_FROM_SERVER = 'No response from server.'
-    MISMATCHED_API_ID = 'RPC request-response ID mismatch.'
+  """Raised when there is some error in exchanging data with server."""
+  NO_RESPONSE_FROM_HANDSHAKE = 'No response from handshake.'
+  NO_RESPONSE_FROM_SERVER = 'No response from server.'
+  MISMATCHED_API_ID = 'RPC request-response ID mismatch.'
 
 
 class JsonRpcCommand(object):
-    """Commands that can be invoked on all jsonrpc clients.
+  """Commands that can be invoked on all jsonrpc clients.
 
-    INIT: Initializes a new session.
-    CONTINUE: Creates a connection.
-    """
-    INIT = 'initiate'
-    CONTINUE = 'continue'
+  INIT: Initializes a new session.
+  CONTINUE: Creates a connection.
+  """
+  INIT = 'initiate'
+  CONTINUE = 'continue'
 
 
 class JsonRpcClientBase(object):
-    """Base class for jsonrpc clients that connect to remote servers.
+  """Base class for jsonrpc clients that connect to remote servers.
 
-    Connects to a remote device running a jsonrpc-compatible app. Before opening
-    a connection a port forward must be setup to go over usb. This be done using
-    adb.forward([local, remote]). Once the port has been forwarded it can be
-    used in this object as the port of communication.
+  Connects to a remote device running a jsonrpc-compatible app. Before opening
+  a connection, a port forward must be set up to go over USB. This can be
+  done using adb.forward([local, remote]). Once the port has been forwarded
+  it can be used in this object as the port of communication.
 
-    Attributes:
-        host_port: (int) The host port of this RPC client.
-        device_port: (int) The device port of this RPC client.
-        app_name: (str) The user-visible name of the app being communicated
-                  with.
-        uid: (int) The uid of this session.
+  Attributes:
+    host_port: (int) The host port of this RPC client.
+    device_port: (int) The device port of this RPC client.
+    app_name: (str) The user-visible name of the app being communicated
+          with.
+    uid: (int) The uid of this session.
+  """
+
+  def __init__(self, app_name, ad):
     """
+    Args:
+      app_name: (str) The user-visible name of the app being communicated
+        with.
+      ad: (AndroidDevice) The device object associated with a client.
+    """
+    self.host_port = None
+    self.device_port = None
+    self.app_name = app_name
+    self._ad = ad
+    self.log = self._ad.log
+    self.uid = None
+    self._client = None  # prevent close errors on connect failure
+    self._conn = None
+    self._counter = None
+    self._lock = threading.Lock()
+    self._event_client = None
+    self.verbose_logging = True
 
-    def __init__(self, app_name, ad):
-        """
-        Args:
-            app_name: (str) The user-visible name of the app being communicated
-                with.
-            ad: (AndroidDevice) The device object associated with a client.
-        """
-        self.host_port = None
-        self.device_port = None
-        self.app_name = app_name
-        self._ad = ad
-        self.log = self._ad.log
-        self.uid = None
-        self._client = None  # prevent close errors on connect failure
-        self._conn = None
-        self._counter = None
-        self._lock = threading.Lock()
-        self._event_client = None
-        self.verbose_logging = True
+  def __del__(self):
+    self.disconnect()
 
-    def __del__(self):
-        self.disconnect()
+  # Methods to be implemented by subclasses.
 
-    # Methods to be implemented by subclasses.
+  def start_app_and_connect(self):
+    """Starts the server app on the android device and connects to it.
 
-    def start_app_and_connect(self):
-        """Starts the server app on the android device and connects to it.
+    After this, the self.host_port and self.device_port attributes must be
+    set.
 
-        After this, the self.host_port and self.device_port attributes must be
-        set.
+    Must be implemented by subclasses.
 
-        Must be implemented by subclasses.
+    Raises:
+      AppStartError: When the app was not able to be started.
+    """
+    raise NotImplementedError()
 
-        Raises:
-            AppStartError: When the app was not able to be started.
-        """
-        raise NotImplementedError()
+  def stop_app(self):
+    """Kills any running instance of the app.
 
-    def stop_app(self):
-        """Kills any running instance of the app.
+    Must be implemented by subclasses.
+    """
+    raise NotImplementedError()
 
-        Must be implemented by subclasses.
-        """
-        raise NotImplementedError()
+  def restore_app_connection(self, port=None):
+    """Reconnects to the app after device USB was disconnected.
 
-    def restore_app_connection(self, port=None):
-        """Reconnects to the app after device USB was disconnected.
+    Instead of creating a new instance of the client:
+      - Uses the given port (or finds a new available host_port if none is
+        given).
+      - Tries to connect to the remote server with the selected port.
 
-        Instead of creating new instance of the client:
-          - Uses the given port (or finds a new available host_port if none is
-            given).
-          - Tries to connect to remote server with selected port.
+    Must be implemented by subclasses.
 
-        Must be implemented by subclasses.
+    Args:
+      port: If given, this is the host port from which to connect to the
+        remote device port. If not provided, find a new available port to
+        use as the host port.
 
-        Args:
-          port: If given, this is the host port from which to connect to remote
-              device port. If not provided, find a new available port as host
-              port.
+    Raises:
+      AppRestoreConnectionError: When the app could not be
+        reconnected.
+    """
+    raise NotImplementedError()
 
-        Raises:
-            AppRestoreConnectionError: When the app was not able to be
-            reconnected.
-        """
-        raise NotImplementedError()
+  def _start_event_client(self):
+    """Starts a separate JsonRpc client to the same session for propagating
+    events.
 
-    def _start_event_client(self):
-        """Starts a separate JsonRpc client to the same session for propagating
-        events.
+    This is an optional function that should only be implemented if the
+    client utilizes the snippet event mechanism.
 
-        This is an optional function that should only implement if the client
-        utilizes the snippet event mechanism.
+    Returns:
+      A JsonRpc Client object that connects to the same session as the
+      one on which this function is called.
+    """
+    raise NotImplementedError()
 
-        Returns:
-            A JsonRpc Client object that connects to the same session as the
-            one on which this function is called.
-        """
-        raise NotImplementedError()
+  # Rest of the client methods.
 
-    # Rest of the client methods.
+  def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):
+    """Opens a connection to a JSON RPC server.
 
-    def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):
-        """Opens a connection to a JSON RPC server.
+    Opens a connection to a remote client. The connection attempt will time
+    out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each
+    subsequent operation over this socket will time out after
+    _SOCKET_READ_TIMEOUT seconds as well.
 
-        Opens a connection to a remote client. The connection attempt will time
-        out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each
-        subsequent operation over this socket will time out after
-        _SOCKET_READ_TIMEOUT seconds as well.
+    Args:
+      uid: int, The uid of the session to join, or UNKNOWN_UID to start a
+        new session.
+      cmd: JsonRpcCommand, The command to use for creating the connection.
 
-        Args:
-            uid: int, The uid of the session to join, or UNKNOWN_UID to start a
-                new session.
-            cmd: JsonRpcCommand, The command to use for creating the connection.
+    Raises:
+      IOError: Raised when the socket times out due to an IO error.
+      socket.timeout: Raised when the socket waits too long for connection.
+      ProtocolError: Raised when there is an error in the protocol.
+    """
+    self._counter = self._id_counter()
+    try:
+      self._conn = socket.create_connection(('localhost', self.host_port),
+                        _SOCKET_CONNECTION_TIMEOUT)
+    except ConnectionRefusedError as err:
+      # Retry using '127.0.0.1' for IPv4 enabled machines that only resolve
+      # 'localhost' to '[::1]'.
+      self.log.debug('Failed to connect to localhost, trying 127.0.0.1: {}'
+             .format(str(err)))
+      self._conn = socket.create_connection(('127.0.0.1', self.host_port),
+                        _SOCKET_CONNECTION_TIMEOUT)
 
-        Raises:
-            IOError: Raised when the socket times out from io error
-            socket.timeout: Raised when the socket waits to long for connection.
-            ProtocolError: Raised when there is an error in the protocol.
-        """
-        self._counter = self._id_counter()
-        try:
-          self._conn = socket.create_connection(('localhost', self.host_port),
-                                                _SOCKET_CONNECTION_TIMEOUT)
-        except ConnectionRefusedError as err:
-          # Retry using '127.0.0.1' for IPv4 enabled machines that only resolve
-          # 'localhost' to '[::1]'.
-          self.log.debug('Failed to connect to localhost, trying 127.0.0.1: {}'
-                         .format(str(err)))
-          self._conn = socket.create_connection(('127.0.0.1', self.host_port),
-                                                _SOCKET_CONNECTION_TIMEOUT)
+    self._conn.settimeout(_SOCKET_READ_TIMEOUT)
+    self._client = self._conn.makefile(mode='brw')
 
-        self._conn.settimeout(_SOCKET_READ_TIMEOUT)
-        self._client = self._conn.makefile(mode='brw')
+    resp = self._cmd(cmd, uid)
+    if not resp:
+      raise ProtocolError(self._ad,
+                ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
+    result = json.loads(str(resp, encoding='utf8'))
+    if result['status']:
+      self.uid = result['uid']
+    else:
+      self.uid = UNKNOWN_UID
 
-        resp = self._cmd(cmd, uid)
-        if not resp:
-            raise ProtocolError(self._ad,
-                                ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
-        result = json.loads(str(resp, encoding='utf8'))
-        if result['status']:
-            self.uid = result['uid']
+  def disconnect(self):
+    """Close the connection to the remote client."""
+    if self._conn:
+      self._conn.close()
+      self._conn = None
+
+  def clear_host_port(self):
+    """Stops the adb port forwarding of the host port used by this client.
+    """
+    if self.host_port:
+      self._adb.forward(['--remove', 'tcp:%d' % self.host_port])
+      self.host_port = None
+
+  def _client_send(self, msg):
+    """Sends an Rpc message through the connection.
+
+    Args:
+      msg: string, the message to send.
+
+    Raises:
+      Error: a socket error occurred during the send.
+    """
+    try:
+      self._client.write(msg.encode("utf8") + b'\n')
+      self._client.flush()
+      self.log.debug('Snippet sent %s.', msg)
+    except socket.error as e:
+      raise Error(
+        self._ad,
+        'Encountered socket error "%s" sending RPC message "%s"' %
+        (e, msg))
+
+  def _client_receive(self):
+    """Receives the server's response of an Rpc message.
+
+    Returns:
+      Raw byte string of the response.
+
+    Raises:
+      Error: a socket error occurred during the read.
+    """
+    try:
+      response = self._client.readline()
+      if self.verbose_logging:
+        self.log.debug('Snippet received: %s', response)
+      else:
+        if _MAX_RPC_RESP_LOGGING_LENGTH >= len(response):
+          self.log.debug('Snippet received: %s', response)
         else:
-            self.uid = UNKNOWN_UID
+          self.log.debug(
+            'Snippet received: %s... %d chars are truncated',
+            response[:_MAX_RPC_RESP_LOGGING_LENGTH],
+            len(response) - _MAX_RPC_RESP_LOGGING_LENGTH)
+      return response
+    except socket.error as e:
+      raise Error(
+        self._ad,
+        'Encountered socket error reading RPC response "%s"' % e)
 
-    def disconnect(self):
-        """Close the connection to the remote client."""
-        if self._conn:
-            self._conn.close()
-            self._conn = None
+  def _cmd(self, command, uid=None):
+    """Send a command to the server.
 
-    def clear_host_port(self):
-        """Stops the adb port forwarding of the host port used by this client.
-        """
-        if self.host_port:
-            self._adb.forward(['--remove', 'tcp:%d' % self.host_port])
-            self.host_port = None
+    Args:
+      command: str, The name of the command to execute.
+      uid: int, the uid of the session to send the command to.
 
-    def _client_send(self, msg):
-        """Sends an Rpc message through the connection.
+    Returns:
+      The line that was written back.
+    """
+    if not uid:
+      uid = self.uid
+    self._client_send(json.dumps({'cmd': command, 'uid': uid}))
+    return self._client_receive()
 
-        Args:
-            msg: string, the message to send.
+  def _rpc(self, method, *args):
+    """Sends an rpc to the app.
 
-        Raises:
-            Error: a socket error occurred during the send.
-        """
-        try:
-            self._client.write(msg.encode("utf8") + b'\n')
-            self._client.flush()
-            self.log.debug('Snippet sent %s.', msg)
-        except socket.error as e:
-            raise Error(
-                self._ad,
-                'Encountered socket error "%s" sending RPC message "%s"' %
-                (e, msg))
+    Args:
+      method: str, The name of the method to execute.
+      args: any, The args of the method.
 
-    def _client_receive(self):
-        """Receives the server's response of an Rpc message.
+    Returns:
+      The result of the rpc.
 
-        Returns:
-            Raw byte string of the response.
+    Raises:
+      ProtocolError: Something went wrong with the protocol.
+      ApiError: The rpc went through, however executed with errors.
+    """
+    with self._lock:
+      apiid = next(self._counter)
+      data = {'id': apiid, 'method': method, 'params': args}
+      request = json.dumps(data)
+      self._client_send(request)
+      response = self._client_receive()
+    if not response:
+      raise ProtocolError(self._ad,
+                ProtocolError.NO_RESPONSE_FROM_SERVER)
+    result = json.loads(str(response, encoding='utf8'))
+    if result['error']:
+      raise ApiError(self._ad, result['error'])
+    if result['id'] != apiid:
+      raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)
+    if result.get('callback') is not None:
+      if self._event_client is None:
+        self._event_client = self._start_event_client()
+      return callback_handler.CallbackHandler(
+        callback_id=result['callback'],
+        event_client=self._event_client,
+        ret_value=result['result'],
+        method_name=method,
+        ad=self._ad)
+    return result['result']
 
-        Raises:
-            Error: a socket error occurred during the read.
-        """
-        try:
-            response = self._client.readline()
-            if self.verbose_logging:
-                self.log.debug('Snippet received: %s', response)
-            else:
-                if _MAX_RPC_RESP_LOGGING_LENGTH >= len(response):
-                    self.log.debug('Snippet received: %s', response)
-                else:
-                    self.log.debug(
-                        'Snippet received: %s... %d chars are truncated',
-                        response[:_MAX_RPC_RESP_LOGGING_LENGTH],
-                        len(response) - _MAX_RPC_RESP_LOGGING_LENGTH)
-            return response
-        except socket.error as e:
-            raise Error(
-                self._ad,
-                'Encountered socket error reading RPC response "%s"' % e)
+  def disable_hidden_api_blacklist(self):
+    """If necessary and possible, disables hidden api blacklist."""
+    version_codename = self._ad.build_info['build_version_codename']
+    sdk_version = int(self._ad.build_info['build_version_sdk'])
+    # we check version_codename in addition to sdk_version because P builds
+    # in development report sdk_version 27, but still enforce the blacklist.
+    if self._ad.is_rootable and (sdk_version >= 28
+                   or version_codename == 'P'):
+      self._ad.adb.shell(
+        'settings put global hidden_api_blacklist_exemptions "*"')
 
-    def _cmd(self, command, uid=None):
-        """Send a command to the server.
+  def __getattr__(self, name):
+    """Wrapper for python magic to turn method calls into RPC calls."""
 
-        Args:
-            command: str, The name of the command to execute.
-            uid: int, the uid of the session to send the command to.
+    def rpc_call(*args):
+      return self._rpc(name, *args)
 
-        Returns:
-            The line that was written back.
-        """
-        if not uid:
-            uid = self.uid
-        self._client_send(json.dumps({'cmd': command, 'uid': uid}))
-        return self._client_receive()
+    return rpc_call
 
-    def _rpc(self, method, *args):
-        """Sends an rpc to the app.
+  def _id_counter(self):
+    i = 0
+    while True:
+      yield i
+      i += 1
 
-        Args:
-            method: str, The name of the method to execute.
-            args: any, The args of the method.
+  def set_snippet_client_verbose_logging(self, verbose):
+    """Switches verbose logging. True for logging full RPC response.
 
-        Returns:
-            The result of the rpc.
+    By default it will only log up to max_rpc_return_value_length characters
+    of each Rpc return string. If you need to see the full message returned
+    from an Rpc, please turn on verbose logging.
 
-        Raises:
-            ProtocolError: Something went wrong with the protocol.
-            ApiError: The rpc went through, however executed with errors.
-        """
-        with self._lock:
-            apiid = next(self._counter)
-            data = {'id': apiid, 'method': method, 'params': args}
-            request = json.dumps(data)
-            self._client_send(request)
-            response = self._client_receive()
-        if not response:
-            raise ProtocolError(self._ad,
-                                ProtocolError.NO_RESPONSE_FROM_SERVER)
-        result = json.loads(str(response, encoding='utf8'))
-        if result['error']:
-            raise ApiError(self._ad, result['error'])
-        if result['id'] != apiid:
-            raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)
-        if result.get('callback') is not None:
-            if self._event_client is None:
-                self._event_client = self._start_event_client()
-            return callback_handler.CallbackHandler(
-                callback_id=result['callback'],
-                event_client=self._event_client,
-                ret_value=result['result'],
-                method_name=method,
-                ad=self._ad)
-        return result['result']
+    max_rpc_return_value_length is set to 1024 by default. The length covers
+    the full Rpc response in Json format, including the 1st element "id".
 
-    def disable_hidden_api_blacklist(self):
-        """If necessary and possible, disables hidden api blacklist."""
-        version_codename = self._ad.build_info['build_version_codename']
-        sdk_version = int(self._ad.build_info['build_version_sdk'])
-        # we check version_codename in addition to sdk_version because P builds
-        # in development report sdk_version 27, but still enforce the blacklist.
-        if self._ad.is_rootable and (sdk_version >= 28
-                                     or version_codename == 'P'):
-            self._ad.adb.shell(
-                'settings put global hidden_api_blacklist_exemptions "*"')
-
-    def __getattr__(self, name):
-        """Wrapper for python magic to turn method calls into RPC calls."""
-
-        def rpc_call(*args):
-            return self._rpc(name, *args)
-
-        return rpc_call
-
-    def _id_counter(self):
-        i = 0
-        while True:
-            yield i
-            i += 1
-
-    def set_snippet_client_verbose_logging(self, verbose):
-        """Switches verbose logging. True for logging full RPC response.
-
-        By default it will only write max_rpc_return_value_length for Rpc return
-        strings. If you need to see full message returned from Rpc, please turn
-        on verbose logging.
-
-        max_rpc_return_value_length will set to 1024 by default, the length
-        contains full Rpc response in Json format, included 1st element "id".
-
-        Args:
-            verbose: bool. If True, turns on verbose logging, if False turns off
-        """
-        self._ad.log.info('Set verbose logging to %s.', verbose)
-        self.verbose_logging = verbose
+    Args:
+      verbose: bool. If True, turns on verbose logging; False turns it off.
+    """
+    self._ad.log.info('Set verbose logging to %s.', verbose)
+    self.verbose_logging = verbose
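
A hedged sketch of what a concrete subclass of JsonRpcClientBase has to
provide, based only on the abstract methods above; the class name, port
numbers, and launch/stop details are placeholders rather than the behavior of
any real Mobly client:

  from mobly.controllers.android_device_lib import jsonrpc_client_base


  class SampleRpcClient(jsonrpc_client_base.JsonRpcClientBase):
    """Illustrative subclass; real Mobly clients differ."""

    def start_app_and_connect(self):
      # A real implementation would launch the server app on the device and
      # set up adb port forwarding before connecting.
      self.device_port = 9999  # placeholder device-side port
      self.host_port = 50000  # placeholder host-side port
      self.connect()

    def stop_app(self):
      # Kill the server app on the device; nothing to do in this sketch.
      pass

    def restore_app_connection(self, port=None):
      # Reuse the given host port (or the previous one) and rejoin the
      # existing session.
      self.host_port = port or self.host_port
      self.connect(uid=self.uid,
                   cmd=jsonrpc_client_base.JsonRpcCommand.CONTINUE)

    def _start_event_client(self):
      # Only needed if the client uses the snippet event mechanism.
      raise NotImplementedError()
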
diff --git a/mobly/controllers/android_device_lib/services/logcat.py b/mobly/controllers/android_device_lib/services/logcat.py
index fd8d644..c470b17 100644
--- a/mobly/controllers/android_device_lib/services/logcat.py
+++ b/mobly/controllers/android_device_lib/services/logcat.py
@@ -26,240 +26,240 @@
 
 
 class Error(errors.ServiceError):
-    """Root error type for logcat service."""
-    SERVICE_TYPE = 'Logcat'
+  """Root error type for logcat service."""
+  SERVICE_TYPE = 'Logcat'
 
 
 class Config(object):
-    """Config object for logcat service.
+  """Config object for logcat service.
 
-    Attributes:
-        clear_log: bool, clears the logcat before collection if True.
-        logcat_params: string, extra params to be added to logcat command.
-        output_file_path: string, the path on the host to write the log file
-            to, including the actual filename. The service will automatically
-            generate one if not specified.
-    """
+  Attributes:
+    clear_log: bool, clears the logcat before collection if True.
+    logcat_params: string, extra params to be added to logcat command.
+    output_file_path: string, the path on the host to write the log file
+      to, including the actual filename. The service will automatically
+      generate one if not specified.
+  """
 
-    def __init__(self,
-                 logcat_params=None,
-                 clear_log=True,
-                 output_file_path=None):
-        self.clear_log = clear_log
-        self.logcat_params = logcat_params if logcat_params else ''
-        self.output_file_path = output_file_path
+  def __init__(self,
+         logcat_params=None,
+         clear_log=True,
+         output_file_path=None):
+    self.clear_log = clear_log
+    self.logcat_params = logcat_params if logcat_params else ''
+    self.output_file_path = output_file_path
 
 
 class Logcat(base_service.BaseService):
-    """Android logcat service for Mobly's AndroidDevice controller.
+  """Android logcat service for Mobly's AndroidDevice controller.
 
-    Attributes:
-        adb_logcat_file_path: string, path to the file that the service writes
-            adb logcat to by default.
+  Attributes:
+    adb_logcat_file_path: string, path to the file that the service writes
+      adb logcat to by default.
+  """
+  OUTPUT_FILE_TYPE = 'logcat'
+
+  def __init__(self, android_device, configs=None):
+    super(Logcat, self).__init__(android_device, configs)
+    self._ad = android_device
+    self._adb_logcat_process = None
+    self._adb_logcat_file_obj = None
+    self.adb_logcat_file_path = None
+    # Logcat service uses a single config obj, using singular internal
+    # name: `_config`.
+    self._config = configs if configs else Config()
+
+  def _enable_logpersist(self):
+    """Attempts to enable logpersist daemon to persist logs."""
+    # Logpersist is only allowed on rootable devices because of excessive
+    # reads/writes for persisting logs.
+    if not self._ad.is_rootable:
+      return
+
+    logpersist_warning = ('%s encountered an error enabling persistent'
+                ' logs, logs may not get saved.')
+    # Android L and older versions do not have logpersist installed,
+    # so check that the logpersist scripts exists before trying to use
+    # them.
+    if not self._ad.adb.has_shell_command('logpersist.start'):
+      logging.warning(logpersist_warning, self)
+      return
+
+    try:
+      # Disable adb log spam filter for rootable devices. Have to stop
+      # and clear settings first because 'start' doesn't support --clear
+      # option before Android N.
+      self._ad.adb.shell('logpersist.stop --clear')
+      self._ad.adb.shell('logpersist.start')
+    except adb.AdbError:
+      logging.warning(logpersist_warning, self)
+
+  def _is_timestamp_in_range(self, target, begin_time, end_time):
+    low = mobly_logger.logline_timestamp_comparator(begin_time,
+                            target) <= 0
+    high = mobly_logger.logline_timestamp_comparator(end_time, target) >= 0
+    return low and high
+
+  def create_output_excerpts(self, test_info):
+    """Convenient method for creating excerpts of adb logcat.
+
+    This copies logcat lines from self.adb_logcat_file_path to an excerpt
+    file, starting from the location where the previous excerpt ended.
+
+    Call this method at the end of: `setup_class`, `teardown_test`, and
+    `teardown_class`.
+
+    Args:
+      test_info: `self.current_test_info` in a Mobly test.
+
+    Returns:
+      List of strings, the absolute paths to excerpt files.
     """
-    OUTPUT_FILE_TYPE = 'logcat'
+    dest_path = test_info.output_path
+    utils.create_dir(dest_path)
+    filename = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, test_info,
+                        'txt')
+    excerpt_file_path = os.path.join(dest_path, filename)
+    with io.open(excerpt_file_path, 'w', encoding='utf-8',
+           errors='replace') as out:
+      # Devices may accidentally go offline during test,
+      # check not None before readline().
+      while self._adb_logcat_file_obj:
+        line = self._adb_logcat_file_obj.readline()
+        if not line:
+          break
+        out.write(line)
+    self._ad.log.debug('logcat excerpt created at: %s', excerpt_file_path)
+    return [excerpt_file_path]
 
-    def __init__(self, android_device, configs=None):
-        super(Logcat, self).__init__(android_device, configs)
-        self._ad = android_device
-        self._adb_logcat_process = None
-        self._adb_logcat_file_obj = None
-        self.adb_logcat_file_path = None
-        # Logcat service uses a single config obj, using singular internal
-        # name: `_config`.
-        self._config = configs if configs else Config()
+  @property
+  def is_alive(self):
+    return True if self._adb_logcat_process else False
 
-    def _enable_logpersist(self):
-        """Attempts to enable logpersist daemon to persist logs."""
-        # Logpersist is only allowed on rootable devices because of excessive
-        # reads/writes for persisting logs.
-        if not self._ad.is_rootable:
-            return
+  def clear_adb_log(self):
+    """Clears cached adb content."""
+    try:
+      self._ad.adb.logcat('-c')
+    except adb.AdbError as e:
+      # On Android O, the clear command fails due to a known bug.
+      # Catching this so we don't crash from this Android issue.
+      if b'failed to clear' in e.stderr:
+        self._ad.log.warning(
+          'Encountered known Android error to clear logcat.')
+      else:
+        raise
 
-        logpersist_warning = ('%s encountered an error enabling persistent'
-                              ' logs, logs may not get saved.')
-        # Android L and older versions do not have logpersist installed,
-        # so check that the logpersist scripts exists before trying to use
-        # them.
-        if not self._ad.adb.has_shell_command('logpersist.start'):
-            logging.warning(logpersist_warning, self)
-            return
+  def _assert_not_running(self):
+    """Asserts the logcat service is not running.
 
-        try:
-            # Disable adb log spam filter for rootable devices. Have to stop
-            # and clear settings first because 'start' doesn't support --clear
-            # option before Android N.
-            self._ad.adb.shell('logpersist.stop --clear')
-            self._ad.adb.shell('logpersist.start')
-        except adb.AdbError:
-            logging.warning(logpersist_warning, self)
+    Raises:
+      Error: if the logcat service is running.
+    """
+    if self.is_alive:
+      raise Error(
+        self._ad,
+        'Logcat thread is already running, cannot start another one.')
 
-    def _is_timestamp_in_range(self, target, begin_time, end_time):
-        low = mobly_logger.logline_timestamp_comparator(begin_time,
-                                                        target) <= 0
-        high = mobly_logger.logline_timestamp_comparator(end_time, target) >= 0
-        return low and high
+  def update_config(self, new_config):
+    """Updates the configuration for the service.
 
-    def create_output_excerpts(self, test_info):
-        """Convenient method for creating excerpts of adb logcat.
+    The service needs to be stopped before updating, and explicitly started
+    after the update.
 
-        This copies logcat lines from self.adb_logcat_file_path to an excerpt
-        file, starting from the location where the previous excerpt ended.
+    This will reset the service. Previous output files may be orphaned if
+    output path is changed.
 
-        Call this method at the end of: `setup_class`, `teardown_test`, and
-        `teardown_class`.
+    Args:
+      new_config: Config, the new config to use.
+    """
+    self._assert_not_running()
+    self._ad.log.info('[LogcatService] Changing config from %s to %s',
+              self._config, new_config)
+    self._config = new_config
 
-        Args:
-            test_info: `self.current_test_info` in a Mobly test.
+  def _open_logcat_file(self):
+    """Create a file object that points to the beginning of the logcat file.
+    Wait for the logcat file to be created by the subprocess if it doesn't
+    exist.
+    """
+    if not self._adb_logcat_file_obj:
+      start_time = time.time()
+      while not os.path.exists(self.adb_logcat_file_path):
+        if time.time() > start_time + CREATE_LOGCAT_FILE_TIMEOUT_SEC:
+          raise Error(
+            self._ad,
+            'Timeout while waiting for logcat file to be created.')
+        time.sleep(1)
+      self._adb_logcat_file_obj = io.open(
+        self.adb_logcat_file_path, 'r', encoding='utf-8',
+        errors='replace')
+      self._adb_logcat_file_obj.seek(0, os.SEEK_END)
 
-        Returns:
-            List of strings, the absolute paths to excerpt files.
-        """
-        dest_path = test_info.output_path
-        utils.create_dir(dest_path)
-        filename = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, test_info,
-                                              'txt')
-        excerpt_file_path = os.path.join(dest_path, filename)
-        with io.open(excerpt_file_path, 'w', encoding='utf-8',
-                     errors='replace') as out:
-            # Devices may accidentally go offline during test,
-            # check not None before readline().
-            while self._adb_logcat_file_obj:
-                line = self._adb_logcat_file_obj.readline()
-                if not line:
-                    break
-                out.write(line)
-        self._ad.log.debug('logcat excerpt created at: %s', excerpt_file_path)
-        return [excerpt_file_path]
+  def _close_logcat_file(self):
+    """Closes and resets the logcat file object, if it exists."""
+    if self._adb_logcat_file_obj:
+      self._adb_logcat_file_obj.close()
+      self._adb_logcat_file_obj = None
 
-    @property
-    def is_alive(self):
-        return True if self._adb_logcat_process else False
+  def start(self):
+    """Starts a standing adb logcat collection.
 
-    def clear_adb_log(self):
-        """Clears cached adb content."""
-        try:
-            self._ad.adb.logcat('-c')
-        except adb.AdbError as e:
-            # On Android O, the clear command fails due to a known bug.
-            # Catching this so we don't crash from this Android issue.
-            if b'failed to clear' in e.stderr:
-                self._ad.log.warning(
-                    'Encountered known Android error to clear logcat.')
-            else:
-                raise
+    The collection runs in a separate subprocess and saves logs in a file.
+    """
+    self._assert_not_running()
+    if self._config.clear_log:
+      self.clear_adb_log()
+    self._start()
+    self._open_logcat_file()
 
-    def _assert_not_running(self):
-        """Asserts the logcat service is not running.
+  def _start(self):
+    """The actual logic of starting logcat."""
+    self._enable_logpersist()
+    if self._config.output_file_path:
+      self._close_logcat_file()
+      self.adb_logcat_file_path = self._config.output_file_path
+    if not self.adb_logcat_file_path:
+      f_name = self._ad.generate_filename(self.OUTPUT_FILE_TYPE,
+                        extension_name='txt')
+      logcat_file_path = os.path.join(self._ad.log_path, f_name)
+      self.adb_logcat_file_path = logcat_file_path
+    utils.create_dir(os.path.dirname(self.adb_logcat_file_path))
+    # In debugging mode of IntelliJ IDEA, "patch_args" removes double quotes
+    # from args that start and end with them.
+    # Add spaces at the beginning and the end to work around this issue.
+    cmd = ' "%s" -s %s logcat -v threadtime -T 1 %s >> "%s" ' % (
+      adb.ADB, self._ad.serial, self._config.logcat_params,
+      self.adb_logcat_file_path)
+    process = utils.start_standing_subprocess(cmd, shell=True)
+    self._adb_logcat_process = process
 
-        Raises:
-            Error, if the logcat service is running.
-        """
-        if self.is_alive:
-            raise Error(
-                self._ad,
-                'Logcat thread is already running, cannot start another one.')
+  def stop(self):
+    """Stops the adb logcat service."""
+    self._close_logcat_file()
+    self._stop()
 
-    def update_config(self, new_config):
-        """Updates the configuration for the service.
+  def _stop(self):
+    """Stops the background process for logcat."""
+    if not self._adb_logcat_process:
+      return
+    try:
+      utils.stop_standing_subprocess(self._adb_logcat_process)
+    except:
+      self._ad.log.exception('Failed to stop adb logcat.')
+    self._adb_logcat_process = None
 
-        The service needs to be stopped before updating, and explicitly started
-        after the update.
+  def pause(self):
+    """Pauses logcat.
 
-        This will reset the service. Previous output files may be orphaned if
-        output path is changed.
+    Note: the service is unable to collect logs while paused. If more logs
+    are generated on the device than the device's log buffer can hold, some
+    logs will be lost.
+    """
+    self._stop()
 
-        Args:
-            new_config: Config, the new config to use.
-        """
-        self._assert_not_running()
-        self._ad.log.info('[LogcatService] Changing config from %s to %s',
-                          self._config, new_config)
-        self._config = new_config
-
-    def _open_logcat_file(self):
-        """Create a file object that points to the beginning of the logcat file.
-        Wait for the logcat file to be created by the subprocess if it doesn't
-        exist.
-        """
-        if not self._adb_logcat_file_obj:
-            start_time = time.time()
-            while not os.path.exists(self.adb_logcat_file_path):
-                if time.time() > start_time + CREATE_LOGCAT_FILE_TIMEOUT_SEC:
-                    raise Error(
-                        self._ad,
-                        'Timeout while waiting for logcat file to be created.')
-                time.sleep(1)
-            self._adb_logcat_file_obj = io.open(
-                self.adb_logcat_file_path, 'r', encoding='utf-8',
-                errors='replace')
-            self._adb_logcat_file_obj.seek(0, os.SEEK_END)
-
-    def _close_logcat_file(self):
-        """Closes and resets the logcat file object, if it exists."""
-        if self._adb_logcat_file_obj:
-            self._adb_logcat_file_obj.close()
-            self._adb_logcat_file_obj = None
-
-    def start(self):
-        """Starts a standing adb logcat collection.
-
-        The collection runs in a separate subprocess and saves logs in a file.
-        """
-        self._assert_not_running()
-        if self._config.clear_log:
-            self.clear_adb_log()
-        self._start()
-        self._open_logcat_file()
-
-    def _start(self):
-        """The actual logic of starting logcat."""
-        self._enable_logpersist()
-        if self._config.output_file_path:
-            self._close_logcat_file()
-            self.adb_logcat_file_path = self._config.output_file_path
-        if not self.adb_logcat_file_path:
-            f_name = self._ad.generate_filename(self.OUTPUT_FILE_TYPE,
-                                                extension_name='txt')
-            logcat_file_path = os.path.join(self._ad.log_path, f_name)
-            self.adb_logcat_file_path = logcat_file_path
-        utils.create_dir(os.path.dirname(self.adb_logcat_file_path))
-        # In debugging mode of IntelijIDEA, "patch_args" remove
-        # double quotes in args if starting and ending with it.
-        # Add spaces at beginning and at last to fix this issue.
-        cmd = ' "%s" -s %s logcat -v threadtime -T 1 %s >> "%s" ' % (
-            adb.ADB, self._ad.serial, self._config.logcat_params,
-            self.adb_logcat_file_path)
-        process = utils.start_standing_subprocess(cmd, shell=True)
-        self._adb_logcat_process = process
-
-    def stop(self):
-        """Stops the adb logcat service."""
-        self._close_logcat_file()
-        self._stop()
-
-    def _stop(self):
-        """Stops the background process for logcat."""
-        if not self._adb_logcat_process:
-            return
-        try:
-            utils.stop_standing_subprocess(self._adb_logcat_process)
-        except:
-            self._ad.log.exception('Failed to stop adb logcat.')
-        self._adb_logcat_process = None
-
-    def pause(self):
-        """Pauses logcat.
-
-        Note: the service is unable to collect the logs when paused, if more
-        logs are generated on the device than the device's log buffer can hold,
-        some logs would be lost.
-        """
-        self._stop()
-
-    def resume(self):
-        """Resumes a paused logcat service."""
-        self._assert_not_running()
-        # Not clearing the log regardless of the config when resuming.
-        # Otherwise the logs during the paused time will be lost.
-        self._start()
+  def resume(self):
+    """Resumes a paused logcat service."""
+    self._assert_not_running()
+    # Not clearing the log regardless of the config when resuming.
+    # Otherwise the logs during the paused time will be lost.
+    self._start()
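
Putting the service's life cycle together, a hedged usage sketch based only on
the methods above; `ad` stands for an already-created AndroidDevice object,
`test_info` for `self.current_test_info` in a Mobly test, and the
`logcat_params` value is just an example:

  from mobly.controllers.android_device_lib.services import logcat

  config = logcat.Config(logcat_params='-b all', clear_log=True)
  service = logcat.Logcat(ad, config)
  service.start()
  # ... run the test steps that produce device logs ...
  excerpt_paths = service.create_output_excerpts(test_info)
  service.stop()
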
diff --git a/mobly/expects.py b/mobly/expects.py
index 2dd51f5..d95a83c 100644
--- a/mobly/expects.py
+++ b/mobly/expects.py
@@ -30,136 +30,136 @@
 
 
 class _ExpectErrorRecorder(object):
-    """Singleton used to store errors caught via `expect_*` functions in test.
+  """Singleton used to store errors caught via `expect_*` functions in test.
 
-    This class is only instantiated once as a singleton. It holds a reference
-    to the record object for the test currently executing.
+  This class is only instantiated once as a singleton. It holds a reference
+  to the record object for the test currently executing.
+  """
+
+  def __init__(self, record=None):
+    self.reset_internal_states(record=record)
+
+  def reset_internal_states(self, record=None):
+    """Resets the internal state of the recorder.
+
+    Args:
+      record: records.TestResultRecord, the test record for a test.
     """
+    self._record = None
+    self._count = 0
+    self._record = record
 
-    def __init__(self, record=None):
-        self.reset_internal_states(record=record)
+  @property
+  def has_error(self):
+    """If any error has been recorded since the last reset."""
+    return self._count > 0
 
-    def reset_internal_states(self, record=None):
-        """Resets the internal state of the recorder.
+  @property
+  def error_count(self):
+    """The number of errors that have been recorded since last reset."""
+    return self._count
 
-        Args:
-            record: records.TestResultRecord, the test record for a test.
-        """
-        self._record = None
-        self._count = 0
-        self._record = record
+  def add_error(self, error):
+    """Record an error from expect APIs.
 
-    @property
-    def has_error(self):
-        """If any error has been recorded since the last reset."""
-        return self._count > 0
+    This method generates a position stamp for the expect. The stamp is
+    composed of a timestamp and the number of errors recorded so far.
 
-    @property
-    def error_count(self):
-        """The number of errors that have been recorded since last reset."""
-        return self._count
-
-    def add_error(self, error):
-        """Record an error from expect APIs.
-
-        This method generates a position stamp for the expect. The stamp is
-        composed of a timestamp and the number of errors recorded so far.
-
-        Args:
-            error: Exception or signals.ExceptionRecord, the error to add.
-        """
-        self._count += 1
-        self._record.add_error('expect@%s+%s' % (time.time(), self._count),
-                               error)
+    Args:
+      error: Exception or signals.ExceptionRecord, the error to add.
+    """
+    self._count += 1
+    self._record.add_error('expect@%s+%s' % (time.time(), self._count),
+                           error)
 
 
 def expect_true(condition, msg, extras=None):
-    """Expects an expression evaluates to True.
+  """Expects an expression evaluates to True.
 
-    If the expectation is not met, the test is marked as fail after its
-    execution finishes.
+  If the expectation is not met, the test is marked as fail after its
+  execution finishes.
 
-    Args:
-        expr: The expression that is evaluated.
-        msg: A string explaining the details in case of failure.
-        extras: An optional field for extra information to be included in test
-            result.
-    """
-    try:
-        asserts.assert_true(condition, msg, extras)
-    except signals.TestSignal as e:
-        logging.exception('Expected a `True` value, got `False`.')
-        recorder.add_error(e)
+  Args:
+    condition: The expression that is evaluated.
+    msg: A string explaining the details in case of failure.
+    extras: An optional field for extra information to be included in test
+      result.
+  """
+  try:
+    asserts.assert_true(condition, msg, extras)
+  except signals.TestSignal as e:
+    logging.exception('Expected a `True` value, got `False`.')
+    recorder.add_error(e)
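
Unlike `asserts.assert_true`, a failed expectation does not abort the test; the error is recorded and the test is marked as failed once it finishes. A minimal sketch (the Wi-Fi helpers and `self.dut` are hypothetical):

    from mobly import base_test, expects

    class WifiTest(base_test.BaseTestClass):

      def test_wifi_stays_connected(self):
        expects.expect_true(wifi_is_connected(self.dut), 'wifi dropped')
        # Execution continues past a failed expectation; the recorded error
        # only fails the test after the test method returns.
        expects.expect_true(ping_works(self.dut), 'no connectivity')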
 
 
 def expect_false(condition, msg, extras=None):
-    """Expects an expression evaluates to False.
+  """Expects an expression evaluates to False.
 
-    If the expectation is not met, the test is marked as fail after its
-    execution finishes.
+  If the expectation is not met, the test is marked as fail after its
+  execution finishes.
 
-    Args:
-        expr: The expression that is evaluated.
-        msg: A string explaining the details in case of failure.
-        extras: An optional field for extra information to be included in test
-            result.
-    """
-    try:
-        asserts.assert_false(condition, msg, extras)
-    except signals.TestSignal as e:
-        logging.exception('Expected a `False` value, got `True`.')
-        recorder.add_error(e)
+  Args:
+    condition: The expression that is evaluated.
+    msg: A string explaining the details in case of failure.
+    extras: An optional field for extra information to be included in test
+      result.
+  """
+  try:
+    asserts.assert_false(condition, msg, extras)
+  except signals.TestSignal as e:
+    logging.exception('Expected a `False` value, got `True`.')
+    recorder.add_error(e)
 
 
 def expect_equal(first, second, msg=None, extras=None):
-    """Expects the equality of objects, otherwise fail the test.
+  """Expects the equality of objects, otherwise fail the test.
 
-    If the expectation is not met, the test is marked as fail after its
-    execution finishes.
+  If the expectation is not met, the test is marked as fail after its
+  execution finishes.
 
-    Error message is "first != second" by default. Additional explanation can
-    be supplied in the message.
+  Error message is "first != second" by default. Additional explanation can
+  be supplied in the message.
 
-    Args:
-        first: The first object to compare.
-        second: The second object to compare.
-        msg: A string that adds additional info about the failure.
-        extras: An optional field for extra information to be included in test
-            result.
-    """
-    try:
-        asserts.assert_equal(first, second, msg, extras)
-    except signals.TestSignal as e:
-        logging.exception('Expected %s equals to %s, but they are not.', first,
-                          second)
-        recorder.add_error(e)
+  Args:
+    first: The first object to compare.
+    second: The second object to compare.
+    msg: A string that adds additional info about the failure.
+    extras: An optional field for extra information to be included in test
+      result.
+  """
+  try:
+    asserts.assert_equal(first, second, msg, extras)
+  except signals.TestSignal as e:
+    logging.exception('Expected %s to equal %s, but they do not.', first,
+                      second)
+    recorder.add_error(e)
 
 
 @contextlib.contextmanager
 def expect_no_raises(message=None, extras=None):
-    """Expects no exception is raised in a context.
+  """Expects no exception is raised in a context.
 
-    If the expectation is not met, the test is marked as fail after its
-    execution finishes.
+  If the expectation is not met, the test is marked as fail after its
+  execution finishes.
 
-    A default message is added to the exception `details`.
+  A default message is added to the exception `details`.
 
-    Args:
-        message: string, custom message to add to exception's `details`.
-        extras: An optional field for extra information to be included in test
-            result.
-    """
-    try:
-        yield
-    except Exception as e:
-        e_record = records.ExceptionRecord(e)
-        if extras:
-            e_record.extras = extras
-        msg = message or 'Got an unexpected exception'
-        details = '%s: %s' % (msg, e_record.details)
-        logging.exception(details)
-        e_record.details = details
-        recorder.add_error(e_record)
+  Args:
+    message: string, custom message to add to exception's `details`.
+    extras: An optional field for extra information to be included in test
+      result.
+  """
+  try:
+    yield
+  except Exception as e:
+    e_record = records.ExceptionRecord(e)
+    if extras:
+      e_record.extras = extras
+    msg = message or 'Got an unexpected exception'
+    details = '%s: %s' % (msg, e_record.details)
+    logging.exception(details)
+    e_record.details = details
+    recorder.add_error(e_record)
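
A minimal sketch of the context-manager form (the config helper and `self.dut` are hypothetical); an exception raised inside the block is recorded instead of aborting the test on the spot:

    from mobly import expects

    def test_apply_config(self):
      with expects.expect_no_raises('failed to apply device config'):
        apply_device_config(self.dut)  # hypothetical helper that may raise
      # The test keeps running even if the block above raised; the recorded
      # error marks the test as failed at the end.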
 
 
 recorder = _ExpectErrorRecorder(DEFAULT_TEST_RESULT_RECORD)
diff --git a/mobly/keys.py b/mobly/keys.py
index b1a35c0..b561734 100644
--- a/mobly/keys.py
+++ b/mobly/keys.py
@@ -16,13 +16,13 @@
 
 
 class Config(enum.Enum):
-    """The reserved keywordss used in configurations."""
-    # Keywords for params consumed by Mobly itself.
-    key_mobly_params = 'MoblyParams'
-    key_log_path = 'LogPath'
-    # Keyword for the section that defines test bed configs.
-    key_testbed = 'TestBeds'
-    # Keywords for sections inside a test bed config.
-    key_testbed_name = 'Name'
-    key_testbed_controllers = 'Controllers'
-    key_testbed_test_params = 'TestParams'
+  """The reserved keywordss used in configurations."""
+  # Keywords for params consumed by Mobly itself.
+  key_mobly_params = 'MoblyParams'
+  key_log_path = 'LogPath'
+  # Keyword for the section that defines test bed configs.
+  key_testbed = 'TestBeds'
+  # Keywords for sections inside a test bed config.
+  key_testbed_name = 'Name'
+  key_testbed_controllers = 'Controllers'
+  key_testbed_test_params = 'TestParams'
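
These enum values mirror the section names of a Mobly YAML config. A small sketch of reading a config through them (the config content and log path are illustrative):

    import textwrap
    import yaml
    from mobly import keys

    CONFIG_TEXT = textwrap.dedent("""
      TestBeds:
        - Name: SampleTestBed
          Controllers:
            AndroidDevice: '*'
          TestParams:
            favorite_color: blue
      MoblyParams:
        LogPath: /tmp/mobly_logs
      """)

    config = yaml.safe_load(CONFIG_TEXT)
    testbed = config[keys.Config.key_testbed.value][0]
    print(testbed[keys.Config.key_testbed_name.value])  # SampleTestBed
    print(config[keys.Config.key_mobly_params.value][keys.Config.key_log_path.value])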
diff --git a/mobly/logger.py b/mobly/logger.py
index 74f76fe..4204b71 100644
--- a/mobly/logger.py
+++ b/mobly/logger.py
@@ -31,34 +31,34 @@
 # length seems to be lower.
 WINDOWS_MAX_FILENAME_LENGTH = 237
 WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS = {
-    '<':
-    '-',
-    '>':
-    '-',
-    ':':
-    '-',
-    '"':
-    '_',
-    '/':
-    '_',
-    '\\':
-    '_',
-    '|':
-    ',',
-    '?':
-    ',',
-    '*':
-    ',',
-    # Integer zero (i.e. NUL) is not a valid character.
-    # While integers 1-31 are also usually valid, they aren't sanitized because
-    # they are situationally valid.
-    chr(0):
-    '0',
+  '<': '-',
+  '>': '-',
+  ':': '-',
+  '"': '_',
+  '/': '_',
+  '\\': '_',
+  '|': ',',
+  '?': ',',
+  '*': ',',
+  # Integer zero (i.e. NUL) is not a valid character.
+  # While integers 1-31 are also usually valid, they aren't sanitized because
+  # they are situationally valid.
+  chr(0): '0',
 }
 # Note, although the documentation does not specify as such, COM0 and LPT0 are
 # also invalid/reserved filenames.
 WINDOWS_RESERVED_FILENAME_REGEX = re.compile(
-    r'^(CON|PRN|AUX|NUL|(COM|LPT)[0-9])(\.[^.]*)?$', re.IGNORECASE)
+  r'^(CON|PRN|AUX|NUL|(COM|LPT)[0-9])(\.[^.]*)?$', re.IGNORECASE)
 WINDOWS_RESERVED_FILENAME_PREFIX = 'mobly_'
 
 log_line_format = '%(asctime)s.%(msecs).03d %(levelname)s %(message)s'
@@ -71,300 +71,300 @@
 
 
 def _parse_logline_timestamp(t):
-    """Parses a logline timestamp into a tuple.
+  """Parses a logline timestamp into a tuple.
 
-    Args:
-        t: Timestamp in logline format.
+  Args:
+    t: Timestamp in logline format.
 
-    Returns:
-        An iterable of date and time elements in the order of month, day, hour,
-        minute, second, microsecond.
-    """
-    date, time = t.split(' ')
-    month, day = date.split('-')
-    h, m, s = time.split(':')
-    s, ms = s.split('.')
-    return (month, day, h, m, s, ms)
+  Returns:
+    An iterable of date and time elements in the order of month, day, hour,
+    minute, second, microsecond.
+  """
+  date, time = t.split(' ')
+  month, day = date.split('-')
+  h, m, s = time.split(':')
+  s, ms = s.split('.')
+  return (month, day, h, m, s, ms)
 
 
 def is_valid_logline_timestamp(timestamp):
-    if len(timestamp) == log_line_timestamp_len:
-        if logline_timestamp_re.match(timestamp):
-            return True
-    return False
+  if len(timestamp) == log_line_timestamp_len:
+    if logline_timestamp_re.match(timestamp):
+      return True
+  return False
 
 
 def logline_timestamp_comparator(t1, t2):
-    """Comparator for timestamps in logline format.
+  """Comparator for timestamps in logline format.
 
-    Args:
-        t1: Timestamp in logline format.
-        t2: Timestamp in logline format.
+  Args:
+    t1: Timestamp in logline format.
+    t2: Timestamp in logline format.
 
-    Returns:
-        -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2.
-    """
-    dt1 = _parse_logline_timestamp(t1)
-    dt2 = _parse_logline_timestamp(t2)
-    for u1, u2 in zip(dt1, dt2):
-        if u1 < u2:
-            return -1
-        elif u1 > u2:
-            return 1
-    return 0
+  Returns:
+    -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2.
+  """
+  dt1 = _parse_logline_timestamp(t1)
+  dt2 = _parse_logline_timestamp(t2)
+  for u1, u2 in zip(dt1, dt2):
+    if u1 < u2:
+      return -1
+    elif u1 > u2:
+      return 1
+  return 0
 
 
 def _get_timestamp(time_format, delta=None):
-    t = datetime.datetime.now()
-    if delta:
-        t = t + datetime.timedelta(seconds=delta)
-    return t.strftime(time_format)[:-3]
+  t = datetime.datetime.now()
+  if delta:
+    t = t + datetime.timedelta(seconds=delta)
+  return t.strftime(time_format)[:-3]
 
 
 def epoch_to_log_line_timestamp(epoch_time, time_zone=None):
-    """Converts an epoch timestamp in ms to log line timestamp format, which
-    is readible for humans.
+  """Converts an epoch timestamp in ms to log line timestamp format, which
+  is readible for humans.
 
-    Args:
-        epoch_time: integer, an epoch timestamp in ms.
-        time_zone: instance of tzinfo, time zone information.
-            Using pytz rather than python 3.2 time_zone implementation for
-            python 2 compatibility reasons.
+  Args:
+    epoch_time: integer, an epoch timestamp in ms.
+    time_zone: instance of tzinfo, time zone information.
+      Using pytz rather than python 3.2 time_zone implementation for
+      python 2 compatibility reasons.
 
-    Returns:
-        A string that is the corresponding timestamp in log line timestamp
-        format.
-    """
-    s, ms = divmod(epoch_time, 1000)
-    d = datetime.datetime.fromtimestamp(s, tz=time_zone)
-    return d.strftime('%m-%d %H:%M:%S.') + str(ms)
+  Returns:
+    A string that is the corresponding timestamp in log line timestamp
+    format.
+  """
+  s, ms = divmod(epoch_time, 1000)
+  d = datetime.datetime.fromtimestamp(s, tz=time_zone)
+  return d.strftime('%m-%d %H:%M:%S.') + str(ms)
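
For example (value chosen for illustration; output uses local time unless `time_zone` is provided):

    from mobly import logger

    # 1577836800123 ms is 2020-01-01 00:00:00.123 UTC.
    print(logger.epoch_to_log_line_timestamp(1577836800123))
    # e.g. '01-01 00:00:00.123' when the local time zone is UTC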
 
 
 def get_log_line_timestamp(delta=None):
-    """Returns a timestamp in the format used by log lines.
+  """Returns a timestamp in the format used by log lines.
 
-    Default is current time. If a delta is set, the return value will be
-    the current time offset by delta seconds.
+  Default is current time. If a delta is set, the return value will be
+  the current time offset by delta seconds.
 
-    Args:
-        delta: Number of seconds to offset from current time; can be negative.
+  Args:
+    delta: Number of seconds to offset from current time; can be negative.
 
-    Returns:
-        A timestamp in log line format with an offset.
-    """
-    return _get_timestamp('%m-%d %H:%M:%S.%f', delta)
+  Returns:
+    A timestamp in log line format with an offset.
+  """
+  return _get_timestamp('%m-%d %H:%M:%S.%f', delta)
 
 
 def get_log_file_timestamp(delta=None):
-    """Returns a timestamp in the format used for log file names.
+  """Returns a timestamp in the format used for log file names.
 
-    Default is current time. If a delta is set, the return value will be
-    the current time offset by delta seconds.
+  Default is current time. If a delta is set, the return value will be
+  the current time offset by delta seconds.
 
-    Args:
-        delta: Number of seconds to offset from current time; can be negative.
+  Args:
+    delta: Number of seconds to offset from current time; can be negative.
 
-    Returns:
-        A timestamp in log filen name format with an offset.
-    """
-    return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)
+  Returns:
+    A timestamp in log file name format with an offset.
+  """
+  return _get_timestamp('%m-%d-%Y_%H-%M-%S-%f', delta)
 
 
 def _setup_test_logger(log_path, prefix=None):
-    """Customizes the root logger for a test run.
+  """Customizes the root logger for a test run.
 
-    The logger object has a stream handler and a file handler. The stream
-    handler logs INFO level to the terminal, the file handler logs DEBUG
-    level to files.
+  The logger object has a stream handler and a file handler. The stream
+  handler logs INFO level to the terminal, the file handler logs DEBUG
+  level to files.
 
-    Args:
-        log_path: Location of the log file.
-        prefix: A prefix for each log line in terminal.
-        filename: Name of the log file. The default is the time the logger
-            is requested.
-    """
-    log = logging.getLogger()
-    kill_test_logger(log)
-    log.propagate = False
-    log.setLevel(logging.DEBUG)
-    # Log info to stream
-    terminal_format = log_line_format
-    if prefix:
-        terminal_format = '[%s] %s' % (prefix, log_line_format)
-    c_formatter = logging.Formatter(terminal_format, log_line_time_format)
-    ch = logging.StreamHandler(sys.stdout)
-    ch.setFormatter(c_formatter)
-    ch.setLevel(logging.INFO)
-    # Log everything to file
-    f_formatter = logging.Formatter(log_line_format, log_line_time_format)
-    # Write logger output to files
-    fh_info = logging.FileHandler(
-        os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))
-    fh_info.setFormatter(f_formatter)
-    fh_info.setLevel(logging.INFO)
-    fh_debug = logging.FileHandler(
-        os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))
-    fh_debug.setFormatter(f_formatter)
-    fh_debug.setLevel(logging.DEBUG)
-    log.addHandler(ch)
-    log.addHandler(fh_info)
-    log.addHandler(fh_debug)
-    log.log_path = log_path
-    logging.log_path = log_path
-    logging.root_output_path = log_path
+  Args:
+    log_path: Location of the log file.
+    prefix: A prefix for each log line in terminal.
+  """
+  log = logging.getLogger()
+  kill_test_logger(log)
+  log.propagate = False
+  log.setLevel(logging.DEBUG)
+  # Log info to stream
+  terminal_format = log_line_format
+  if prefix:
+    terminal_format = '[%s] %s' % (prefix, log_line_format)
+  c_formatter = logging.Formatter(terminal_format, log_line_time_format)
+  ch = logging.StreamHandler(sys.stdout)
+  ch.setFormatter(c_formatter)
+  ch.setLevel(logging.INFO)
+  # Log everything to file
+  f_formatter = logging.Formatter(log_line_format, log_line_time_format)
+  # Write logger output to files
+  fh_info = logging.FileHandler(
+    os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))
+  fh_info.setFormatter(f_formatter)
+  fh_info.setLevel(logging.INFO)
+  fh_debug = logging.FileHandler(
+    os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))
+  fh_debug.setFormatter(f_formatter)
+  fh_debug.setLevel(logging.DEBUG)
+  log.addHandler(ch)
+  log.addHandler(fh_info)
+  log.addHandler(fh_debug)
+  log.log_path = log_path
+  logging.log_path = log_path
+  logging.root_output_path = log_path
 
 
 def kill_test_logger(logger):
-    """Cleans up a test logger object by removing all of its handlers.
+  """Cleans up a test logger object by removing all of its handlers.
 
-    Args:
-        logger: The logging object to clean up.
-    """
-    for h in list(logger.handlers):
-        logger.removeHandler(h)
-        if isinstance(h, logging.FileHandler):
-            h.close()
+  Args:
+    logger: The logging object to clean up.
+  """
+  for h in list(logger.handlers):
+    logger.removeHandler(h)
+    if isinstance(h, logging.FileHandler):
+      h.close()
 
 
 def create_latest_log_alias(actual_path, alias):
-    """Creates a symlink to the latest test run logs.
+  """Creates a symlink to the latest test run logs.
 
-    Args:
-        actual_path: string, the source directory where the latest test run's
-            logs are.
-        alias: string, the name of the directory to contain the latest log
-            files.
-    """
-    alias_path = os.path.join(os.path.dirname(actual_path), alias)
-    utils.create_alias(actual_path, alias_path)
+  Args:
+    actual_path: string, the source directory where the latest test run's
+      logs are.
+    alias: string, the name of the directory to contain the latest log
+      files.
+  """
+  alias_path = os.path.join(os.path.dirname(actual_path), alias)
+  utils.create_alias(actual_path, alias_path)
 
 
 def setup_test_logger(log_path, prefix=None, alias='latest'):
-    """Customizes the root logger for a test run.
+  """Customizes the root logger for a test run.
 
-    In addition to configuring the Mobly logging handlers, this also sets two
-    attributes on the `logging` module for the output directories:
+  In addition to configuring the Mobly logging handlers, this also sets two
+  attributes on the `logging` module for the output directories:
 
-    root_output_path: path to the directory for the entire test run.
-    log_path: same as `root_output_path` outside of a test class run. In the
-        context of a test class run, this is the output directory for files
-        specific to a test class.
+  root_output_path: path to the directory for the entire test run.
+  log_path: same as `root_output_path` outside of a test class run. In the
+    context of a test class run, this is the output directory for files
+    specific to a test class.
 
-    Args:
-        log_path: string, the location of the report file.
-        prefix: optional string, a prefix for each log line in terminal.
-        alias: optional string, The name of the alias to use for the latest log
-            directory. If a falsy value is provided, then the alias directory
-            will not be created, which is useful to save storage space when the
-            storage system (e.g. ZIP files) does not properly support
-            shortcut/symlinks.
-    """
-    utils.create_dir(log_path)
-    _setup_test_logger(log_path, prefix)
-    logging.info('Test output folder: "%s"', log_path)
-    if alias:
-        create_latest_log_alias(log_path, alias=alias)
+  Args:
+    log_path: string, the location of the report file.
+    prefix: optional string, a prefix for each log line in terminal.
+    alias: optional string, The name of the alias to use for the latest log
+      directory. If a falsy value is provided, then the alias directory
+      will not be created, which is useful to save storage space when the
+      storage system (e.g. ZIP files) does not properly support
+      shortcut/symlinks.
+  """
+  utils.create_dir(log_path)
+  _setup_test_logger(log_path, prefix)
+  logging.info('Test output folder: "%s"', log_path)
+  if alias:
+    create_latest_log_alias(log_path, alias=alias)
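
A minimal sketch of configuring and tearing down the run-level logger (the log path and prefix are illustrative):

    import logging
    from mobly import logger

    logger.setup_test_logger('/tmp/mobly_run', prefix='sample')
    logging.info('Shown on the terminal and written to the log files '
                 'under /tmp/mobly_run; a "latest" alias is also created.')
    logger.kill_test_logger(logging.getLogger())  # detach and close handlers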
 
 
 def _truncate_filename(filename, max_length):
-    """Truncates a filename while trying to preserve the extension.
+  """Truncates a filename while trying to preserve the extension.
 
   Args:
-      filename: string, the filename to potentially truncate.
+    filename: string, the filename to potentially truncate.
+    max_length: int, the maximum allowed length of the filename.
 
   Returns:
-    The truncated filename that is less than or equal to the given maximum
-    length.
+    The truncated filename that is less than or equal to the given maximum
+    length.
   """
-    if len(filename) <= max_length:
-        return filename
+  if len(filename) <= max_length:
+    return filename
 
-    if '.' in filename:
-        filename, extension = filename.rsplit('.', 1)
-        # Subtract one for the extension's period.
-        if len(extension) > max_length - 1:
-            # This is kind of a degrenerate case where the extension is
-            # extremely long, in which case, just return the truncated filename.
-            return filename[:max_length]
-        return '.'.join(
-            [filename[:max_length - len(extension) - 1], extension])
-    else:
-        return filename[:max_length]
+  if '.' in filename:
+    filename, extension = filename.rsplit('.', 1)
+    # Subtract one for the extension's period.
+    if len(extension) > max_length - 1:
+      # This is kind of a degenerate case where the extension is
+      # extremely long, in which case, just return the truncated filename.
+      return filename[:max_length]
+    return '.'.join(
+      [filename[:max_length - len(extension) - 1], extension])
+  else:
+    return filename[:max_length]
 
 
 def _sanitize_windows_filename(filename):
-    """Sanitizes a filename for Windows.
+  """Sanitizes a filename for Windows.
 
-    Refer to the following Windows documentation page for the rules:
-    https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
+  Refer to the following Windows documentation page for the rules:
+  https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
 
-    If the filename matches one of Window's reserved file namespaces, then the
-    `WINDOWS_RESERVED_FILENAME_PREFIX` (i.e. "mobly_") prefix will be appended
-    to the filename to convert it into a valid Windows filename.
+  If the filename matches one of Windows' reserved file namespaces, then the
+  `WINDOWS_RESERVED_FILENAME_PREFIX` (i.e. "mobly_") prefix is prepended to
+  the filename to convert it into a valid Windows filename.
 
-    Args:
-      filename: string, the filename to sanitize for the Windows file system.
+  Args:
+    filename: string, the filename to sanitize for the Windows file system.
 
-    Returns:
-      A filename that should be safe to use on Windows.
-    """
-    if re.match(WINDOWS_RESERVED_FILENAME_REGEX, filename):
-        return WINDOWS_RESERVED_FILENAME_PREFIX + filename
+  Returns:
+    A filename that should be safe to use on Windows.
+  """
+  if re.match(WINDOWS_RESERVED_FILENAME_REGEX, filename):
+    return WINDOWS_RESERVED_FILENAME_PREFIX + filename
 
-    filename = _truncate_filename(filename, WINDOWS_MAX_FILENAME_LENGTH)
+  filename = _truncate_filename(filename, WINDOWS_MAX_FILENAME_LENGTH)
 
-    # In order to meet max length, none of these replacements should increase
-    # the length of the filename.
-    new_filename_chars = []
-    for char in filename:
-        if char in WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS:
-            new_filename_chars.append(
-                WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS[char])
-        else:
-            new_filename_chars.append(char)
-    filename = ''.join(new_filename_chars)
-    if filename.endswith('.') or filename.endswith(' '):
-        # Filenames cannot end with a period or space on Windows.
-        filename = filename[:-1] + '_'
+  # In order to meet max length, none of these replacements should increase
+  # the length of the filename.
+  new_filename_chars = []
+  for char in filename:
+    if char in WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS:
+      new_filename_chars.append(
+        WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS[char])
+    else:
+      new_filename_chars.append(char)
+  filename = ''.join(new_filename_chars)
+  if filename.endswith('.') or filename.endswith(' '):
+    # Filenames cannot end with a period or space on Windows.
+    filename = filename[:-1] + '_'
 
-    return filename
+  return filename
 
 
 def sanitize_filename(filename):
-    """Sanitizes a filename for various operating systems.
+  """Sanitizes a filename for various operating systems.
 
-    Args:
-        filename: string, the filename to sanitize.
+  Args:
+    filename: string, the filename to sanitize.
 
-    Returns:
-        A string that is safe to use as a filename on various operating systems.
-    """
-    # Split `filename` into the directory and filename in case the user
-    # accidentally passed in the full path instead of the name.
-    dirname = os.path.dirname(filename)
-    basename = os.path.basename(filename)
-    basename = _sanitize_windows_filename(basename)
-    basename = _truncate_filename(basename, LINUX_MAX_FILENAME_LENGTH)
-    # Replace spaces with underscores for convenience reasons.
-    basename = basename.replace(' ', '_')
-    return os.path.join(dirname, basename)
+  Returns:
+    A string that is safe to use as a filename on various operating systems.
+  """
+  # Split `filename` into the directory and filename in case the user
+  # accidentally passed in the full path instead of the name.
+  dirname = os.path.dirname(filename)
+  basename = os.path.basename(filename)
+  basename = _sanitize_windows_filename(basename)
+  basename = _truncate_filename(basename, LINUX_MAX_FILENAME_LENGTH)
+  # Replace spaces with underscores for convenience reasons.
+  basename = basename.replace(' ', '_')
+  return os.path.join(dirname, basename)
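
For example:

    from mobly import logger

    print(logger.sanitize_filename('CON.txt'))      # 'mobly_CON.txt' (reserved name on Windows)
    print(logger.sanitize_filename('log 1:2.txt'))  # 'log_1-2.txt' (space -> '_', ':' -> '-')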
 
 
 def normalize_log_line_timestamp(log_line_timestamp):
-    """Replace special characters in log line timestamp with normal characters.
+  """Replace special characters in log line timestamp with normal characters.
 
-    .. deprecated:: 1.10
+  .. deprecated:: 1.10
 
-        This method is obsolete with the more general `sanitize_filename` method
-        and is only kept for backwards compatibility. In a future update, this
-        method may be removed.
+    This method is obsolete with the more general `sanitize_filename` method
+    and is only kept for backwards compatibility. In a future update, this
+    method may be removed.
 
-    Args:
-        log_line_timestamp: A string in the log line timestamp format. Obtained
-            with get_log_line_timestamp.
+  Args:
+    log_line_timestamp: A string in the log line timestamp format. Obtained
+      with get_log_line_timestamp.
 
-    Returns:
-        A string representing the same time as input timestamp, but without
-        special characters.
-    """
-    return sanitize_filename(log_line_timestamp)
+  Returns:
+    A string representing the same time as input timestamp, but without
+    special characters.
+  """
+  return sanitize_filename(log_line_timestamp)
diff --git a/mobly/records.py b/mobly/records.py
index 067dcac..0edba15 100644
--- a/mobly/records.py
+++ b/mobly/records.py
@@ -36,631 +36,631 @@
 
 
 class Error(Exception):
-    """Raised for errors in record module members."""
+  """Raised for errors in record module members."""
 
 
 def uid(uid):
-    """Decorator specifying the unique identifier (UID) of a test case.
+  """Decorator specifying the unique identifier (UID) of a test case.
 
-    The UID will be recorded in the test's record when executed by Mobly.
+  The UID will be recorded in the test's record when executed by Mobly.
 
-    If you use any other decorator for the test method, you may want to use
-    this as the outer-most one.
+  If you use any other decorator for the test method, you may want to use
+  this as the outer-most one.
 
-    Note a common UID system is the Universal Unitque Identifier (UUID), but
-    we are not limiting people to use UUID, hence the more generic name `UID`.
+  Note a common UID system is the Universally Unique Identifier (UUID), but
+  we are not limiting people to use UUID, hence the more generic name `UID`.
 
-    Args:
-        uid: string, the uid for the decorated test function.
-    """
-    if uid is None:
-        raise ValueError('UID cannot be None.')
+  Args:
+    uid: string, the uid for the decorated test function.
+  """
+  if uid is None:
+    raise ValueError('UID cannot be None.')
 
-    def decorate(test_func):
-        @functools.wraps(test_func)
-        def wrapper(*args, **kwargs):
-            return test_func(*args, **kwargs)
+  def decorate(test_func):
+    @functools.wraps(test_func)
+    def wrapper(*args, **kwargs):
+      return test_func(*args, **kwargs)
 
-        setattr(wrapper, 'uid', uid)
-        return wrapper
+    setattr(wrapper, 'uid', uid)
+    return wrapper
 
-    return decorate
+  return decorate
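
A minimal sketch of tagging a test with a UID (the UID value is a placeholder); keep it as the outermost decorator so the `uid` attribute survives any other wrappers:

    from mobly import base_test, records

    class ExampleTest(base_test.BaseTestClass):

      @records.uid('00000000-0000-0000-0000-000000000001')  # placeholder UID
      def test_example(self):
        pass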
 
 
 class TestSummaryEntryType(enum.Enum):
-    """Constants used to identify the type of entries in test summary file.
+  """Constants used to identify the type of entries in test summary file.
 
-    Test summary file contains multiple yaml documents. In order to parse this
-    file efficiently, the write adds the type of each entry when it writes the
-    entry to the file.
+  Test summary file contains multiple yaml documents. In order to parse this
+  file efficiently, the writer adds the type of each entry when it writes the
+  entry to the file.
 
-    The idea is similar to how `TestResult.json_str` categorizes different
-    sections of a `TestResult` object in the serialized format.
-    """
-    # A list of all the tests requested for a test run.
-    # This is dumped at the beginning of a summary file so we know what was
-    # requested in case the test is interrupted and the final summary is not
-    # created.
-    TEST_NAME_LIST = 'TestNameList'
-    # Records of test results.
-    RECORD = 'Record'
-    # A summary of the test run stats, e.g. how many test failed.
-    SUMMARY = 'Summary'
-    # Information on the controllers used in a test class.
-    CONTROLLER_INFO = 'ControllerInfo'
-    # Additional data added by users during test.
-    # This can be added at any point in the test, so do not assume the location
-    # of these entries in the summary file.
-    USER_DATA = 'UserData'
+  The idea is similar to how `TestResult.json_str` categorizes different
+  sections of a `TestResult` object in the serialized format.
+  """
+  # A list of all the tests requested for a test run.
+  # This is dumped at the beginning of a summary file so we know what was
+  # requested in case the test is interrupted and the final summary is not
+  # created.
+  TEST_NAME_LIST = 'TestNameList'
+  # Records of test results.
+  RECORD = 'Record'
+  # A summary of the test run stats, e.g. how many test failed.
+  SUMMARY = 'Summary'
+  # Information on the controllers used in a test class.
+  CONTROLLER_INFO = 'ControllerInfo'
+  # Additional data added by users during test.
+  # This can be added at any point in the test, so do not assume the location
+  # of these entries in the summary file.
+  USER_DATA = 'UserData'
 
 
 class Error(Exception):
-    """Raised for errors in records."""
+  """Raised for errors in records."""
 
 
 class TestSummaryWriter(object):
-    """Writer for the test result summary file of a test run.
+  """Writer for the test result summary file of a test run.
 
-    For each test run, a writer is created to stream test results to the
-    summary file on disk.
+  For each test run, a writer is created to stream test results to the
+  summary file on disk.
 
-    The serialization and writing of the `TestResult` object is intentionally
-    kept out of `TestResult` class and put in this class. Because `TestResult`
-    can be operated on by suites, like `+` operation, and it is difficult to
-    guarantee the consistency between `TestResult` in memory and the files on
-    disk. Also, this separation makes it easier to provide a more generic way
-    for users to consume the test summary, like via a database instead of a
-    file.
+  The serialization and writing of the `TestResult` object is intentionally
+  kept out of the `TestResult` class and put in this class, because
+  `TestResult` can be operated on by suites (e.g. the `+` operation), which
+  makes it difficult to guarantee consistency between the `TestResult` in
+  memory and the files on disk. This separation also makes it easier to
+  provide a more generic way for users to consume the test summary, e.g. via
+  a database instead of a file.
+  """
+
+  def __init__(self, path):
+    self._path = path
+    self._lock = threading.Lock()
+
+  def __copy__(self):
+    """Make a "copy" of the object.
+
+    The writer is merely a wrapper object for a path with a global lock for
+    write operation. So we simply return the object itself for copy
+    operations.
     """
+    return self
 
-    def __init__(self, path):
-        self._path = path
-        self._lock = threading.Lock()
+  def __deepcopy__(self, *args):
+    return self.__copy__()
 
-    def __copy__(self):
-        """Make a "copy" of the object.
+  def dump(self, content, entry_type):
+    """Dumps a dictionary as a yaml document to the summary file.
 
-        The writer is merely a wrapper object for a path with a global lock for
-        write operation. So we simply return the object itself for copy
-        operations.
-        """
-        return self
+    Each call to this method dumps a separate yaml document to the same
+    summary file associated with a test run.
 
-    def __deepcopy__(self, *args):
-        return self.__copy__()
+    The content of the dumped dictionary has an extra field `Type` that
+    specifies the type of each yaml document, which is the flag for parsers
+    to identify each document.
 
-    def dump(self, content, entry_type):
-        """Dumps a dictionary as a yaml document to the summary file.
+    Args:
+      content: dictionary, the content to serialize and write.
+      entry_type: a member of enum TestSummaryEntryType.
 
-        Each call to this method dumps a separate yaml document to the same
-        summary file associated with a test run.
-
-        The content of the dumped dictionary has an extra field `TYPE` that
-        specifies the type of each yaml document, which is the flag for parsers
-        to identify each document.
-
-        Args:
-            content: dictionary, the content to serialize and write.
-            entry_type: a member of enum TestSummaryEntryType.
-
-        Raises:
-            recoreds.Error: An invalid entry type is passed in.
-        """
-        new_content = copy.deepcopy(content)
-        new_content['Type'] = entry_type.value
-        # Both user code and Mobly code can trigger this dump, hence the lock.
-        with self._lock:
-            # For Python3, setting the encoding on yaml.safe_dump does not work
-            # because Python3 file descriptors set an encoding by default, which
-            # PyYAML uses instead of the encoding on yaml.safe_dump. So, the
-            # encoding has to be set on the open call instead.
-            with io.open(self._path, 'a', encoding='utf-8') as f:
-                # Use safe_dump here to avoid language-specific tags in final
-                # output.
-                yaml.safe_dump(new_content,
-                               f,
-                               explicit_start=True,
-                               allow_unicode=True,
-                               indent=4)
+    Raises:
+      records.Error: An invalid entry type is passed in.
+    """
+    new_content = copy.deepcopy(content)
+    new_content['Type'] = entry_type.value
+    # Both user code and Mobly code can trigger this dump, hence the lock.
+    with self._lock:
+      # For Python3, setting the encoding on yaml.safe_dump does not work
+      # because Python3 file descriptors set an encoding by default, which
+      # PyYAML uses instead of the encoding on yaml.safe_dump. So, the
+      # encoding has to be set on the open call instead.
+      with io.open(self._path, 'a', encoding='utf-8') as f:
+        # Use safe_dump here to avoid language-specific tags in final
+        # output.
+        yaml.safe_dump(new_content,
+                       f,
+                       explicit_start=True,
+                       allow_unicode=True,
+                       indent=4)
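
A minimal sketch of writing a user-data entry and reading it back (the summary path is illustrative and its directory is assumed to exist):

    import io
    import yaml
    from mobly import records

    writer = records.TestSummaryWriter('/tmp/mobly_run/test_summary.yaml')
    writer.dump({'build_id': 'ABC123'}, records.TestSummaryEntryType.USER_DATA)

    with io.open('/tmp/mobly_run/test_summary.yaml', encoding='utf-8') as f:
      for doc in yaml.safe_load_all(f):
        if doc['Type'] == records.TestSummaryEntryType.USER_DATA.value:
          print(doc['build_id'])  # ABC123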
 
 
 class TestResultEnums(object):
-    """Enums used for TestResultRecord class.
+  """Enums used for TestResultRecord class.
 
-    Includes the tokens to mark test result with, and the string names for each
-    field in TestResultRecord.
-    """
+  Includes the tokens to mark test result with, and the string names for each
+  field in TestResultRecord.
+  """
 
-    RECORD_NAME = 'Test Name'
-    RECORD_CLASS = 'Test Class'
-    RECORD_BEGIN_TIME = 'Begin Time'
-    RECORD_END_TIME = 'End Time'
-    RECORD_RESULT = 'Result'
-    RECORD_UID = 'UID'
-    RECORD_EXTRAS = 'Extras'
-    RECORD_EXTRA_ERRORS = 'Extra Errors'
-    RECORD_DETAILS = 'Details'
-    RECORD_STACKTRACE = 'Stacktrace'
-    RECORD_POSITION = 'Position'
-    TEST_RESULT_PASS = 'PASS'
-    TEST_RESULT_FAIL = 'FAIL'
-    TEST_RESULT_SKIP = 'SKIP'
-    TEST_RESULT_ERROR = 'ERROR'
+  RECORD_NAME = 'Test Name'
+  RECORD_CLASS = 'Test Class'
+  RECORD_BEGIN_TIME = 'Begin Time'
+  RECORD_END_TIME = 'End Time'
+  RECORD_RESULT = 'Result'
+  RECORD_UID = 'UID'
+  RECORD_EXTRAS = 'Extras'
+  RECORD_EXTRA_ERRORS = 'Extra Errors'
+  RECORD_DETAILS = 'Details'
+  RECORD_STACKTRACE = 'Stacktrace'
+  RECORD_POSITION = 'Position'
+  TEST_RESULT_PASS = 'PASS'
+  TEST_RESULT_FAIL = 'FAIL'
+  TEST_RESULT_SKIP = 'SKIP'
+  TEST_RESULT_ERROR = 'ERROR'
 
 
 class ControllerInfoRecord(object):
-    """A record representing the controller info in test results."""
+  """A record representing the controller info in test results."""
 
-    KEY_TEST_CLASS = TestResultEnums.RECORD_CLASS
-    KEY_CONTROLLER_NAME = 'Controller Name'
-    KEY_CONTROLLER_INFO = 'Controller Info'
-    KEY_TIMESTAMP = 'Timestamp'
+  KEY_TEST_CLASS = TestResultEnums.RECORD_CLASS
+  KEY_CONTROLLER_NAME = 'Controller Name'
+  KEY_CONTROLLER_INFO = 'Controller Info'
+  KEY_TIMESTAMP = 'Timestamp'
 
-    def __init__(self, test_class, controller_name, info):
-        self.test_class = test_class
-        self.controller_name = controller_name
-        self.controller_info = info
-        self.timestamp = time.time()
+  def __init__(self, test_class, controller_name, info):
+    self.test_class = test_class
+    self.controller_name = controller_name
+    self.controller_info = info
+    self.timestamp = time.time()
 
-    def to_dict(self):
-        result = {}
-        result[self.KEY_TEST_CLASS] = self.test_class
-        result[self.KEY_CONTROLLER_NAME] = self.controller_name
-        result[self.KEY_CONTROLLER_INFO] = self.controller_info
-        result[self.KEY_TIMESTAMP] = self.timestamp
-        return result
+  def to_dict(self):
+    result = {}
+    result[self.KEY_TEST_CLASS] = self.test_class
+    result[self.KEY_CONTROLLER_NAME] = self.controller_name
+    result[self.KEY_CONTROLLER_INFO] = self.controller_info
+    result[self.KEY_TIMESTAMP] = self.timestamp
+    return result
 
-    def __repr__(self):
-        return str(self.to_dict())
+  def __repr__(self):
+    return str(self.to_dict())
 
 
 class ExceptionRecord(object):
-    """A record representing exception objects in TestResultRecord.
+  """A record representing exception objects in TestResultRecord.
 
-    Attributes:
-        exception: Exception object, the original Exception.
-        stacktrace: string, stacktrace of the Exception.
-        extras: optional serializable, this corresponds to the
-            `TestSignal.extras` field.
-        position: string, an optional label specifying the position where the
-            Exception ocurred.
+  Attributes:
+    exception: Exception object, the original Exception.
+    stacktrace: string, stacktrace of the Exception.
+    extras: optional serializable, this corresponds to the
+      `TestSignal.extras` field.
+    position: string, an optional label specifying the position where the
+      Exception occurred.
+  """
+
+  def __init__(self, e, position=None):
+    self.exception = e
+    self.stacktrace = None
+    self.extras = None
+    self.position = position
+    self.is_test_signal = isinstance(e, signals.TestSignal)
+    # Record stacktrace of the exception.
+    # This check cannot be based on try...except, which messes up
+    # `exc_info`.
+    if hasattr(e, '__traceback__'):
+      exc_traceback = e.__traceback__
+    else:
+      # In py2, exception objects don't have built-in traceback, so we
+      # have to immediately retrieve stacktrace from `sys.exc_info`.
+      _, _, exc_traceback = sys.exc_info()
+    if exc_traceback:
+      self.stacktrace = ''.join(
+        traceback.format_exception(e.__class__, e, exc_traceback))
+    # Populate fields based on the type of the termination signal.
+    if self.is_test_signal:
+      self._set_details(e.details)
+      self.extras = e.extras
+    else:
+      self._set_details(e)
+
+  def _set_details(self, content):
+    """Sets the `details` field.
+
+    Args:
+      content: the content to extract details from.
     """
+    try:
+      self.details = str(content)
+    except UnicodeEncodeError:
+      if sys.version_info < (3, 0):
+        # If Py2 threw encode error, convert to unicode.
+        self.details = unicode(content)
+      else:
+        # We should never hit this in Py3, if this happens, record
+        # an encoded version of the content for users to handle.
+        logging.error(
+          'Unable to decode "%s" in Py3, encoding in utf-8.',
+          content)
+        self.details = content.encode('utf-8')
 
-    def __init__(self, e, position=None):
-        self.exception = e
-        self.stacktrace = None
-        self.extras = None
-        self.position = position
-        self.is_test_signal = isinstance(e, signals.TestSignal)
-        # Record stacktrace of the exception.
-        # This check cannot be based on try...except, which messes up
-        # `exc_info`.
-        if hasattr(e, '__traceback__'):
-            exc_traceback = e.__traceback__
-        else:
-            # In py2, exception objects don't have built-in traceback, so we
-            # have to immediately retrieve stacktrace from `sys.exc_info`.
-            _, _, exc_traceback = sys.exc_info()
-        if exc_traceback:
-            self.stacktrace = ''.join(
-                traceback.format_exception(e.__class__, e, exc_traceback))
-        # Populate fields based on the type of the termination signal.
-        if self.is_test_signal:
-            self._set_details(e.details)
-            self.extras = e.extras
-        else:
-            self._set_details(e)
+  def to_dict(self):
+    result = {}
+    result[TestResultEnums.RECORD_DETAILS] = self.details
+    result[TestResultEnums.RECORD_POSITION] = self.position
+    result[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace
+    result[TestResultEnums.RECORD_EXTRAS] = copy.deepcopy(self.extras)
+    return result
 
-    def _set_details(self, content):
-        """Sets the `details` field.
+  def __deepcopy__(self, memo):
+    """Overrides deepcopy for the class.
 
-        Args:
-            content: the content to extract details from.
-        """
-        try:
-            self.details = str(content)
-        except UnicodeEncodeError:
-            if sys.version_info < (3, 0):
-                # If Py2 threw encode error, convert to unicode.
-                self.details = unicode(content)
-            else:
-                # We should never hit this in Py3, if this happens, record
-                # an encoded version of the content for users to handle.
-                logging.error(
-                    'Unable to decode "%s" in Py3, encoding in utf-8.',
-                    content)
-                self.details = content.encode('utf-8')
-
-    def to_dict(self):
-        result = {}
-        result[TestResultEnums.RECORD_DETAILS] = self.details
-        result[TestResultEnums.RECORD_POSITION] = self.position
-        result[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace
-        result[TestResultEnums.RECORD_EXTRAS] = copy.deepcopy(self.extras)
-        return result
-
-    def __deepcopy__(self, memo):
-        """Overrides deepcopy for the class.
-
-        If the exception object has a constructor that takes extra args, deep
-        copy won't work. So we need to have a custom logic for deepcopy.
-        """
-        try:
-            exception = copy.deepcopy(self.exception)
-        except TypeError:
-            # If the exception object cannot be copied, use the original
-            # exception object.
-            exception = self.exception
-        result = ExceptionRecord(exception, self.position)
-        result.stacktrace = self.stacktrace
-        result.details = self.details
-        result.extras = copy.deepcopy(self.extras)
-        result.position = self.position
-        return result
+    If the exception object has a constructor that takes extra args, deep
+    copy won't work. So we need to have a custom logic for deepcopy.
+    """
+    try:
+      exception = copy.deepcopy(self.exception)
+    except TypeError:
+      # If the exception object cannot be copied, use the original
+      # exception object.
+      exception = self.exception
+    result = ExceptionRecord(exception, self.position)
+    result.stacktrace = self.stacktrace
+    result.details = self.details
+    result.extras = copy.deepcopy(self.extras)
+    result.position = self.position
+    return result
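
For reference, a small sketch of how a caught exception gets wrapped (the 'setup_class' position is just an illustrative label):

    from mobly import records

    try:
      1 / 0
    except ZeroDivisionError as e:
      rec = records.ExceptionRecord(e, position='setup_class')

    print(rec.details)                # 'division by zero'
    print(rec.to_dict()['Position'])  # 'setup_class'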
 
 
 class TestResultRecord(object):
-    """A record that holds the information of a single test.
+  """A record that holds the information of a single test.
 
-    The record object holds all information of a test, including all the
-    exceptions occurred during the test.
+  The record object holds all information of a test, including all the
+  exceptions that occurred during the test.
 
-    A test can terminate for two reasons:
-      1. the test function executes to the end and completes naturally.
-      2. the test is terminated by an exception, which we call
-         "termination signal".
+  A test can terminate for two reasons:
+    1. the test function executes to the end and completes naturally.
+    2. the test is terminated by an exception, which we call
+       "termination signal".
 
-    The termination signal is treated differently. Its content are extracted
-    into first-tier attributes of the record object, like `details` and
-    `stacktrace`, for easy consumption.
+  The termination signal is treated differently. Its contents are extracted
+  into first-tier attributes of the record object, like `details` and
+  `stacktrace`, for easy consumption.
 
-    Note the termination signal is not always an error, it can also be explicit
-    pass signal or abort/skip signals.
+  Note the termination signal is not always an error; it can also be an
+  explicit pass signal or an abort/skip signal.
 
-    Attributes:
-        test_name: string, the name of the test.
-        begin_time: Epoch timestamp of when the test started.
-        end_time: Epoch timestamp of when the test ended.
-        uid: Unique identifier of a test.
-        termination_signal: ExceptionRecord, the main exception of the test.
-        extra_errors: OrderedDict, all exceptions occurred during the entire
-            test lifecycle. The order of occurrence is preserved.
-        result: TestResultEnum.TEAT_RESULT_*, PASS/FAIL/SKIP.
+  Attributes:
+    test_name: string, the name of the test.
+    begin_time: Epoch timestamp of when the test started.
+    end_time: Epoch timestamp of when the test ended.
+    uid: Unique identifier of a test.
+    termination_signal: ExceptionRecord, the main exception of the test.
+    extra_errors: OrderedDict, all exceptions that occurred during the entire
+      test lifecycle. The order of occurrence is preserved.
+    result: TestResultEnums.TEST_RESULT_*, PASS/FAIL/SKIP.
+  """
+
+  def __init__(self, t_name, t_class=None):
+    self.test_name = t_name
+    self.test_class = t_class
+    self.begin_time = None
+    self.end_time = None
+    self.uid = None
+    self.termination_signal = None
+    self.extra_errors = collections.OrderedDict()
+    self.result = None
+
+  @property
+  def details(self):
+    """String description of the cause of the test's termination.
+
+    Note a passed test can have this as well due to the explicit pass
+    signal. If the test passed implicitly, this field would be None.
     """
+    if self.termination_signal:
+      return self.termination_signal.details
 
-    def __init__(self, t_name, t_class=None):
-        self.test_name = t_name
-        self.test_class = t_class
-        self.begin_time = None
-        self.end_time = None
-        self.uid = None
-        self.termination_signal = None
-        self.extra_errors = collections.OrderedDict()
-        self.result = None
+  @property
+  def stacktrace(self):
+    """The stacktrace string for the exception that terminated the test.
+    """
+    if self.termination_signal:
+      return self.termination_signal.stacktrace
 
-    @property
-    def details(self):
-        """String description of the cause of the test's termination.
+  @property
+  def extras(self):
+    """User defined extra information of the test result.
 
-        Note a passed test can have this as well due to the explicit pass
-        signal. If the test passed implicitly, this field would be None.
-        """
-        if self.termination_signal:
-            return self.termination_signal.details
+    Must be serializable.
+    """
+    if self.termination_signal:
+      return self.termination_signal.extras
 
-    @property
-    def stacktrace(self):
-        """The stacktrace string for the exception that terminated the test.
-        """
-        if self.termination_signal:
-            return self.termination_signal.stacktrace
+  def test_begin(self):
+    """Call this when the test begins execution.
 
-    @property
-    def extras(self):
-        """User defined extra information of the test result.
+    Sets the begin_time of this record.
+    """
+    self.begin_time = utils.get_current_epoch_time()
 
-        Must be serializable.
-        """
-        if self.termination_signal:
-            return self.termination_signal.extras
+  def _test_end(self, result, e):
+    """Marks the end of the test logic.
 
-    def test_begin(self):
-        """Call this when the test begins execution.
+    Args:
+      result: One of the TEST_RESULT enums in TestResultEnums.
+      e: A test termination signal (usually an exception object). It can
+        be any exception instance or of any subclass of
+        mobly.signals.TestSignal.
+    """
+    if self.begin_time is not None:
+      self.end_time = utils.get_current_epoch_time()
+    self.result = result
+    if e:
+      self.termination_signal = ExceptionRecord(e)
 
-        Sets the begin_time of this record.
-        """
-        self.begin_time = utils.get_current_epoch_time()
+  def update_record(self):
+    """Updates the content of a record.
 
-    def _test_end(self, result, e):
-        """Marks the end of the test logic.
+    Several display fields like "details" and "stacktrace" need to be
+    updated based on the content of the record object.
 
-        Args:
-            result: One of the TEST_RESULT enums in TestResultEnums.
-            e: A test termination signal (usually an exception object). It can
-                be any exception instance or of any subclass of
-                mobly.signals.TestSignal.
-        """
-        if self.begin_time is not None:
-            self.end_time = utils.get_current_epoch_time()
-        self.result = result
-        if e:
-            self.termination_signal = ExceptionRecord(e)
+    As the content of the record changes, call this method to update all
+    the appropriate fields.
+    """
+    if self.extra_errors:
+      if self.result != TestResultEnums.TEST_RESULT_FAIL:
+        self.result = TestResultEnums.TEST_RESULT_ERROR
+    # If no termination signal is provided, use the first exception
+    # occurred as the termination signal.
+    if not self.termination_signal and self.extra_errors:
+      _, self.termination_signal = self.extra_errors.popitem(last=False)
 
-    def update_record(self):
-        """Updates the content of a record.
+  def test_pass(self, e=None):
+    """To mark the test as passed in this record.
 
-        Several display fields like "details" and "stacktrace" need to be
-        updated based on the content of the record object.
+    Args:
+      e: An instance of mobly.signals.TestPass.
+    """
+    self._test_end(TestResultEnums.TEST_RESULT_PASS, e)
 
-        As the content of the record change, call this method to update all
-        the appropirate fields.
-        """
-        if self.extra_errors:
-            if self.result != TestResultEnums.TEST_RESULT_FAIL:
-                self.result = TestResultEnums.TEST_RESULT_ERROR
-        # If no termination signal is provided, use the first exception
-        # occurred as the termination signal.
-        if not self.termination_signal and self.extra_errors:
-            _, self.termination_signal = self.extra_errors.popitem(last=False)
+  def test_fail(self, e=None):
+    """To mark the test as failed in this record.
 
-    def test_pass(self, e=None):
-        """To mark the test as passed in this record.
+    Only test_fail does an instance check because we want 'assert xxx' to also
+    fail the test the same way assert_true does.
 
-        Args:
-            e: An instance of mobly.signals.TestPass.
-        """
-        self._test_end(TestResultEnums.TEST_RESULT_PASS, e)
+    Args:
+      e: An exception object. It can be an instance of AssertionError or
+        mobly.base_test.TestFailure.
+    """
+    self._test_end(TestResultEnums.TEST_RESULT_FAIL, e)
 
-    def test_fail(self, e=None):
-        """To mark the test as failed in this record.
+  def test_skip(self, e=None):
+    """To mark the test as skipped in this record.
 
-        Only test_fail does an instance check because we want 'assert xxx' to also
-        fail the test the same way assert_true does.
+    Args:
+      e: An instance of mobly.signals.TestSkip.
+    """
+    self._test_end(TestResultEnums.TEST_RESULT_SKIP, e)
 
-        Args:
-            e: An exception object. It can be an instance of AssertionError or
-                mobly.base_test.TestFailure.
-        """
-        self._test_end(TestResultEnums.TEST_RESULT_FAIL, e)
+  def test_error(self, e=None):
+    """To mark the test as error in this record.
 
-    def test_skip(self, e=None):
-        """To mark the test as skipped in this record.
+    Args:
+      e: An exception object.
+    """
+    self._test_end(TestResultEnums.TEST_RESULT_ERROR, e)
 
-        Args:
-            e: An instance of mobly.signals.TestSkip.
-        """
-        self._test_end(TestResultEnums.TEST_RESULT_SKIP, e)
+  def add_error(self, position, e):
+    """Add an extra error that happened during a test.
 
-    def test_error(self, e=None):
-        """To mark the test as error in this record.
+    If the test has passed or skipped, this will mark the test result as
+    ERROR.
 
-        Args:
-            e: An exception object.
-        """
-        self._test_end(TestResultEnums.TEST_RESULT_ERROR, e)
+    If an error is added to the test record, the record's result is
+    equivalent to the case where an uncaught exception happened.
 
-    def add_error(self, position, e):
-        """Add an extra error that happened during a test.
+    If the test record has not recorded any error, the newly added error
+    would be the main error of the test record. Otherwise the newly added
+    error is added to the record's extra errors.
 
-        If the test has passed or skipped, this will mark the test result as
-        ERROR.
+    Args:
+      position: string, where this error occurred, e.g. 'teardown_test'.
+      e: An exception or a `signals.ExceptionRecord` object.
+    """
+    if self.result != TestResultEnums.TEST_RESULT_FAIL:
+      self.result = TestResultEnums.TEST_RESULT_ERROR
+    if position in self.extra_errors:
+      raise Error('An exception is already recorded with position "%s",'
+            ' cannot reuse.' % position)
+    if isinstance(e, ExceptionRecord):
+      self.extra_errors[position] = e
+    else:
+      self.extra_errors[position] = ExceptionRecord(e, position=position)
 
-        If an error is added to the test record, the record's result is
-        equivalent to the case where an uncaught exception happened.
+  def __str__(self):
+    d = self.to_dict()
+    l = ['%s = %s' % (k, v) for k, v in d.items()]
+    s = ', '.join(l)
+    return s
 
-        If the test record has not recorded any error, the newly added error
-        would be the main error of the test record. Otherwise the newly added
-        error is added to the record's extra errors.
+  def __repr__(self):
+    """This returns a short string representation of the test record."""
+    t = utils.epoch_to_human_time(self.begin_time)
+    return '%s %s %s' % (t, self.test_name, self.result)
 
-        Args:
-            position: string, where this error occurred, e.g. 'teardown_test'.
-            e: An exception or a `signals.ExceptionRecord` object.
-        """
-        if self.result != TestResultEnums.TEST_RESULT_FAIL:
-            self.result = TestResultEnums.TEST_RESULT_ERROR
-        if position in self.extra_errors:
-            raise Error('An exception is already recorded with position "%s",'
-                        ' cannot reuse.' % position)
-        if isinstance(e, ExceptionRecord):
-            self.extra_errors[position] = e
-        else:
-            self.extra_errors[position] = ExceptionRecord(e, position=position)
+  def to_dict(self):
+    """Gets a dictionary representing the content of this class.
 
-    def __str__(self):
-        d = self.to_dict()
-        l = ['%s = %s' % (k, v) for k, v in d.items()]
-        s = ', '.join(l)
-        return s
-
-    def __repr__(self):
-        """This returns a short string representation of the test record."""
-        t = utils.epoch_to_human_time(self.begin_time)
-        return '%s %s %s' % (t, self.test_name, self.result)
-
-    def to_dict(self):
-        """Gets a dictionary representing the content of this class.
-
-        Returns:
-            A dictionary representing the content of this class.
-        """
-        d = {}
-        d[TestResultEnums.RECORD_NAME] = self.test_name
-        d[TestResultEnums.RECORD_CLASS] = self.test_class
-        d[TestResultEnums.RECORD_BEGIN_TIME] = self.begin_time
-        d[TestResultEnums.RECORD_END_TIME] = self.end_time
-        d[TestResultEnums.RECORD_RESULT] = self.result
-        d[TestResultEnums.RECORD_UID] = self.uid
-        d[TestResultEnums.RECORD_EXTRAS] = self.extras
-        d[TestResultEnums.RECORD_DETAILS] = self.details
-        d[TestResultEnums.RECORD_EXTRA_ERRORS] = {
-            key: value.to_dict()
-            for (key, value) in self.extra_errors.items()
-        }
-        d[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace
-        return d
+    Returns:
+      A dictionary representing the content of this class.
+    """
+    d = {}
+    d[TestResultEnums.RECORD_NAME] = self.test_name
+    d[TestResultEnums.RECORD_CLASS] = self.test_class
+    d[TestResultEnums.RECORD_BEGIN_TIME] = self.begin_time
+    d[TestResultEnums.RECORD_END_TIME] = self.end_time
+    d[TestResultEnums.RECORD_RESULT] = self.result
+    d[TestResultEnums.RECORD_UID] = self.uid
+    d[TestResultEnums.RECORD_EXTRAS] = self.extras
+    d[TestResultEnums.RECORD_DETAILS] = self.details
+    d[TestResultEnums.RECORD_EXTRA_ERRORS] = {
+      key: value.to_dict()
+      for (key, value) in self.extra_errors.items()
+    }
+    d[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace
+    return d
 
 
 class TestResult(object):
-    """A class that contains metrics of a test run.
+  """A class that contains metrics of a test run.
 
-    This class is essentially a container of TestResultRecord objects.
+  This class is essentially a container of TestResultRecord objects.
 
-    Attributes:
-        requested: A list of strings, each is the name of a test requested
-            by user.
-        failed: A list of records for tests failed.
-        executed: A list of records for tests that were actually executed.
-        passed: A list of records for tests passed.
-        skipped: A list of records for tests skipped.
-        error: A list of records for tests with error result token.
-        controller_info: list of ControllerInfoRecord.
+  Attributes:
+    requested: A list of strings, each is the name of a test requested
+      by user.
+    failed: A list of records for tests failed.
+    executed: A list of records for tests that were actually executed.
+    passed: A list of records for tests passed.
+    skipped: A list of records for tests skipped.
+    error: A list of records for tests with error result token.
+    controller_info: list of ControllerInfoRecord.
+  """
+
+  def __init__(self):
+    self.requested = []
+    self.failed = []
+    self.executed = []
+    self.passed = []
+    self.skipped = []
+    self.error = []
+    self.controller_info = []
+
+  def __add__(self, r):
+    """Overrides '+' operator for TestResult class.
+
+    The add operator merges two TestResult objects by concatenating all of
+    their lists together.
+
+    Args:
+      r: another instance of TestResult to be added
+
+    Returns:
+      A TestResult instance that's the sum of two TestResult instances.
     """
+    if not isinstance(r, TestResult):
+      raise TypeError('Operand %s of type %s is not a TestResult.' %
+              (r, type(r)))
+    sum_result = TestResult()
+    for name in sum_result.__dict__:
+      r_value = getattr(r, name)
+      l_value = getattr(self, name)
+      if isinstance(r_value, list):
+        setattr(sum_result, name, l_value + r_value)
+    return sum_result
 
-    def __init__(self):
-        self.requested = []
-        self.failed = []
-        self.executed = []
-        self.passed = []
-        self.skipped = []
-        self.error = []
-        self.controller_info = []
+  def add_record(self, record):
+    """Adds a test record to test result.
 
-    def __add__(self, r):
-        """Overrides '+' operator for TestResult class.
+    A record is considered executed once it's added to the test result.
 
-        The add operator merges two TestResult objects by concatenating all of
-        their lists together.
+    Adding the record finalizes the content of a record, so no change
+    should be made to the record afterwards.
 
-        Args:
-            r: another instance of TestResult to be added
+    Args:
+      record: A test record object to add.
+    """
+    record.update_record()
+    if record.result == TestResultEnums.TEST_RESULT_SKIP:
+      self.skipped.append(record)
+      return
+    self.executed.append(record)
+    if record.result == TestResultEnums.TEST_RESULT_FAIL:
+      self.failed.append(record)
+    elif record.result == TestResultEnums.TEST_RESULT_PASS:
+      self.passed.append(record)
+    else:
+      self.error.append(record)
 
-        Returns:
-            A TestResult instance that's the sum of two TestResult instances.
-        """
-        if not isinstance(r, TestResult):
-            raise TypeError('Operand %s of type %s is not a TestResult.' %
-                            (r, type(r)))
-        sum_result = TestResult()
-        for name in sum_result.__dict__:
-            r_value = getattr(r, name)
-            l_value = getattr(self, name)
-            if isinstance(r_value, list):
-                setattr(sum_result, name, l_value + r_value)
-        return sum_result
+  def add_controller_info_record(self, controller_info_record):
+    """Adds a controller info record to results.
 
-    def add_record(self, record):
-        """Adds a test record to test result.
+    This can be called multiple times for each test class.
 
-        A record is considered executed once it's added to the test result.
+    Args:
+      controller_info_record: ControllerInfoRecord object to be added to
+        the result.
+    """
+    self.controller_info.append(controller_info_record)
 
-        Adding the record finalizes the content of a record, so no change
-        should be made to the record afterwards.
+  def add_class_error(self, test_record):
+    """Add a record to indicate a test class has failed before any test
+    could execute.
 
-        Args:
-            record: A test record object to add.
-        """
-        record.update_record()
-        if record.result == TestResultEnums.TEST_RESULT_SKIP:
-            self.skipped.append(record)
-            return
-        self.executed.append(record)
-        if record.result == TestResultEnums.TEST_RESULT_FAIL:
-            self.failed.append(record)
-        elif record.result == TestResultEnums.TEST_RESULT_PASS:
-            self.passed.append(record)
-        else:
-            self.error.append(record)
+    This is only called before any test is actually executed. So it only
+    adds an error entry that describes why the class failed to the tally
+    and does not affect the total number of tests requested or executed.
 
-    def add_controller_info_record(self, controller_info_record):
-        """Adds a controller info record to results.
+    Args:
+      test_record: A TestResultRecord object for the test class.
+    """
+    test_record.update_record()
+    self.error.append(test_record)
 
-        This can be called multiple times for each test class.
+  def is_test_executed(self, test_name):
+    """Checks if a specific test has been executed.
 
-        Args:
-            controller_info_record: ControllerInfoRecord object to be added to
-                the result.
-        """
-        self.controller_info.append(controller_info_record)
+    Args:
+      test_name: string, the name of the test to check.
 
-    def add_class_error(self, test_record):
-        """Add a record to indicate a test class has failed before any test
-        could execute.
+    Returns:
+      True if the test has been executed according to the test result,
+      False otherwise.
+    """
+    for record in self.executed:
+      if record.test_name == test_name:
+        return True
+    return False
 
-        This is only called before any test is actually executed. So it only
-        adds an error entry that describes why the class failed to the tally
-        and does not affect the total number of tests requested or executed.
+  @property
+  def is_all_pass(self):
+    """True if no tests failed or threw errors, False otherwise."""
+    num_of_failures = len(self.failed) + len(self.error)
+    if num_of_failures == 0:
+      return True
+    return False
 
-        Args:
-            test_record: A TestResultRecord object for the test class.
-        """
-        test_record.update_record()
-        self.error.append(test_record)
+  def requested_test_names_dict(self):
+    """Gets the requested test names of a test run in a dict format.
 
-    def is_test_executed(self, test_name):
-        """Checks if a specific test has been executed.
+    Note a test can be requested multiple times, so there can be duplicated
+    values.
 
-        Args:
-            test_name: string, the name of the test to check.
+    Returns:
+      A dict with a key and the list of strings.
+    """
+    return {'Requested Tests': copy.deepcopy(self.requested)}
 
-        Returns:
-            True if the test has been executed according to the test result,
-            False otherwise.
-        """
-        for record in self.executed:
-            if record.test_name == test_name:
-                return True
-        return False
+  def summary_str(self):
+    """Gets a string that summarizes the stats of this test result.
 
-    @property
-    def is_all_pass(self):
-        """True if no tests failed or threw errors, False otherwise."""
-        num_of_failures = len(self.failed) + len(self.error)
-        if num_of_failures == 0:
-            return True
-        return False
+    The summary provides the counts of how many tests fall into each
+    category, like 'Passed', 'Failed' etc.
 
-    def requested_test_names_dict(self):
-        """Gets the requested test names of a test run in a dict format.
+    Format of the string is:
+      Requested <int>, Executed <int>, ...
 
-        Note a test can be requested multiple times, so there can be duplicated
-        values.
+    Returns:
+      A summary string of this test result.
+    """
+    l = ['%s %d' % (k, v) for k, v in self.summary_dict().items()]
+    # Sort the list so the order is the same every time.
+    msg = ', '.join(sorted(l))
+    return msg
 
-        Returns:
-            A dict with a key and the list of strings.
-        """
-        return {'Requested Tests': copy.deepcopy(self.requested)}
+  def summary_dict(self):
+    """Gets a dictionary that summarizes the stats of this test result.
 
-    def summary_str(self):
-        """Gets a string that summarizes the stats of this test result.
+    The summary provides the counts of how many tests fall into each
+    category, like 'Passed', 'Failed' etc.
 
-        The summary provides the counts of how many tests fall into each
-        category, like 'Passed', 'Failed' etc.
-
-        Format of the string is:
-            Requested <int>, Executed <int>, ...
-
-        Returns:
-            A summary string of this test result.
-        """
-        l = ['%s %d' % (k, v) for k, v in self.summary_dict().items()]
-        # Sort the list so the order is the same every time.
-        msg = ', '.join(sorted(l))
-        return msg
-
-    def summary_dict(self):
-        """Gets a dictionary that summarizes the stats of this test result.
-
-        The summary provides the counts of how many tests fall into each
-        category, like 'Passed', 'Failed' etc.
-
-        Returns:
-            A dictionary with the stats of this test result.
-        """
-        d = {}
-        d['Requested'] = len(self.requested)
-        d['Executed'] = len(self.executed)
-        d['Passed'] = len(self.passed)
-        d['Failed'] = len(self.failed)
-        d['Skipped'] = len(self.skipped)
-        d['Error'] = len(self.error)
-        return d
+    Returns:
+      A dictionary with the stats of this test result.
+    """
+    d = {}
+    d['Requested'] = len(self.requested)
+    d['Executed'] = len(self.executed)
+    d['Passed'] = len(self.passed)
+    d['Failed'] = len(self.failed)
+    d['Skipped'] = len(self.skipped)
+    d['Error'] = len(self.error)
+    return d
diff --git a/mobly/runtime_test_info.py b/mobly/runtime_test_info.py
index 57b0742..90a780a 100644
--- a/mobly/runtime_test_info.py
+++ b/mobly/runtime_test_info.py
@@ -19,43 +19,43 @@
 
 
 class RuntimeTestInfo(object):
-    """Container class for runtime information of a test or test stage.
+  """Container class for runtime information of a test or test stage.
 
-    One object corresponds to one test. This is meant to be a read-only class.
+  One object corresponds to one test. This is meant to be a read-only class.
 
-    This also applies to test stages like `setup_class`, which has its own
-    runtime info but is not part of any single test.
+  This also applies to test stages like `setup_class`, which has its own
+  runtime info but is not part of any single test.
 
-    Attributes:
-        name: string, name of the test.
-        signature: string, an identifier of the test, a combination of test
-            name and begin time.
-        record: TestResultRecord, the current test result record. This changes
-            as the test's execution progresses.
-        output_path: string, path to the test's output directory. It's created
-            upon accessing.
-    """
+  Attributes:
+    name: string, name of the test.
+    signature: string, an identifier of the test, a combination of test
+      name and begin time.
+    record: TestResultRecord, the current test result record. This changes
+      as the test's execution progresses.
+    output_path: string, path to the test's output directory. It's created
+      upon accessing.
+  """
 
-    def __init__(self, test_name, log_path, record):
-        self._name = test_name
-        self._record = record
-        self._signature = '%s-%s' % (test_name, record.begin_time)
-        self._output_dir_path = utils.abs_path(
-            os.path.join(log_path, self._signature))
+  def __init__(self, test_name, log_path, record):
+    self._name = test_name
+    self._record = record
+    self._signature = '%s-%s' % (test_name, record.begin_time)
+    self._output_dir_path = utils.abs_path(
+      os.path.join(log_path, self._signature))
 
-    @property
-    def name(self):
-        return self._name
+  @property
+  def name(self):
+    return self._name
 
-    @property
-    def signature(self):
-        return self._signature
+  @property
+  def signature(self):
+    return self._signature
 
-    @property
-    def record(self):
-        return copy.deepcopy(self._record)
+  @property
+  def record(self):
+    return copy.deepcopy(self._record)
 
-    @property
-    def output_path(self):
-        utils.create_dir(self._output_dir_path)
-        return self._output_dir_path
+  @property
+  def output_path(self):
+    utils.create_dir(self._output_dir_path)
+    return self._output_dir_path
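
A minimal sketch of exercising the read-only container above directly; the
`TestResultRecord` constructor arguments are again assumed, as they are not part
of this diff:

.. code-block:: python

  from mobly import records
  from mobly.runtime_test_info import RuntimeTestInfo

  record = records.TestResultRecord('test_hello', 'HelloTest')  # Assumed signature.
  record.test_begin()  # Sets begin_time, which feeds into the signature.

  info = RuntimeTestInfo('test_hello', '/tmp/mobly_logs', record)
  print(info.name)         # 'test_hello'
  print(info.signature)    # 'test_hello-<begin_time>'
  print(info.output_path)  # The directory is created on first access.
  print(info.record)       # A deep copy; mutating it won't affect the original.
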
diff --git a/mobly/signals.py b/mobly/signals.py
index 1d0095d..cc16d86 100644
--- a/mobly/signals.py
+++ b/mobly/signals.py
@@ -18,63 +18,63 @@
 
 
 class TestSignalError(Exception):
-    """Raised when an error occurs inside a test signal."""
+  """Raised when an error occurs inside a test signal."""
 
 
 class TestSignal(Exception):
-    """Base class for all test result control signals. This is used to signal
-    the result of a test.
+  """Base class for all test result control signals. This is used to signal
+  the result of a test.
 
-    Attributes:
-        details: A string that describes the reason for raising this signal.
-        extras: A json-serializable data type to convey extra information about
-            a test result.
-    """
+  Attributes:
+    details: A string that describes the reason for raising this signal.
+    extras: A json-serializable data type to convey extra information about
+      a test result.
+  """
 
-    def __init__(self, details, extras=None):
-        super(TestSignal, self).__init__(details)
-        self.details = details
-        try:
-            json.dumps(extras)
-            self.extras = extras
-        except TypeError:
-            raise TestSignalError('Extras must be json serializable. %s '
-                                  'is not.' % extras)
+  def __init__(self, details, extras=None):
+    super(TestSignal, self).__init__(details)
+    self.details = details
+    try:
+      json.dumps(extras)
+      self.extras = extras
+    except TypeError:
+      raise TestSignalError('Extras must be json serializable. %s '
+                  'is not.' % extras)
 
-    def __str__(self):
-        return 'Details=%s, Extras=%s' % (self.details, self.extras)
+  def __str__(self):
+    return 'Details=%s, Extras=%s' % (self.details, self.extras)
 
 
 class TestError(TestSignal):
-    """Raised when a test has an unexpected error."""
+  """Raised when a test has an unexpected error."""
 
 
 class TestFailure(TestSignal):
-    """Raised when a test has failed."""
+  """Raised when a test has failed."""
 
 
 class TestPass(TestSignal):
-    """Raised when a test has passed."""
+  """Raised when a test has passed."""
 
 
 class TestSkip(TestSignal):
-    """Raised when a test has been skipped."""
+  """Raised when a test has been skipped."""
 
 
 class TestAbortSignal(TestSignal):
-    """Base class for abort signals.
-    """
+  """Base class for abort signals.
+  """
 
 
 class TestAbortClass(TestAbortSignal):
-    """Raised when all subsequent tests within the same test class should
-    be aborted.
-    """
+  """Raised when all subsequent tests within the same test class should
+  be aborted.
+  """
 
 
 class TestAbortAll(TestAbortSignal):
-    """Raised when all subsequent tests should be aborted."""
+  """Raised when all subsequent tests should be aborted."""
 
 
 class ControllerError(Exception):
-    """Raised when an error occurred in controller classes."""
+  """Raised when an error occurred in controller classes."""
diff --git a/mobly/suite_runner.py b/mobly/suite_runner.py
index 8e7eab0..ea870e6 100644
--- a/mobly/suite_runner.py
+++ b/mobly/suite_runner.py
@@ -18,13 +18,13 @@
 
 .. code-block:: python
 
-    from mobly import suite_runner
+  from mobly import suite_runner
 
-    from my.test.lib import foo_test
-    from my.test.lib import bar_test
-    ...
-    if __name__ == '__main__':
-        suite_runner.run_suite(foo_test.FooTest, bar_test.BarTest)
+  from my.test.lib import foo_test
+  from my.test.lib import bar_test
+  ...
+  if __name__ == '__main__':
+    suite_runner.run_suite(foo_test.FooTest, bar_test.BarTest)
 """
 
 import argparse
@@ -39,143 +39,143 @@
 
 
 class Error(Exception):
-    pass
+  pass
 
 
 def run_suite(test_classes, argv=None):
-    """Executes multiple test classes as a suite.
+  """Executes multiple test classes as a suite.
 
-    This is the default entry point for running a test suite script file
-    directly.
+  This is the default entry point for running a test suite script file
+  directly.
 
-    Args:
-        test_classes: List of python classes containing Mobly tests.
-        argv: A list that is then parsed as cli args. If None, defaults to cli
-            input.
-    """
-    # Parse cli args.
-    parser = argparse.ArgumentParser(description='Mobly Suite Executable.')
-    parser.add_argument('-c',
-                        '--config',
-                        type=str,
-                        required=True,
-                        metavar='<PATH>',
-                        help='Path to the test configuration file.')
-    parser.add_argument(
-        '--tests',
-        '--test_case',
-        nargs='+',
-        type=str,
-        metavar='[ClassA[.test_a] ClassB[.test_b] ...]',
-        help='A list of test classes and optional tests to execute.')
-    if not argv:
-        argv = sys.argv[1:]
-    args = parser.parse_args(argv)
-    # Load test config file.
-    test_configs = config_parser.load_test_config_file(args.config)
+  Args:
+    test_classes: List of python classes containing Mobly tests.
+    argv: A list that is then parsed as cli args. If None, defaults to cli
+      input.
+  """
+  # Parse cli args.
+  parser = argparse.ArgumentParser(description='Mobly Suite Executable.')
+  parser.add_argument('-c',
+            '--config',
+            type=str,
+            required=True,
+            metavar='<PATH>',
+            help='Path to the test configuration file.')
+  parser.add_argument(
+    '--tests',
+    '--test_case',
+    nargs='+',
+    type=str,
+    metavar='[ClassA[.test_a] ClassB[.test_b] ...]',
+    help='A list of test classes and optional tests to execute.')
+  if not argv:
+    argv = sys.argv[1:]
+  args = parser.parse_args(argv)
+  # Load test config file.
+  test_configs = config_parser.load_test_config_file(args.config)
 
-    # Check the classes that were passed in
-    for test_class in test_classes:
-        if not issubclass(test_class, base_test.BaseTestClass):
-            logging.error(
-                'Test class %s does not extend '
-                'mobly.base_test.BaseTestClass', test_class)
-            sys.exit(1)
+  # Check the classes that were passed in
+  for test_class in test_classes:
+    if not issubclass(test_class, base_test.BaseTestClass):
+      logging.error(
+        'Test class %s does not extend '
+        'mobly.base_test.BaseTestClass', test_class)
+      sys.exit(1)
 
-    # Find the full list of tests to execute
-    selected_tests = compute_selected_tests(test_classes, args.tests)
+  # Find the full list of tests to execute
+  selected_tests = compute_selected_tests(test_classes, args.tests)
 
-    # Execute the suite
-    ok = True
-    for config in test_configs:
-        runner = test_runner.TestRunner(config.log_path, config.testbed_name)
-        with runner.mobly_logger():
-            for (test_class, tests) in selected_tests.items():
-                runner.add_test_class(config, test_class, tests)
-            try:
-                runner.run()
-                ok = runner.results.is_all_pass and ok
-            except signals.TestAbortAll:
-                pass
-            except:
-                logging.exception('Exception when executing %s.',
-                                  config.testbed_name)
-                ok = False
-    if not ok:
-        sys.exit(1)
+  # Execute the suite
+  ok = True
+  for config in test_configs:
+    runner = test_runner.TestRunner(config.log_path, config.testbed_name)
+    with runner.mobly_logger():
+      for (test_class, tests) in selected_tests.items():
+        runner.add_test_class(config, test_class, tests)
+      try:
+        runner.run()
+        ok = runner.results.is_all_pass and ok
+      except signals.TestAbortAll:
+        pass
+      except:
+        logging.exception('Exception when executing %s.',
+                  config.testbed_name)
+        ok = False
+  if not ok:
+    sys.exit(1)
 
 
 def compute_selected_tests(test_classes, selected_tests):
-    """Computes tests to run for each class from selector strings.
+  """Computes tests to run for each class from selector strings.
 
-    This function transforms a list of selector strings (such as FooTest or
-    FooTest.test_method_a) to a dict where keys are test classes, and
-    values are lists of selected tests in those classes. None means all tests in
-    that class are selected.
+  This function transforms a list of selector strings (such as FooTest or
+  FooTest.test_method_a) to a dict where keys are test classes, and
+  values are lists of selected tests in those classes. None means all tests in
+  that class are selected.
 
-    Args:
-        test_classes: list of strings, names of all the classes that are part
-            of a suite.
-        selected_tests: list of strings, list of tests to execute. If empty,
-            all classes in `test_classes` are selected. E.g.
+  Args:
+    test_classes: list of strings, names of all the classes that are part
+      of a suite.
+    selected_tests: list of strings, list of tests to execute. If empty,
+      all classes in `test_classes` are selected. E.g.
 
-            .. code-block:: python
+      .. code-block:: python
 
-                [
-                    'FooTest',
-                    'BarTest',
-                    'BazTest.test_method_a',
-                    'BazTest.test_method_b'
-                ]
+        [
+          'FooTest',
+          'BarTest',
+          'BazTest.test_method_a',
+          'BazTest.test_method_b'
+        ]
 
-    Returns:
-        dict: Identifiers for TestRunner. Keys are test classes; values are
-            lists of test names within the class. E.g. the example in
-            `selected_tests` would translate to:
+  Returns:
+    dict: Identifiers for TestRunner. Keys are test classes; values are
+      lists of test names within the class. E.g. the example in
+      `selected_tests` would translate to:
 
-            .. code-block:: python
+      .. code-block:: python
 
-                {
-                    FooTest: None,
-                    BarTest: None,
-                    BazTest: ['test_method_a', 'test_method_b']
-                }
+        {
+          FooTest: None,
+          BarTest: None,
+          BazTest: ['test_method_a', 'test_method_b']
+        }
 
-            This dict is easy to consume for `TestRunner`.
-    """
-    class_to_tests = collections.OrderedDict()
-    if not selected_tests:
-        # No selection is needed; simply run all tests in all classes.
-        for test_class in test_classes:
-            class_to_tests[test_class] = None
-        return class_to_tests
-
-    # The user is selecting some tests to run. Parse the selectors.
-    # Dict from test class name to list of tests to execute (or None for all
-    # tests).
-    test_class_name_to_tests = collections.OrderedDict()
-    for test_name in selected_tests:
-        if '.' in test_name:  # Has a test method
-            (test_class_name, test_name) = test_name.split('.')
-            if test_class_name not in test_class_name_to_tests:
-                # Never seen this class before
-                test_class_name_to_tests[test_class_name] = [test_name]
-            elif test_class_name_to_tests[test_class_name] is None:
-                # Already running all tests in this class, so ignore this extra
-                # test.
-                pass
-            else:
-                test_class_name_to_tests[test_class_name].append(test_name)
-        else:  # No test method; run all tests in this class.
-            test_class_name_to_tests[test_name] = None
-
-    # Now transform class names to class objects.
-    # Dict from test_name class name to instance.
-    class_name_to_class = {cls.__name__: cls for cls in test_classes}
-    for test_class_name, tests in test_class_name_to_tests.items():
-        test_class = class_name_to_class.get(test_class_name)
-        if not test_class:
-            raise Error('Unknown test_name class %s' % test_class_name)
-        class_to_tests[test_class] = tests
-
+      This dict is easy to consume for `TestRunner`.
+  """
+  class_to_tests = collections.OrderedDict()
+  if not selected_tests:
+    # No selection is needed; simply run all tests in all classes.
+    for test_class in test_classes:
+      class_to_tests[test_class] = None
     return class_to_tests
+
+  # The user is selecting some tests to run. Parse the selectors.
+  # Dict from test class name to list of tests to execute (or None for all
+  # tests).
+  test_class_name_to_tests = collections.OrderedDict()
+  for test_name in selected_tests:
+    if '.' in test_name:  # Has a test method
+      (test_class_name, test_name) = test_name.split('.')
+      if test_class_name not in test_class_name_to_tests:
+        # Never seen this class before
+        test_class_name_to_tests[test_class_name] = [test_name]
+      elif test_class_name_to_tests[test_class_name] is None:
+        # Already running all tests in this class, so ignore this extra
+        # test.
+        pass
+      else:
+        test_class_name_to_tests[test_class_name].append(test_name)
+    else:  # No test method; run all tests in this class.
+      test_class_name_to_tests[test_name] = None
+
+  # Now transform class names to class objects.
+  # Dict from test_name class name to instance.
+  class_name_to_class = {cls.__name__: cls for cls in test_classes}
+  for test_class_name, tests in test_class_name_to_tests.items():
+    test_class = class_name_to_class.get(test_class_name)
+    if not test_class:
+      raise Error('Unknown test_name class %s' % test_class_name)
+    class_to_tests[test_class] = tests
+
+  return class_to_tests
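
A concrete example of the selector logic above, using two hypothetical test
classes:

.. code-block:: python

  from mobly import base_test, suite_runner

  class FooTest(base_test.BaseTestClass):
    def test_a(self):
      pass

  class BazTest(base_test.BaseTestClass):
    def test_method_a(self):
      pass

    def test_method_b(self):
      pass

  selected = suite_runner.compute_selected_tests(
    [FooTest, BazTest], ['FooTest', 'BazTest.test_method_a'])
  # OrderedDict([(FooTest, None), (BazTest, ['test_method_a'])])
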
diff --git a/mobly/test_runner.py b/mobly/test_runner.py
index c3c3b54..fbae843 100644
--- a/mobly/test_runner.py
+++ b/mobly/test_runner.py
@@ -32,322 +32,322 @@
 
 
 class Error(Exception):
-    pass
+  pass
 
 
 def main(argv=None):
-    """Execute the test class in a test module.
+  """Execute the test class in a test module.
 
-    This is the default entry point for running a test script file directly.
-    In this case, only one test class in a test script is allowed.
+  This is the default entry point for running a test script file directly.
+  In this case, only one test class in a test script is allowed.
 
-    To make your test script executable, add the following to your file:
+  To make your test script executable, add the following to your file:
 
-    .. code-block:: python
+  .. code-block:: python
 
-        from mobly import test_runner
-        ...
-        if __name__ == '__main__':
-            test_runner.main()
+    from mobly import test_runner
+    ...
+    if __name__ == '__main__':
+      test_runner.main()
 
-    If you want to implement your own cli entry point, you could use function
-    execute_one_test_class(test_class, test_config, test_identifier)
+  If you want to implement your own cli entry point, you could use function
+  execute_one_test_class(test_class, test_config, test_identifier)
 
-    Args:
-        argv: A list that is then parsed as cli args. If None, defaults to cli
-            input.
-    """
-    args = parse_mobly_cli_args(argv)
-    # Find the test class in the test script.
-    test_class = _find_test_class()
-    if args.list_tests:
-        _print_test_names(test_class)
-        sys.exit(0)
-    # Load test config file.
-    test_configs = config_parser.load_test_config_file(args.config,
-                                                       args.test_bed)
-    # Parse test specifiers if they exist.
-    tests = None
-    if args.tests:
-        tests = args.tests
-    # Execute the test class with configs.
-    ok = True
-    for config in test_configs:
-        runner = TestRunner(log_dir=config.log_path,
-                            testbed_name=config.testbed_name)
-        with runner.mobly_logger():
-            runner.add_test_class(config, test_class, tests)
-            try:
-                runner.run()
-                ok = runner.results.is_all_pass and ok
-            except signals.TestAbortAll:
-                pass
-            except:
-                logging.exception('Exception when executing %s.',
-                                  config.testbed_name)
-                ok = False
-    if not ok:
-        sys.exit(1)
+  Args:
+    argv: A list that is then parsed as cli args. If None, defaults to cli
+      input.
+  """
+  args = parse_mobly_cli_args(argv)
+  # Find the test class in the test script.
+  test_class = _find_test_class()
+  if args.list_tests:
+    _print_test_names(test_class)
+    sys.exit(0)
+  # Load test config file.
+  test_configs = config_parser.load_test_config_file(args.config,
+                             args.test_bed)
+  # Parse test specifiers if they exist.
+  tests = None
+  if args.tests:
+    tests = args.tests
+  # Execute the test class with configs.
+  ok = True
+  for config in test_configs:
+    runner = TestRunner(log_dir=config.log_path,
+              testbed_name=config.testbed_name)
+    with runner.mobly_logger():
+      runner.add_test_class(config, test_class, tests)
+      try:
+        runner.run()
+        ok = runner.results.is_all_pass and ok
+      except signals.TestAbortAll:
+        pass
+      except:
+        logging.exception('Exception when executing %s.',
+                  config.testbed_name)
+        ok = False
+  if not ok:
+    sys.exit(1)
 
 
 def parse_mobly_cli_args(argv):
-    """Parses cli args that are consumed by Mobly.
+  """Parses cli args that are consumed by Mobly.
 
-    This is the arg parsing logic for the default test_runner.main entry point.
+  This is the arg parsing logic for the default test_runner.main entry point.
 
-    Multiple arg parsers can be applied to the same set of cli input. So you
-    can use this logic in addition to any other args you want to parse. This
-    function ignores the args that don't apply to default `test_runner.main`.
+  Multiple arg parsers can be applied to the same set of cli input. So you
+  can use this logic in addition to any other args you want to parse. This
+  function ignores the args that don't apply to default `test_runner.main`.
 
-    Args:
-        argv: A list that is then parsed as cli args. If None, defaults to cli
-            input.
+  Args:
+    argv: A list that is then parsed as cli args. If None, defaults to cli
+      input.
 
-    Returns:
-        Namespace containing the parsed args.
-    """
-    parser = argparse.ArgumentParser(description='Mobly Test Executable.')
-    group = parser.add_mutually_exclusive_group(required=True)
-    group.add_argument('-c',
-                       '--config',
-                       type=str,
-                       metavar='<PATH>',
-                       help='Path to the test configuration file.')
-    group.add_argument(
-        '-l',
-        '--list_tests',
-        action='store_true',
-        help='Print the names of the tests defined in a script without '
-        'executing them.')
-    parser.add_argument('--tests',
-                        '--test_case',
-                        nargs='+',
-                        type=str,
-                        metavar='[test_a test_b...]',
-                        help='A list of tests in the test class to execute.')
-    parser.add_argument('-tb',
-                        '--test_bed',
-                        nargs='+',
-                        type=str,
-                        metavar='[<TEST BED NAME1> <TEST BED NAME2> ...]',
-                        help='Specify which test beds to run tests on.')
-    if not argv:
-        argv = sys.argv[1:]
-    return parser.parse_known_args(argv)[0]
+  Returns:
+    Namespace containing the parsed args.
+  """
+  parser = argparse.ArgumentParser(description='Mobly Test Executable.')
+  group = parser.add_mutually_exclusive_group(required=True)
+  group.add_argument('-c',
+             '--config',
+             type=str,
+             metavar='<PATH>',
+             help='Path to the test configuration file.')
+  group.add_argument(
+    '-l',
+    '--list_tests',
+    action='store_true',
+    help='Print the names of the tests defined in a script without '
+    'executing them.')
+  parser.add_argument('--tests',
+            '--test_case',
+            nargs='+',
+            type=str,
+            metavar='[test_a test_b...]',
+            help='A list of tests in the test class to execute.')
+  parser.add_argument('-tb',
+            '--test_bed',
+            nargs='+',
+            type=str,
+            metavar='[<TEST BED NAME1> <TEST BED NAME2> ...]',
+            help='Specify which test beds to run tests on.')
+  if not argv:
+    argv = sys.argv[1:]
+  return parser.parse_known_args(argv)[0]
 
 
 def _find_test_class():
-    """Finds the test class in a test script.
+  """Finds the test class in a test script.
 
-    Walk through module members and find the subclass of BaseTestClass. Only
-    one subclass is allowed in a test script.
+  Walk through module members and find the subclass of BaseTestClass. Only
+  one subclass is allowed in a test script.
 
-    Returns:
-        The test class in the test module.
+  Returns:
+    The test class in the test module.
 
-    Raises:
-      SystemExit: Raised if the number of test classes is not exactly one.
-    """
-    try:
-        return utils.find_subclass_in_module(base_test.BaseTestClass,
-                                             sys.modules['__main__'])
-    except ValueError:
-        logging.exception('Exactly one subclass of `base_test.BaseTestClass`'
-                          ' should be in the main file.')
-        sys.exit(1)
+  Raises:
+    SystemExit: Raised if the number of test classes is not exactly one.
+  """
+  try:
+    return utils.find_subclass_in_module(base_test.BaseTestClass,
+                       sys.modules['__main__'])
+  except ValueError:
+    logging.exception('Exactly one subclass of `base_test.BaseTestClass`'
+              ' should be in the main file.')
+    sys.exit(1)
 
 
 def _print_test_names(test_class):
-    """Prints the names of all the tests in a test module.
+  """Prints the names of all the tests in a test module.
 
-    If the module has generated tests defined based on controller info, this
-    may not be able to print the generated tests.
+  If the module has generated tests defined based on controller info, this
+  may not be able to print the generated tests.
 
-    Args:
-        test_class: module, the test module to print names from.
-    """
-    cls = test_class(config_parser.TestRunConfig())
-    test_names = []
-    try:
-        cls.setup_generated_tests()
-        test_names = cls.get_existing_test_names()
-    except:
-        logging.exception('Failed to retrieve generated tests.')
-    finally:
-        cls._controller_manager.unregister_controllers()
-    print('==========> %s <==========' % cls.TAG)
-    for name in test_names:
-        print(name)
+  Args:
+    test_class: module, the test module to print names from.
+  """
+  cls = test_class(config_parser.TestRunConfig())
+  test_names = []
+  try:
+    cls.setup_generated_tests()
+    test_names = cls.get_existing_test_names()
+  except:
+    logging.exception('Failed to retrieve generated tests.')
+  finally:
+    cls._controller_manager.unregister_controllers()
+  print('==========> %s <==========' % cls.TAG)
+  for name in test_names:
+    print(name)
 
 
 class TestRunner(object):
-    """The class that instantiates test classes, executes tests, and
-    reports results.
+  """The class that instantiates test classes, executes tests, and
+  reports results.
 
-    One TestRunner instance is associated with one specific output folder and
-    testbed. TestRunner.run() will generate a single set of output files and
-    results for all tests that have been added to this runner.
+  One TestRunner instance is associated with one specific output folder and
+  testbed. TestRunner.run() will generate a single set of output files and
+  results for all tests that have been added to this runner.
 
-    Attributes:
-        self.results: The test result object used to record the results of
-            this test run.
+  Attributes:
+    self.results: The test result object used to record the results of
+      this test run.
+  """
+
+  class _TestRunInfo(object):
+    """Identifies one test class to run, which tests to run, and config to
+    run it with.
     """
 
-    class _TestRunInfo(object):
-        """Identifies one test class to run, which tests to run, and config to
-        run it with.
-        """
+    def __init__(self,
+           config,
+           test_class,
+           tests=None,
+           test_class_name_suffix=None):
+      self.config = config
+      self.test_class = test_class
+      self.test_class_name_suffix = test_class_name_suffix
+      self.tests = tests
 
-        def __init__(self,
-                     config,
-                     test_class,
-                     tests=None,
-                     test_class_name_suffix=None):
-            self.config = config
-            self.test_class = test_class
-            self.test_class_name_suffix = test_class_name_suffix
-            self.tests = tests
+  def __init__(self, log_dir, testbed_name):
+    """Constructor for TestRunner.
 
-    def __init__(self, log_dir, testbed_name):
-        """Constructor for TestRunner.
+    Args:
+      log_dir: string, root folder where logs should be written.
+      testbed_name: string, name of the testbed to run tests on.
+    """
+    self._log_dir = log_dir
+    self._testbed_name = testbed_name
 
-        Args:
-            log_dir: string, root folder where logs should be written.
-            testbed_name: string, name of the testbed to run tests on.
-        """
-        self._log_dir = log_dir
-        self._testbed_name = testbed_name
+    self.results = records.TestResult()
+    self._test_run_infos = []
 
-        self.results = records.TestResult()
-        self._test_run_infos = []
+    # Set default logging values. Necessary if `run` is used outside of the
+    # `mobly_logger` context.
+    self._update_log_path()
 
-        # Set default logging values. Necessary if `run` is used outside of the
-        # `mobly_logger` context.
-        self._update_log_path()
+  def _update_log_path(self):
+    """Updates the logging values with the current timestamp."""
+    self._start_time = logger.get_log_file_timestamp()
+    self._root_output_path = os.path.join(self._log_dir,
+                        self._testbed_name,
+                        self._start_time)
 
-    def _update_log_path(self):
-        """Updates the logging values with the current timestamp."""
-        self._start_time = logger.get_log_file_timestamp()
-        self._root_output_path = os.path.join(self._log_dir,
-                                              self._testbed_name,
-                                              self._start_time)
+  @contextlib.contextmanager
+  def mobly_logger(self, alias='latest'):
+    """Starts and stops a logging context for a Mobly test run.
 
-    @contextlib.contextmanager
-    def mobly_logger(self, alias='latest'):
-        """Starts and stops a logging context for a Mobly test run.
+    Args:
+      alias: optional string, the name of the latest log alias directory to
+        create. If a falsy value is specified, then the directory will not
+        be created.
 
-        Args:
-          alias: optional string, the name of the latest log alias directory to
-              create. If a falsy value is specified, then the directory will not
-              be created.
+    Yields:
+      The host file path where the logs for the test run are stored.
+    """
+    self._update_log_path()
+    logger.setup_test_logger(self._root_output_path,
+                 self._testbed_name,
+                 alias=alias)
+    try:
+      yield self._root_output_path
+    finally:
+      logger.kill_test_logger(logging.getLogger())
 
-        Yields:
-            The host file path where the logs for the test run are stored.
-        """
-        self._update_log_path()
-        logger.setup_test_logger(self._root_output_path,
-                                 self._testbed_name,
-                                 alias=alias)
+  def add_test_class(self, config, test_class, tests=None, name_suffix=None):
+    """Adds tests to the execution plan of this TestRunner.
+
+    Args:
+      config: config_parser.TestRunConfig, configuration to execute this
+        test class with.
+      test_class: class, test class to execute.
+      tests: list of strings, optional list of test names within the
+        class to execute.
+      name_suffix: string, suffix to append to the class name for
+        reporting. This is used for differentiating the same class
+        executed with different parameters in a suite.
+
+    Raises:
+      Error: if the provided config has a log_path or testbed_name which
+        differs from the arguments provided to this TestRunner's
+        constructor.
+    """
+    if self._log_dir != config.log_path:
+      raise Error(
+        'TestRunner\'s log folder is "%s", but a test config with a '
+        'different log folder ("%s") was added.' %
+        (self._log_dir, config.log_path))
+    if self._testbed_name != config.testbed_name:
+      raise Error(
+        'TestRunner\'s test bed is "%s", but a test config with a '
+        'different test bed ("%s") was added.' %
+        (self._testbed_name, config.testbed_name))
+    self._test_run_infos.append(
+      TestRunner._TestRunInfo(config=config,
+                  test_class=test_class,
+                  tests=tests,
+                  test_class_name_suffix=name_suffix))
+
+  def _run_test_class(self, config, test_class, tests=None):
+    """Instantiates and executes a test class.
+
+    If tests is None, the tests listed in self.tests will be executed
+    instead. If self.tests is empty as well, every test in this test class
+    will be executed.
+
+    Args:
+      config: A config_parser.TestRunConfig object.
+      test_class: class, test class to execute.
+      tests: Optional list of test names within the class to execute.
+    """
+    test_instance = test_class(config)
+    logging.debug('Executing test class "%s" with config: %s',
+            test_class.__name__, config)
+    try:
+      cls_result = test_instance.run(tests)
+      self.results += cls_result
+    except signals.TestAbortAll as e:
+      self.results += e.results
+      raise e
+
+  def run(self):
+    """Executes tests.
+
+    This will instantiate controller and test classes, execute tests, and
+    print a summary.
+
+    Raises:
+      Error: if no tests have previously been added to this runner using
+        add_test_class(...).
+    """
+    if not self._test_run_infos:
+      raise Error('No tests to execute.')
+
+    # Ensure the log path exists. Necessary if `run` is used outside of the
+    # `mobly_logger` context.
+    utils.create_dir(self._root_output_path)
+
+    summary_writer = records.TestSummaryWriter(
+      os.path.join(self._root_output_path, records.OUTPUT_FILE_SUMMARY))
+    try:
+      for test_run_info in self._test_run_infos:
+        # Set up the test-specific config
+        test_config = test_run_info.config.copy()
+        test_config.log_path = self._root_output_path
+        test_config.summary_writer = summary_writer
+        test_config.test_class_name_suffix = test_run_info.test_class_name_suffix
         try:
-            yield self._root_output_path
-        finally:
-            logger.kill_test_logger(logging.getLogger())
-
-    def add_test_class(self, config, test_class, tests=None, name_suffix=None):
-        """Adds tests to the execution plan of this TestRunner.
-
-        Args:
-            config: config_parser.TestRunConfig, configuration to execute this
-                test class with.
-            test_class: class, test class to execute.
-            tests: list of strings, optional list of test names within the
-                class to execute.
-            name_suffix: string, suffix to append to the class name for
-                reporting. This is used for differentiating the same class
-                executed with different parameters in a suite.
-
-        Raises:
-            Error: if the provided config has a log_path or testbed_name which
-                differs from the arguments provided to this TestRunner's
-                constructor.
-        """
-        if self._log_dir != config.log_path:
-            raise Error(
-                'TestRunner\'s log folder is "%s", but a test config with a '
-                'different log folder ("%s") was added.' %
-                (self._log_dir, config.log_path))
-        if self._testbed_name != config.testbed_name:
-            raise Error(
-                'TestRunner\'s test bed is "%s", but a test config with a '
-                'different test bed ("%s") was added.' %
-                (self._testbed_name, config.testbed_name))
-        self._test_run_infos.append(
-            TestRunner._TestRunInfo(config=config,
-                                    test_class=test_class,
-                                    tests=tests,
-                                    test_class_name_suffix=name_suffix))
-
-    def _run_test_class(self, config, test_class, tests=None):
-        """Instantiates and executes a test class.
-
-        If tests is None, the tests listed in self.tests will be executed
-        instead. If self.tests is empty as well, every test in this test class
-        will be executed.
-
-        Args:
-            config: A config_parser.TestRunConfig object.
-            test_class: class, test class to execute.
-            tests: Optional list of test names within the class to execute.
-        """
-        test_instance = test_class(config)
-        logging.debug('Executing test class "%s" with config: %s',
-                      test_class.__name__, config)
-        try:
-            cls_result = test_instance.run(tests)
-            self.results += cls_result
+          self._run_test_class(config=test_config,
+                     test_class=test_run_info.test_class,
+                     tests=test_run_info.tests)
         except signals.TestAbortAll as e:
-            self.results += e.results
-            raise e
-
-    def run(self):
-        """Executes tests.
-
-        This will instantiate controller and test classes, execute tests, and
-        print a summary.
-
-        Raises:
-            Error: if no tests have previously been added to this runner using
-                add_test_class(...).
-        """
-        if not self._test_run_infos:
-            raise Error('No tests to execute.')
-
-        # Ensure the log path exists. Necessary if `run` is used outside of the
-        # `mobly_logger` context.
-        utils.create_dir(self._root_output_path)
-
-        summary_writer = records.TestSummaryWriter(
-            os.path.join(self._root_output_path, records.OUTPUT_FILE_SUMMARY))
-        try:
-            for test_run_info in self._test_run_infos:
-                # Set up the test-specific config
-                test_config = test_run_info.config.copy()
-                test_config.log_path = self._root_output_path
-                test_config.summary_writer = summary_writer
-                test_config.test_class_name_suffix = test_run_info.test_class_name_suffix
-                try:
-                    self._run_test_class(config=test_config,
-                                         test_class=test_run_info.test_class,
-                                         tests=test_run_info.tests)
-                except signals.TestAbortAll as e:
-                    logging.warning(
-                        'Abort all subsequent test classes. Reason: %s', e)
-                    raise
-        finally:
-            summary_writer.dump(self.results.summary_dict(),
-                                records.TestSummaryEntryType.SUMMARY)
-            # Stop and show summary.
-            msg = '\nSummary for test run %s@%s: %s\n' % (
-                self._testbed_name, self._start_time,
-                self.results.summary_str())
-            logging.info(msg.strip())
+          logging.warning(
+            'Abort all subsequent test classes. Reason: %s', e)
+          raise
+    finally:
+      summary_writer.dump(self.results.summary_dict(),
+                records.TestSummaryEntryType.SUMMARY)
+      # Stop and show summary.
+      msg = '\nSummary for test run %s@%s: %s\n' % (
+        self._testbed_name, self._start_time,
+        self.results.summary_str())
+      logging.info(msg.strip())
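
A minimal sketch of a custom entry point assembled from the runner APIs above,
mirroring what `main()` does; the imported test module and its FooTest class are
hypothetical:

.. code-block:: python

  import sys

  from mobly import config_parser, test_runner

  from my.test.lib import foo_test  # Hypothetical module defining FooTest.

  def run(argv=None):
    args = test_runner.parse_mobly_cli_args(argv)
    ok = True
    for config in config_parser.load_test_config_file(args.config, args.test_bed):
      runner = test_runner.TestRunner(log_dir=config.log_path,
                                      testbed_name=config.testbed_name)
      with runner.mobly_logger():
        # TestAbortAll handling is omitted for brevity; `main()` catches it.
        runner.add_test_class(config, foo_test.FooTest, args.tests)
        runner.run()
        ok = runner.results.is_all_pass and ok
    if not ok:
      sys.exit(1)

  if __name__ == '__main__':
    run()
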
diff --git a/mobly/utils.py b/mobly/utils.py
index d636f87..55882ca 100644
--- a/mobly/utils.py
+++ b/mobly/utils.py
@@ -43,590 +43,590 @@
 valid_filename_chars = "-_." + ascii_letters_and_digits
 
 GMT_to_olson = {
-    "GMT-9": "America/Anchorage",
-    "GMT-8": "US/Pacific",
-    "GMT-7": "US/Mountain",
-    "GMT-6": "US/Central",
-    "GMT-5": "US/Eastern",
-    "GMT-4": "America/Barbados",
-    "GMT-3": "America/Buenos_Aires",
-    "GMT-2": "Atlantic/South_Georgia",
-    "GMT-1": "Atlantic/Azores",
-    "GMT+0": "Africa/Casablanca",
-    "GMT+1": "Europe/Amsterdam",
-    "GMT+2": "Europe/Athens",
-    "GMT+3": "Europe/Moscow",
-    "GMT+4": "Asia/Baku",
-    "GMT+5": "Asia/Oral",
-    "GMT+6": "Asia/Almaty",
-    "GMT+7": "Asia/Bangkok",
-    "GMT+8": "Asia/Hong_Kong",
-    "GMT+9": "Asia/Tokyo",
-    "GMT+10": "Pacific/Guam",
-    "GMT+11": "Pacific/Noumea",
-    "GMT+12": "Pacific/Fiji",
-    "GMT+13": "Pacific/Tongatapu",
-    "GMT-11": "Pacific/Midway",
-    "GMT-10": "Pacific/Honolulu"
+  "GMT-9": "America/Anchorage",
+  "GMT-8": "US/Pacific",
+  "GMT-7": "US/Mountain",
+  "GMT-6": "US/Central",
+  "GMT-5": "US/Eastern",
+  "GMT-4": "America/Barbados",
+  "GMT-3": "America/Buenos_Aires",
+  "GMT-2": "Atlantic/South_Georgia",
+  "GMT-1": "Atlantic/Azores",
+  "GMT+0": "Africa/Casablanca",
+  "GMT+1": "Europe/Amsterdam",
+  "GMT+2": "Europe/Athens",
+  "GMT+3": "Europe/Moscow",
+  "GMT+4": "Asia/Baku",
+  "GMT+5": "Asia/Oral",
+  "GMT+6": "Asia/Almaty",
+  "GMT+7": "Asia/Bangkok",
+  "GMT+8": "Asia/Hong_Kong",
+  "GMT+9": "Asia/Tokyo",
+  "GMT+10": "Pacific/Guam",
+  "GMT+11": "Pacific/Noumea",
+  "GMT+12": "Pacific/Fiji",
+  "GMT+13": "Pacific/Tongatapu",
+  "GMT-11": "Pacific/Midway",
+  "GMT-10": "Pacific/Honolulu"
 }
 
 
 class Error(Exception):
-    """Raised when an error occurs in a util"""
+  """Raised when an error occurs in a util"""
 
 
 def abs_path(path):
-    """Resolve the '.' and '~' in a path to get the absolute path.
+  """Resolve the '.' and '~' in a path to get the absolute path.
 
-    Args:
-        path: The path to expand.
+  Args:
+    path: The path to expand.
 
-    Returns:
-        The absolute path of the input path.
-    """
-    return os.path.abspath(os.path.expanduser(path))
+  Returns:
+    The absolute path of the input path.
+  """
+  return os.path.abspath(os.path.expanduser(path))
 
 
 def create_dir(path):
-    """Creates a directory if it does not exist already.
+  """Creates a directory if it does not exist already.
 
-    Args:
-        path: The path of the directory to create.
-    """
-    full_path = abs_path(path)
-    if not os.path.exists(full_path):
-        try:
-            os.makedirs(full_path)
-        except OSError as e:
-            # ignore the error for dir already exist.
-            if e.errno != errno.EEXIST:
-                raise
+  Args:
+    path: The path of the directory to create.
+  """
+  full_path = abs_path(path)
+  if not os.path.exists(full_path):
+    try:
+      os.makedirs(full_path)
+    except OSError as e:
+      # ignore the error for dir already exist.
+      if e.errno != errno.EEXIST:
+        raise
 
 
 def create_alias(target_path, alias_path):
-    """Creates an alias at 'alias_path' pointing to the file 'target_path'.
+  """Creates an alias at 'alias_path' pointing to the file 'target_path'.
 
-    On Unix, this is implemented via symlink. On Windows, this is done by
-    creating a Windows shortcut file.
+  On Unix, this is implemented via symlink. On Windows, this is done by
+  creating a Windows shortcut file.
 
-    Args:
-        target_path: Destination path that the alias should point to.
-        alias_path: Path at which to create the new alias.
-    """
-    if platform.system() == 'Windows' and not alias_path.endswith('.lnk'):
-        alias_path += '.lnk'
-    if os.path.lexists(alias_path):
-        os.remove(alias_path)
-    if platform.system() == 'Windows':
-        from win32com import client
-        shell = client.Dispatch('WScript.Shell')
-        shortcut = shell.CreateShortCut(alias_path)
-        shortcut.Targetpath = target_path
-        shortcut.save()
-    else:
-        os.symlink(target_path, alias_path)
+  Args:
+    target_path: Destination path that the alias should point to.
+    alias_path: Path at which to create the new alias.
+  """
+  if platform.system() == 'Windows' and not alias_path.endswith('.lnk'):
+    alias_path += '.lnk'
+  if os.path.lexists(alias_path):
+    os.remove(alias_path)
+  if platform.system() == 'Windows':
+    from win32com import client
+    shell = client.Dispatch('WScript.Shell')
+    shortcut = shell.CreateShortCut(alias_path)
+    shortcut.Targetpath = target_path
+    shortcut.save()
+  else:
+    os.symlink(target_path, alias_path)
 
 
 def get_current_epoch_time():
-    """Current epoch time in milliseconds.
+  """Current epoch time in milliseconds.
 
-    Returns:
-        An integer representing the current epoch time in milliseconds.
-    """
-    return int(round(time.time() * 1000))
+  Returns:
+    An integer representing the current epoch time in milliseconds.
+  """
+  return int(round(time.time() * 1000))
 
 
 def get_current_human_time():
-    """Returns the current time in human readable format.
+  """Returns the current time in human readable format.
 
-    Returns:
-        The current time stamp in Month-Day-Year Hour:Min:Sec format.
-    """
-    return time.strftime("%m-%d-%Y %H:%M:%S ")
+  Returns:
+    The current time stamp in Month-Day-Year Hour:Min:Sec format.
+  """
+  return time.strftime("%m-%d-%Y %H:%M:%S ")
 
 
 def epoch_to_human_time(epoch_time):
-    """Converts an epoch timestamp to human readable time.
+  """Converts an epoch timestamp to human readable time.
 
-    This essentially converts an output of get_current_epoch_time to an output
-    of get_current_human_time.
+  This essentially converts an output of get_current_epoch_time to an output
+  of get_current_human_time.
 
-    Args:
-        epoch_time: An integer representing an epoch timestamp in milliseconds.
+  Args:
+    epoch_time: An integer representing an epoch timestamp in milliseconds.
 
-    Returns:
-        A time string representing the input time.
-        None if input param is invalid.
-    """
-    if isinstance(epoch_time, int):
-        try:
-            d = datetime.datetime.fromtimestamp(epoch_time / 1000)
-            return d.strftime("%m-%d-%Y %H:%M:%S ")
-        except ValueError:
-            return None
+  Returns:
+    A time string representing the input time.
+    None if input param is invalid.
+  """
+  if isinstance(epoch_time, int):
+    try:
+      d = datetime.datetime.fromtimestamp(epoch_time / 1000)
+      return d.strftime("%m-%d-%Y %H:%M:%S ")
+    except ValueError:
+      return None
 
 
 def get_timezone_olson_id():
-    """Return the Olson ID of the local (non-DST) timezone.
+  """Return the Olson ID of the local (non-DST) timezone.
 
-    Returns:
-        A string representing one of the Olson IDs of the local (non-DST)
-        timezone.
-    """
-    tzoffset = int(time.timezone / 3600)
-    gmt = None
-    if tzoffset <= 0:
-        gmt = "GMT+{}".format(-tzoffset)
-    else:
-        gmt = "GMT-{}".format(tzoffset)
-    return GMT_to_olson[gmt]
+  Returns:
+    A string representing one of the Olson IDs of the local (non-DST)
+    timezone.
+  """
+  tzoffset = int(time.timezone / 3600)
+  gmt = None
+  if tzoffset <= 0:
+    gmt = "GMT+{}".format(-tzoffset)
+  else:
+    gmt = "GMT-{}".format(tzoffset)
+  return GMT_to_olson[gmt]
 
 
 def find_files(paths, file_predicate):
-    """Locate files whose names and extensions match the given predicate in
-    the specified directories.
+  """Locate files whose names and extensions match the given predicate in
+  the specified directories.
 
-    Args:
-        paths: A list of directory paths where to find the files.
-        file_predicate: A function that returns True if the file name and
-            extension are desired.
+  Args:
+    paths: A list of directory paths where to find the files.
+    file_predicate: A function that returns True if the file name and
+      extension are desired.
 
-    Returns:
-        A list of files that match the predicate.
-    """
-    file_list = []
-    for path in paths:
-        p = abs_path(path)
-        for dirPath, _, fileList in os.walk(p):
-            for fname in fileList:
-                name, ext = os.path.splitext(fname)
-                if file_predicate(name, ext):
-                    file_list.append((dirPath, name, ext))
-    return file_list
+  Returns:
+    A list of files that match the predicate.
+  """
+  file_list = []
+  for path in paths:
+    p = abs_path(path)
+    for dirPath, _, fileList in os.walk(p):
+      for fname in fileList:
+        name, ext = os.path.splitext(fname)
+        if file_predicate(name, ext):
+          file_list.append((dirPath, name, ext))
+  return file_list
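
A small usage sketch for find_files; the predicate receives the file name and extension separately, and the paths below are hypothetical:

  def is_yaml(name, ext):
    # Select files ending in '.yaml'; the name part is ignored here.
    return ext == '.yaml'

  # Each match is returned as a (directory, name, extension) tuple.
  yaml_files = find_files(['~/testbeds', '/etc/mobly'], is_yaml)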
 
 
 def load_file_to_base64_str(f_path):
-    """Loads the content of a file into a base64 string.
+  """Loads the content of a file into a base64 string.
 
-    Args:
-        f_path: full path to the file including the file name.
+  Args:
+    f_path: full path to the file including the file name.
 
-    Returns:
-        A base64 string representing the content of the file in utf-8 encoding.
-    """
-    path = abs_path(f_path)
-    with io.open(path, 'rb') as f:
-        f_bytes = f.read()
-        base64_str = base64.b64encode(f_bytes).decode("utf-8")
-        return base64_str
+  Returns:
+    A base64 string representing the content of the file in utf-8 encoding.
+  """
+  path = abs_path(f_path)
+  with io.open(path, 'rb') as f:
+    f_bytes = f.read()
+    base64_str = base64.b64encode(f_bytes).decode("utf-8")
+    return base64_str
 
 
 def find_field(item_list, cond, comparator, target_field):
-    """Finds the value of a field in a dict object that satisfies certain
-    conditions.
+  """Finds the value of a field in a dict object that satisfies certain
+  conditions.
 
-    Args:
-        item_list: A list of dict objects.
-        cond: A param that defines the condition.
-        comparator: A function that checks if a dict satisfies the condition.
-        target_field: Name of the field whose value to be returned if an item
-            satisfies the condition.
+  Args:
+    item_list: A list of dict objects.
+    cond: A param that defines the condition.
+    comparator: A function that checks if a dict satisfies the condition.
+    target_field: Name of the field whose value to be returned if an item
+      satisfies the condition.
 
-    Returns:
-        Target value or None if no item satisfies the condition.
-    """
-    for item in item_list:
-        if comparator(item, cond) and target_field in item:
-            return item[target_field]
-    return None
+  Returns:
+    Target value or None if no item satisfies the condition.
+  """
+  for item in item_list:
+    if comparator(item, cond) and target_field in item:
+      return item[target_field]
+  return None
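
A minimal sketch of find_field with a hypothetical list of device records:

  devices = [{'serial': 'A1', 'model': 'pixel'},
             {'serial': 'B2', 'model': 'walleye'}]

  def model_is(item, cond):
    # comparator: True when the item's 'model' field equals the condition.
    return item.get('model') == cond

  serial = find_field(devices, 'walleye', model_is, 'serial')
  # serial == 'B2'; None is returned when no item satisfies the condition.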
 
 
 def rand_ascii_str(length):
-    """Generates a random string of specified length, composed of ascii letters
-    and digits.
+  """Generates a random string of specified length, composed of ascii letters
+  and digits.
 
-    Args:
-        length: The number of characters in the string.
+  Args:
+    length: The number of characters in the string.
 
-    Returns:
-        The random string generated.
-    """
-    letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]
-    return ''.join(letters)
+  Returns:
+    The random string generated.
+  """
+  letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]
+  return ''.join(letters)
 
 
 # Thread/Process related functions.
 def concurrent_exec(func, param_list, max_workers=30,
-                    raise_on_exception=False):
-    """Executes a function with different parameters pseudo-concurrently.
+          raise_on_exception=False):
+  """Executes a function with different parameters pseudo-concurrently.
 
-    This is basically a map function. Each element (should be an iterable) in
-    the param_list is unpacked and passed into the function. Due to Python's
-    GIL, there's no true concurrency. This is suited for IO-bound tasks.
+  This is basically a map function. Each element (should be an iterable) in
+  the param_list is unpacked and passed into the function. Due to Python's
+  GIL, there's no true concurrency. This is suited for IO-bound tasks.
 
-    Args:
-        func: The function that performs a task.
-        param_list: A list of iterables, each being a set of params to be
-            passed into the function.
-        max_workers: int, the number of workers to use for parallelizing the
-            tasks. By default, this is 30 workers.
-        raise_on_exception: bool, raises all of the task failures if any of the
-            tasks failed if `True`. By default, this is `False`.
+  Args:
+    func: The function that performs a task.
+    param_list: A list of iterables, each being a set of params to be
+      passed into the function.
+    max_workers: int, the number of workers to use for parallelizing the
+      tasks. By default, this is 30 workers.
+    raise_on_exception: bool, raises all of the task failures if any of the
+      tasks failed if `True`. By default, this is `False`.
 
-    Returns:
-        A list of return values from each function execution. If an execution
-        caused an exception, the exception object will be the corresponding
-        result.
+  Returns:
+    A list of return values from each function execution. If an execution
+    caused an exception, the exception object will be the corresponding
+    result.
 
-    Raises:
-        RuntimeError: If executing any of the tasks failed and
-          `raise_on_exception` is True.
-    """
-    with concurrent.futures.ThreadPoolExecutor(
-            max_workers=max_workers) as executor:
-        # Start the load operations and mark each future with its params
-        future_to_params = {executor.submit(func, *p): p for p in param_list}
-        return_vals = []
-        exceptions = []
-        for future in concurrent.futures.as_completed(future_to_params):
-            params = future_to_params[future]
-            try:
-                return_vals.append(future.result())
-            except Exception as exc:
-                logging.exception("{} generated an exception: {}".format(
-                    params, traceback.format_exc()))
-                return_vals.append(exc)
-                exceptions.append(exc)
-        if raise_on_exception and exceptions:
-            error_messages = []
-            if sys.version_info < (3, 0):
-                for exception in exceptions:
-                    error_messages.append(
-                        unicode(exception.message,
-                                encoding='utf-8',
-                                errors='replace'))
-            else:
-                for exception in exceptions:
-                    error_messages.append(''.join(
-                        traceback.format_exception(exception.__class__,
-                                                   exception,
-                                                   exception.__traceback__)))
-            raise RuntimeError('\n\n'.join(error_messages))
-        return return_vals
+  Raises:
+    RuntimeError: If executing any of the tasks failed and
+      `raise_on_exception` is True.
+  """
+  with concurrent.futures.ThreadPoolExecutor(
+      max_workers=max_workers) as executor:
+    # Start the load operations and mark each future with its params
+    future_to_params = {executor.submit(func, *p): p for p in param_list}
+    return_vals = []
+    exceptions = []
+    for future in concurrent.futures.as_completed(future_to_params):
+      params = future_to_params[future]
+      try:
+        return_vals.append(future.result())
+      except Exception as exc:
+        logging.exception("{} generated an exception: {}".format(
+          params, traceback.format_exc()))
+        return_vals.append(exc)
+        exceptions.append(exc)
+    if raise_on_exception and exceptions:
+      error_messages = []
+      if sys.version_info < (3, 0):
+        for exception in exceptions:
+          error_messages.append(
+            unicode(exception.message,
+                encoding='utf-8',
+                errors='replace'))
+      else:
+        for exception in exceptions:
+          error_messages.append(''.join(
+            traceback.format_exception(exception.__class__,
+                           exception,
+                           exception.__traceback__)))
+      raise RuntimeError('\n\n'.join(error_messages))
+    return return_vals
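
A usage sketch for concurrent_exec; each entry in param_list is unpacked into the function, and results are collected in completion order rather than input order:

  def add(a, b):
    return a + b

  # Runs add(1, 2), add(3, 4) and add(5, 6) on a small thread pool.
  results = concurrent_exec(add, [(1, 2), (3, 4), (5, 6)], max_workers=3)
  # results contains 3, 7 and 11; any exception raised by a task would
  # appear in the list in place of that task's return value.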
 
 
 def run_command(cmd,
-                stdout=None,
-                stderr=None,
-                shell=False,
-                timeout=None,
-                cwd=None,
-                env=None):
-    """Runs a command in a subprocess.
+        stdout=None,
+        stderr=None,
+        shell=False,
+        timeout=None,
+        cwd=None,
+        env=None):
+  """Runs a command in a subprocess.
 
-    This function is very similar to subprocess.check_output. The main
-    difference is that it returns the return code and std error output as well
-    as supporting a timeout parameter.
+  This function is very similar to subprocess.check_output. The main
+  difference is that it returns the return code and std error output as well
+  as supporting a timeout parameter.
 
-    Args:
-        cmd: string or list of strings, the command to run.
-            See subprocess.Popen() documentation.
-        stdout: file handle, the file handle to write std out to. If None is
-            given, then subprocess.PIPE is used. See subprocess.Popen()
-            documentation.
-        stderr: file handle, the file handle to write std err to. If None is
-            given, then subprocess.PIPE is used. See subprocess.Popen()
-            documentation.
-        shell: bool, True to run this command through the system shell,
-            False to invoke it directly. See subprocess.Popen() docs.
-        timeout: float, the number of seconds to wait before timing out.
-            If not specified, no timeout takes effect.
-        cwd: string, the path to change the child's current directory to before
-            it is executed. Note that this directory is not considered when
-            searching the executable, so you can't specify the program's path
-            relative to cwd.
-        env: dict, a mapping that defines the environment variables for the
-            new process. Default behavior is inheriting the current process'
-            environment.
+  Args:
+    cmd: string or list of strings, the command to run.
+      See subprocess.Popen() documentation.
+    stdout: file handle, the file handle to write std out to. If None is
+      given, then subprocess.PIPE is used. See subprocess.Popen()
+      documentation.
+    stderr: file handle, the file handle to write std err to. If None is
+      given, then subprocess.PIPE is used. See subprocess.Popen()
+      documentation.
+    shell: bool, True to run this command through the system shell,
+      False to invoke it directly. See subprocess.Popen() docs.
+    timeout: float, the number of seconds to wait before timing out.
+      If not specified, no timeout takes effect.
+    cwd: string, the path to change the child's current directory to before
+      it is executed. Note that this directory is not considered when
+      searching the executable, so you can't specify the program's path
+      relative to cwd.
+    env: dict, a mapping that defines the environment variables for the
+      new process. Default behavior is inheriting the current process'
+      environment.
 
-    Returns:
-        A 3-tuple consisting of the return code, the std output, and the
-            std error.
+  Returns:
+    A 3-tuple consisting of the return code, the std output, and the
+      std error.
 
-    Raises:
-        psutil.TimeoutExpired: The command timed out.
-    """
-    # Only import psutil when actually needed.
-    # psutil may cause import error in certain env. This way the utils module
-    # doesn't crash upon import.
-    import psutil
-    if stdout is None:
-        stdout = subprocess.PIPE
-    if stderr is None:
-        stderr = subprocess.PIPE
-    process = psutil.Popen(cmd,
-                           stdout=stdout,
-                           stderr=stderr,
-                           shell=shell,
-                           cwd=cwd,
-                           env=env)
-    timer = None
-    timer_triggered = threading.Event()
-    if timeout and timeout > 0:
-        # The wait method on process will hang when used with PIPEs with large
-        # outputs, so use a timer thread instead.
+  Raises:
+    psutil.TimeoutExpired: The command timed out.
+  """
+  # Only import psutil when actually needed.
+  # psutil may cause import error in certain env. This way the utils module
+  # doesn't crash upon import.
+  import psutil
+  if stdout is None:
+    stdout = subprocess.PIPE
+  if stderr is None:
+    stderr = subprocess.PIPE
+  process = psutil.Popen(cmd,
+               stdout=stdout,
+               stderr=stderr,
+               shell=shell,
+               cwd=cwd,
+               env=env)
+  timer = None
+  timer_triggered = threading.Event()
+  if timeout and timeout > 0:
+    # The wait method on process will hang when used with PIPEs with large
+    # outputs, so use a timer thread instead.
 
-        def timeout_expired():
-            timer_triggered.set()
-            process.terminate()
+    def timeout_expired():
+      timer_triggered.set()
+      process.terminate()
 
-        timer = threading.Timer(timeout, timeout_expired)
-        timer.start()
-    # If the command takes longer than the timeout, then the timer thread
-    # will kill the subprocess, which will make it terminate.
-    (out, err) = process.communicate()
-    if timer is not None:
-        timer.cancel()
-    if timer_triggered.is_set():
-        raise psutil.TimeoutExpired(timeout, pid=process.pid)
-    return (process.returncode, out, err)
+    timer = threading.Timer(timeout, timeout_expired)
+    timer.start()
+  # If the command takes longer than the timeout, then the timer thread
+  # will kill the subprocess, which will make it terminate.
+  (out, err) = process.communicate()
+  if timer is not None:
+    timer.cancel()
+  if timer_triggered.is_set():
+    raise psutil.TimeoutExpired(timeout, pid=process.pid)
+  return (process.returncode, out, err)
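
A hedged sketch of run_command usage; when no stdout/stderr handles are supplied, both streams are captured through pipes and come back as bytes:

  import logging

  from mobly import utils

  ret_code, out, err = utils.run_command(['ls', '-l', '/tmp'], timeout=5)
  if ret_code != 0:
    logging.error('Command failed: %s', err.decode('utf-8'))
  else:
    logging.info('Command output: %s', out.decode('utf-8'))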
 
 
 def start_standing_subprocess(cmd, shell=False, env=None):
-    """Starts a long-running subprocess.
+  """Starts a long-running subprocess.
 
-    This is not a blocking call and the subprocess started by it should be
-    explicitly terminated with stop_standing_subprocess.
+  This is not a blocking call and the subprocess started by it should be
+  explicitly terminated with stop_standing_subprocess.
 
-    For short-running commands, you should use subprocess.check_call, which
-    blocks.
+  For short-running commands, you should use subprocess.check_call, which
+  blocks.
 
-    Args:
-        cmd: string, the command to start the subprocess with.
-        shell: bool, True to run this command through the system shell,
-            False to invoke it directly. See subprocess.Popen() docs.
-        env: dict, a custom environment to run the standing subprocess. If not
-            specified, inherits the current environment. See subprocess.Popen()
-            docs.
+  Args:
+    cmd: string, the command to start the subprocess with.
+    shell: bool, True to run this command through the system shell,
+      False to invoke it directly. See subprocess.Popen() docs.
+    env: dict, a custom environment to run the standing subprocess. If not
+      specified, inherits the current environment. See subprocess.Popen()
+      docs.
 
-    Returns:
-        The subprocess that was started.
-    """
-    logging.debug('Starting standing subprocess with: %s', cmd)
-    proc = subprocess.Popen(cmd,
-                            stdin=subprocess.PIPE,
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE,
-                            shell=shell,
-                            env=env)
-    # Leaving stdin open causes problems for input, e.g. breaking the
-    # code.inspect() shell (http://stackoverflow.com/a/25512460/1612937), so
-    # explicitly close it assuming it is not needed for standing subprocesses.
-    proc.stdin.close()
-    proc.stdin = None
-    logging.debug('Started standing subprocess %d', proc.pid)
-    return proc
+  Returns:
+    The subprocess that was started.
+  """
+  logging.debug('Starting standing subprocess with: %s', cmd)
+  proc = subprocess.Popen(cmd,
+              stdin=subprocess.PIPE,
+              stdout=subprocess.PIPE,
+              stderr=subprocess.PIPE,
+              shell=shell,
+              env=env)
+  # Leaving stdin open causes problems for input, e.g. breaking the
+  # code.inspect() shell (http://stackoverflow.com/a/25512460/1612937), so
+  # explicitly close it assuming it is not needed for standing subprocesses.
+  proc.stdin.close()
+  proc.stdin = None
+  logging.debug('Started standing subprocess %d', proc.pid)
+  return proc
 
 
 def stop_standing_subprocess(proc):
-    """Stops a subprocess started by start_standing_subprocess.
+  """Stops a subprocess started by start_standing_subprocess.
 
-    Before killing the process, we check if the process is running; if it has
-    already terminated, Error is raised.
+  Before killing the process, we check if the process is running; if it has
+  already terminated, Error is raised.
 
-    Catches and ignores the PermissionError which only happens on Macs.
+  Catches and ignores the PermissionError which only happens on Macs.
 
-    Args:
-        proc: Subprocess to terminate.
+  Args:
+    proc: Subprocess to terminate.
 
-    Raises:
-        Error: if the subprocess could not be stopped.
-    """
-    # Only import psutil when actually needed.
-    # psutil may cause import error in certain env. This way the utils module
-    # doesn't crash upon import.
-    import psutil
-    pid = proc.pid
-    logging.debug('Stopping standing subprocess %d', pid)
-    process = psutil.Process(pid)
-    failed = []
+  Raises:
+    Error: if the subprocess could not be stopped.
+  """
+  # Only import psutil when actually needed.
+  # psutil may cause import error in certain env. This way the utils module
+  # doesn't crash upon import.
+  import psutil
+  pid = proc.pid
+  logging.debug('Stopping standing subprocess %d', pid)
+  process = psutil.Process(pid)
+  failed = []
+  try:
+    children = process.children(recursive=True)
+  except AttributeError:
+    # Handle versions <3.0.0 of psutil.
+    children = process.get_children(recursive=True)
+  for child in children:
     try:
-        children = process.children(recursive=True)
-    except AttributeError:
-        # Handle versions <3.0.0 of psutil.
-        children = process.get_children(recursive=True)
-    for child in children:
-        try:
-            child.kill()
-            child.wait(timeout=10)
-        except psutil.NoSuchProcess:
-            # Ignore if the child process has already terminated.
-            pass
-        except:
-            failed.append(child.pid)
-            logging.exception('Failed to kill standing subprocess %d',
-                              child.pid)
-    try:
-        process.kill()
-        process.wait(timeout=10)
+      child.kill()
+      child.wait(timeout=10)
     except psutil.NoSuchProcess:
-        # Ignore if the process has already terminated.
-        pass
+      # Ignore if the child process has already terminated.
+      pass
     except:
-        failed.append(pid)
-        logging.exception('Failed to kill standing subprocess %d', pid)
-    if failed:
-        raise Error('Failed to kill standing subprocesses: %s' % failed)
-    # Call wait and close pipes on the original Python object so we don't get
-    # runtime warnings.
-    if proc.stdout:
-        proc.stdout.close()
-    if proc.stderr:
-        proc.stderr.close()
-    proc.wait()
-    logging.debug('Stopped standing subprocess %d', pid)
+      failed.append(child.pid)
+      logging.exception('Failed to kill standing subprocess %d',
+                child.pid)
+  try:
+    process.kill()
+    process.wait(timeout=10)
+  except psutil.NoSuchProcess:
+    # Ignore if the process has already terminated.
+    pass
+  except:
+    failed.append(pid)
+    logging.exception('Failed to kill standing subprocess %d', pid)
+  if failed:
+    raise Error('Failed to kill standing subprocesses: %s' % failed)
+  # Call wait and close pipes on the original Python object so we don't get
+  # runtime warnings.
+  if proc.stdout:
+    proc.stdout.close()
+  if proc.stderr:
+    proc.stderr.close()
+  proc.wait()
+  logging.debug('Stopped standing subprocess %d', pid)
 
 
 def wait_for_standing_subprocess(proc, timeout=None):
-    """Waits for a subprocess started by start_standing_subprocess to finish
-    or times out.
+  """Waits for a subprocess started by start_standing_subprocess to finish
+  or times out.
 
-    Propagates the exception raised by the subprocess.wait(.) function.
-    The subprocess.TimeoutExpired exception is raised if the process timed-out
-    rather than terminating.
+  Propagates the exception raised by the subprocess.wait(.) function.
+  The subprocess.TimeoutExpired exception is raised if the process timed-out
+  rather than terminating.
 
-    If no exception is raised: the subprocess terminated on its own. No need
-    to call stop_standing_subprocess() to kill it.
+  If no exception is raised: the subprocess terminated on its own. No need
+  to call stop_standing_subprocess() to kill it.
 
-    If an exception is raised: the subprocess is still alive - it did not
-    terminate. Either call stop_standing_subprocess() to kill it, or call
-    wait_for_standing_subprocess() to keep waiting for it to terminate on its
-    own.
+  If an exception is raised: the subprocess is still alive - it did not
+  terminate. Either call stop_standing_subprocess() to kill it, or call
+  wait_for_standing_subprocess() to keep waiting for it to terminate on its
+  own.
 
-    If the corresponding subprocess command generates a large amount of output
-    and this method is called with a timeout value, then the command can hang
-    indefinitely. See http://go/pylib/subprocess.html#subprocess.Popen.wait
+  If the corresponding subprocess command generates a large amount of output
+  and this method is called with a timeout value, then the command can hang
+  indefinitely. See http://go/pylib/subprocess.html#subprocess.Popen.wait
 
-    This function does not support Python 2.
+  This function does not support Python 2.
 
-    Args:
-        proc: Subprocess to wait for.
-        timeout: An integer number of seconds to wait before timing out.
-    """
-    proc.wait(timeout)
+  Args:
+    proc: Subprocess to wait for.
+    timeout: An integer number of seconds to wait before timing out.
+  """
+  proc.wait(timeout)
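
A lifecycle sketch tying the standing-subprocess helpers together; the sleep command is just an illustrative long-running process:

  import subprocess

  from mobly import utils

  proc = utils.start_standing_subprocess('sleep 60', shell=True)
  try:
    # Give the process ten seconds to exit on its own; TimeoutExpired
    # means it is still alive and must be stopped explicitly.
    utils.wait_for_standing_subprocess(proc, timeout=10)
  except subprocess.TimeoutExpired:
    utils.stop_standing_subprocess(proc)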
 
 
 def get_available_host_port():
-    """Gets a host port number available for adb forward.
+  """Gets a host port number available for adb forward.
 
-    Returns:
-        An integer representing a port number on the host available for adb
-        forward.
+  Returns:
+    An integer representing a port number on the host available for adb
+    forward.
 
-    Raises:
-        Error: when no port is found after MAX_PORT_ALLOCATION_RETRY times.
-    """
-    # Only import adb module if needed.
-    from mobly.controllers.android_device_lib import adb
-    for _ in range(MAX_PORT_ALLOCATION_RETRY):
-        port = portpicker.PickUnusedPort()
-        # Make sure adb is not using this port so we don't accidentally
-        # interrupt ongoing runs by trying to bind to the port.
-        if port not in adb.list_occupied_adb_ports():
-            return port
-    raise Error('Failed to find available port after {} retries'.format(
-        MAX_PORT_ALLOCATION_RETRY))
+  Raises:
+    Error: when no port is found after MAX_PORT_ALLOCATION_RETRY times.
+  """
+  # Only import adb module if needed.
+  from mobly.controllers.android_device_lib import adb
+  for _ in range(MAX_PORT_ALLOCATION_RETRY):
+    port = portpicker.PickUnusedPort()
+    # Make sure adb is not using this port so we don't accidentally
+    # interrupt ongoing runs by trying to bind to the port.
+    if port not in adb.list_occupied_adb_ports():
+      return port
+  raise Error('Failed to find available port after {} retries'.format(
+    MAX_PORT_ALLOCATION_RETRY))
 
 
 def grep(regex, output):
-    """Similar to linux's `grep`, this returns the line in an output stream
-    that matches a given regex pattern.
+  """Similar to linux's `grep`, this returns the line in an output stream
+  that matches a given regex pattern.
 
-    It does not rely on the `grep` binary and is not sensitive to line endings,
-    so it can be used cross-platform.
+  It does not rely on the `grep` binary and is not sensitive to line endings,
+  so it can be used cross-platform.
 
-    Args:
-        regex: string, a regex that matches the expected pattern.
-        output: byte string, the raw output of the adb cmd.
+  Args:
+    regex: string, a regex that matches the expected pattern.
+    output: byte string, the raw output of the adb cmd.
 
-    Returns:
-        A list of strings, all of which are output lines that match the
-        regex pattern.
-    """
-    lines = output.decode('utf-8').strip().splitlines()
-    results = []
-    for line in lines:
-        if re.search(regex, line):
-            results.append(line.strip())
-    return results
+  Returns:
+    A list of strings, all of which are output lines that match the
+    regex pattern.
+  """
+  lines = output.decode('utf-8').strip().splitlines()
+  results = []
+  for line in lines:
+    if re.search(regex, line):
+      results.append(line.strip())
+  return results
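
A quick example of grep on raw byte output, e.g. from an adb call:

  raw_output = b'wlan0: state UP\r\nlo: state UNKNOWN\neth0: state DOWN\n'
  up_lines = grep('state UP', raw_output)
  # up_lines == ['wlan0: state UP']; carriage returns are stripped, so the
  # same pattern works on Windows- and Unix-style line endings.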
 
 
 def cli_cmd_to_string(args):
-    """Converts a cmd arg list to string.
+  """Converts a cmd arg list to string.
 
-    Args:
-        args: list of strings, the arguments of a command.
+  Args:
+    args: list of strings, the arguments of a command.
 
-    Returns:
-        String representation of the command.
-    """
-    if isinstance(args, str):
-        # Return directly if it's already a string.
-        return args
-    return ' '.join([pipes.quote(arg) for arg in args])
+  Returns:
+    String representation of the command.
+  """
+  if isinstance(args, str):
+    # Return directly if it's already a string.
+    return args
+  return ' '.join([pipes.quote(arg) for arg in args])
 
 
 def get_settable_properties(cls):
-    """Gets the settable properties of a class.
+  """Gets the settable properties of a class.
 
-    Only returns the explicitly defined properties with setters.
+  Only returns the explicitly defined properties with setters.
 
-    Args:
-        cls: A class in Python.
-    """
-    results = []
-    for attr, value in vars(cls).items():
-        if isinstance(value, property) and value.fset is not None:
-            results.append(attr)
-    return results
+  Args:
+    cls: A class in Python.
+  """
+  results = []
+  for attr, value in vars(cls).items():
+    if isinstance(value, property) and value.fset is not None:
+      results.append(attr)
+  return results
 
 
 def find_subclasses_in_module(base_classes, module):
-    """Finds the subclasses of the given classes in the given module.
+  """Finds the subclasses of the given classes in the given module.
 
-    Args:
-        base_classes: list of classes, the base classes to look for the
-            subclasses of in the module.
-        module: module, the module to look for the subclasses in.
+  Args:
+    base_classes: list of classes, the base classes to look for the
+      subclasses of in the module.
+    module: module, the module to look for the subclasses in.
 
-    Returns:
-      A list of all of the subclasses found in the module.
-    """
-    subclasses = []
-    for _, module_member in module.__dict__.items():
-        if inspect.isclass(module_member):
-            for base_class in base_classes:
-                if issubclass(module_member, base_class):
-                    subclasses.append(module_member)
-    return subclasses
+  Returns:
+    A list of all of the subclasses found in the module.
+  """
+  subclasses = []
+  for _, module_member in module.__dict__.items():
+    if inspect.isclass(module_member):
+      for base_class in base_classes:
+        if issubclass(module_member, base_class):
+          subclasses.append(module_member)
+  return subclasses
 
 
 def find_subclass_in_module(base_class, module):
-    """Finds the single subclass of the given base class in the given module.
+  """Finds the single subclass of the given base class in the given module.
 
-    Args:
-      base_class: class, the base class to look for a subclass of in the module.
-      module: module, the module to look for the single subclass in.
+  Args:
+    base_class: class, the base class to look for a subclass of in the module.
+    module: module, the module to look for the single subclass in.
 
-    Returns:
-      The single subclass of the given base class.
+  Returns:
+    The single subclass of the given base class.
 
-    Raises:
-      ValueError: If the number of subclasses found was not exactly one.
-    """
-    subclasses = find_subclasses_in_module([base_class], module)
-    if len(subclasses) != 1:
-        raise ValueError('Expected 1 subclass of %s per module, found %s.' %
-                         (base_class.__name__,
-                          [subclass.__name__ for subclass in subclasses]))
-    return subclasses[0]
+  Raises:
+    ValueError: If the number of subclasses found was not exactly one.
+  """
+  subclasses = find_subclasses_in_module([base_class], module)
+  if len(subclasses) != 1:
+    raise ValueError('Expected 1 subclass of %s per module, found %s.' %
+             (base_class.__name__,
+              [subclass.__name__ for subclass in subclasses]))
+  return subclasses[0]
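
A sketch of how the subclass lookup is typically used to pick out the single test class defined in a dynamically loaded module; the module name is hypothetical:

  import importlib

  from mobly import base_test
  from mobly import utils

  # 'my_tests.hello_test' is a placeholder; it is expected to define exactly
  # one BaseTestClass subclass, otherwise ValueError is raised.
  module = importlib.import_module('my_tests.hello_test')
  test_class = utils.find_subclass_in_module(base_test.BaseTestClass, module)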
diff --git a/setup.py b/setup.py
index 99b4459..d90bf33 100755
--- a/setup.py
+++ b/setup.py
@@ -18,52 +18,52 @@
 import sys
 
 install_requires = [
-    'future', 'portpicker', 'psutil>=5.4.4', 'pyserial', 'pyyaml',
-    'timeout_decorator'
+  'future', 'portpicker', 'psutil>=5.4.4', 'pyserial', 'pyyaml',
+  'timeout_decorator'
 ]
 
 if platform.system() == 'Windows':
-    install_requires.append('pywin32')
+  install_requires.append('pywin32')
 
 
 class PyTest(test.test):
-    """Class used to execute unit tests using PyTest. This allows us to execute
-    unit tests without having to install the package.
-    """
+  """Class used to execute unit tests using PyTest. This allows us to execute
+  unit tests without having to install the package.
+  """
 
-    def finalize_options(self):
-        test.test.finalize_options(self)
-        self.test_args = ['-x', "tests/mobly"]
-        self.test_suite = True
+  def finalize_options(self):
+    test.test.finalize_options(self)
+    self.test_args = ['-x', "tests/mobly"]
+    self.test_suite = True
 
-    def run_tests(self):
-        import pytest
-        errno = pytest.main(self.test_args)
-        sys.exit(errno)
+  def run_tests(self):
+    import pytest
+    errno = pytest.main(self.test_args)
+    sys.exit(errno)
 
 
 def main():
-    setuptools.setup(
-        name='mobly',
-        version='1.10.1',
-        maintainer='Ang Li',
-        maintainer_email='mobly-github@googlegroups.com',
-        description='Automation framework for special end-to-end test cases',
-        license='Apache2.0',
-        url='https://github.com/google/mobly',
-        download_url='https://github.com/google/mobly/tarball/1.10.1',
-        packages=setuptools.find_packages(exclude=['tests']),
-        include_package_data=False,
-        scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
-        tests_require=[
-            'mock',
-            'pytest',
-            'pytz',
-        ],
-        install_requires=install_requires,
-        cmdclass={'test': PyTest},
-    )
+  setuptools.setup(
+    name='mobly',
+    version='1.10.1',
+    maintainer='Ang Li',
+    maintainer_email='mobly-github@googlegroups.com',
+    description='Automation framework for special end-to-end test cases',
+    license='Apache2.0',
+    url='https://github.com/google/mobly',
+    download_url='https://github.com/google/mobly/tarball/1.10.1',
+    packages=setuptools.find_packages(exclude=['tests']),
+    include_package_data=False,
+    scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
+    tests_require=[
+      'mock',
+      'pytest',
+      'pytz',
+    ],
+    install_requires=install_requires,
+    cmdclass={'test': PyTest},
+  )
 
 
 if __name__ == '__main__':
-    main()
+  main()
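
With cmdclass={'test': PyTest} registered above, running `python setup.py test` invokes the unit tests under tests/mobly through pytest without requiring the package to be installed first; pytest itself is only imported when that command actually runs.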