[swarming_retry] Import Task classes from RECIPE_MODULES

This lets us deindent the bodies of Task subclasses.

This change should be a functional no-op; the only expectation file
diffs are exception line number changes.

Change-Id: I288e15b16abb4d3dcfa192d7c6b9edf2be705232
diff --git a/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high.json b/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high.json
index c92c745..72b7dd1 100644
--- a/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high.json
+++ b/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high.json
@@ -126,7 +126,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -235,7 +235,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -344,7 +344,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -453,7 +453,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -562,7 +562,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
diff --git a/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high_mixed.json b/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high_mixed.json
index 61a40e2..f64027f 100644
--- a/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high_mixed.json
+++ b/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_high_mixed.json
@@ -313,7 +313,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -328,7 +328,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -343,7 +343,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -358,7 +358,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -373,7 +373,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -388,7 +388,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -502,7 +502,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -611,7 +611,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -720,7 +720,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -829,7 +829,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
diff --git a/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_low.json b/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_low.json
index c62af6b..d20fde9 100644
--- a/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_low.json
+++ b/recipe_modules/swarming_retry/examples/full.expected/last_task_max_attempts_low.json
@@ -126,7 +126,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
diff --git a/recipe_modules/swarming_retry/examples/full.expected/max_attempts_three.json b/recipe_modules/swarming_retry/examples/full.expected/max_attempts_three.json
index 46e222e..8ffbacb 100644
--- a/recipe_modules/swarming_retry/examples/full.expected/max_attempts_three.json
+++ b/recipe_modules/swarming_retry/examples/full.expected/max_attempts_three.json
@@ -126,7 +126,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -235,7 +235,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -344,7 +344,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
diff --git a/recipe_modules/swarming_retry/examples/full.expected/raising_process_results.json b/recipe_modules/swarming_retry/examples/full.expected/raising_process_results.json
index afcc59c..18b073f 100644
--- a/recipe_modules/swarming_retry/examples/full.expected/raising_process_results.json
+++ b/recipe_modules/swarming_retry/examples/full.expected/raising_process_results.json
@@ -126,7 +126,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
@@ -235,7 +235,7 @@
       "@@@STEP_LOG_LINE@exception@Traceback (most recent call last):@@@",
       "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/api.py\", line 415, in _launch_and_collect@@@",
       "@@@STEP_LOG_LINE@exception@    task.process_result()@@@",
-      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 130, in process_result@@@",
+      "@@@STEP_LOG_LINE@exception@  File \"RECIPE_REPO[fuchsia]/recipe_modules/swarming_retry/examples/full.py\", line 111, in process_result@@@",
       "@@@STEP_LOG_LINE@exception@    raise self._api.step.StepFailure('something failed')@@@",
       "@@@STEP_LOG_LINE@exception@StepFailure: something failed@@@",
       "@@@STEP_LOG_END@exception@@@"
diff --git a/recipe_modules/swarming_retry/examples/full.py b/recipe_modules/swarming_retry/examples/full.py
index d032361..dcf9755 100644
--- a/recipe_modules/swarming_retry/examples/full.py
+++ b/recipe_modules/swarming_retry/examples/full.py
@@ -2,10 +2,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import time
-
 from recipe_engine.recipe_api import Property
 
+from RECIPE_MODULES.fuchsia.swarming_retry import api as swarming_retry_api
+
 DEPS = [
     'fuchsia/status_check',
     'fuchsia/swarming_retry',
@@ -56,102 +56,83 @@
 }  # yapf: disable
 
 
-def create_task_class(api):
+class Task(swarming_retry_api.Task):
+  """Required subclass for testing swarming_retry.
 
-  class Task(api.swarming_retry.Task):
-    """Required subclass for testing swarming_retry.
+  The base class is now importable from RECIPE_MODULES, so this can be top-level.
+  """
 
-    Defined inside a function because base class is inside api object.
+  def __init__(self, initial_task_id, *args, **kwargs):
+    """Construct a Task object.
+
+    Args:
+      initial_task_id (int or str): integer decimal value (since this needs
+        to be incremented but is then used as a str later this method
+        accepts both int and str types to minimize confusion, so long as
+        int(initial_task_id) works)
     """
 
-    def __init__(self, initial_task_id, *args, **kwargs):
-      """Construct a Task object.
+    super(Task, self).__init__(*args, **kwargs)
+    self._next_task_id = int(initial_task_id)
 
-      Args:
-        initial_task_id (int or str): integer decimal value (since this needs
-          to be incremented but is then used as a str later this method
-          accepts both int and str types to minimize confusion, so long as
-          int(initial_task_id) works)
-      """
+  def launch(self):
+    kwargs = {
+        'index': len(self.attempts),
+        'task_id': str(self._next_task_id),
+    }
 
-      kwargs.setdefault('api', api)
-      super(Task, self).__init__(*args, **kwargs)
-      self._next_task_id = int(initial_task_id)
+    self._next_task_id += 1
 
-    def launch(self):
-      kwargs = {
-          'index': len(self.attempts),
-          'task_id': str(self._next_task_id),
-      }
+    # This looks funny but it's needed to ensure coverage of
+    # Attempt.task_ui_link.
+    if self._next_task_id % 2 == 0:
+      kwargs['host'] = 'testhost'
+    else:
+      kwargs['task_ui_link'] = ('https://testhost/task?id=%s' %
+                                kwargs['task_id'])
 
-      self._next_task_id += 1
+    attempt = self._api.swarming_retry.Attempt(**kwargs)
+    step = self._api.step('launch %s' % self.name, None)
+    step.presentation.step_summary_text = attempt.task_id
 
-      # This looks funny but it's needed to ensure coverage of
-      # Attempt.task_ui_link.
-      if self._next_task_id % 2 == 0:
-        kwargs['host'] = 'testhost'
-      else:
-        kwargs['task_ui_link'] = ('https://testhost/task?id=%s' %
-                                  kwargs['task_id'])
-
-      attempt = api.swarming_retry.Attempt(**kwargs)
-      step = api.step('launch %s' % self.name, None)
-      step.presentation.step_summary_text = attempt.task_id
-
-      self.attempts.append(attempt)
-      return attempt
-
-  return Task
+    self.attempts.append(attempt)
+    return attempt
 
 
-def create_task(api, name, initial_task_id, **kwargs):
-  Task = create_task_class(api)  # pylint: disable=invalid-name
-  return Task(name=name, initial_task_id=initial_task_id, **kwargs)
+class InternalFailureTask(Task):
+
+  def process_result(self):
+    self.attempts[-1].failure_reason = 'internal failure'
 
 
-def create_internal_failure_task(api, name, initial_task_id, **kwargs):
-  Task = create_task_class(api)  # pylint: disable=invalid-name
+class RaisingTask(Task):
 
-  class InternalFailureTask(Task):
-
-    def process_result(self):
-      self.attempts[-1].failure_reason = 'internal failure'
-
-  return InternalFailureTask(
-      name=name, initial_task_id=initial_task_id, **kwargs)
+  def process_result(self):
+    raise self._api.step.StepFailure('something failed')
 
 
-def create_raising_task(api, name, initial_task_id, **kwargs):
-  Task = create_task_class(api)  # pylint: disable=invalid-name
+class LedTask(swarming_retry_api.LedTask):
 
-  class RaisingTask(Task):
-
-    def process_result(self):
-      raise self._api.step.StepFailure('something failed')
-
-  return RaisingTask(name=name, initial_task_id=initial_task_id, **kwargs)
+  def __init__(self, initial_task_id, api, **kwargs):
+    del initial_task_id  # Unused.
+    super(LedTask, self).__init__(
+        api.led('get-build', 'builder'), api=api, **kwargs)
 
 
-def create_led_task(api, name, initial_task_id, **kwargs):
-  del initial_task_id  # Unused.
+class TriggeredTask(swarming_retry_api.TriggeredTask):
 
-  return api.swarming_retry.LedTask(
-      api=api, name=name, led_data=api.led('get-build', 'builder'), **kwargs)
+  def __init__(self, api, name, initial_task_id, **kwargs):
+    del initial_task_id  # Unused.
 
+    dimensions = {
+        'pool': 'pool',
+        'device_type': 'device_type',
+    }
 
-def create_triggered_task(api, name, initial_task_id, **kwargs):
-  del initial_task_id  # Unused.
+    request = api.swarming.task_request().with_name(name)
+    request = request.with_slice(0, request[0].with_dimensions(**dimensions))
 
-  dimensions = {
-      'pool': 'pool',
-      'device_type': 'device_type',
-  }
-
-  request = api.swarming.task_request().with_name(name)
-  request = request.with_slice(0, request[0].with_dimensions(**dimensions))
-
-  return api.swarming_retry.TriggeredTask(
-      api=api, name=name, request=request, **kwargs)
+    super(TriggeredTask, self).__init__(request, api=api, name=name, **kwargs)
 
 
 # pylint: disable=invalid-name
@@ -159,11 +140,11 @@
              launch_deadline_time):
 
   task_types = {
-      'test': create_task,
-      'internal_failure': create_internal_failure_task,
-      'raising': create_raising_task,
-      'led': create_led_task,
-      'triggered': create_triggered_task,
+      'test': Task,
+      'internal_failure': InternalFailureTask,
+      'raising': RaisingTask,
+      'led': LedTask,
+      'triggered': TriggeredTask,
   }
 
   _create_task = task_types[task_type]  # pylint: disable=invalid-name
diff --git a/recipe_modules/testing/api.py b/recipe_modules/testing/api.py
index 7c5c67b..854895d 100644
--- a/recipe_modules/testing/api.py
+++ b/recipe_modules/testing/api.py
@@ -2,7 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import collections
 import operator
 
 import attr
@@ -10,6 +9,7 @@
 from recipe_engine import recipe_api
 from recipe_engine.config_types import Path
 
+from RECIPE_MODULES.fuchsia.swarming_retry import api as swarming_retry_api
 from RECIPE_MODULES.fuchsia.testsharder import api as testsharder_api
 
 # List of available targets.
@@ -268,286 +268,270 @@
               'Found failure string in log %s: %s' % (log_name, fail_str))
 
 
-def create_task(api, *args, **kwargs):
-  """Create a Task object.
+class Task(swarming_retry_api.TriggeredTask):
 
-  The base class of the class is inside the api object, so it can't be
-  top-level or otherwise defined at module load time. Defining it in this
-  function as an alternative.
+  def __init__(self, api, name, request, uses_legacy_qemu, targets_fuchsia,
+               symbolize_tool, llvm_symbolizer, tests, debug_symbol_gcs_bucket,
+               *args, **kwargs):
+    super(Task, self).__init__(
+        api=api, name=name, request=request, *args, **kwargs)
+    self._uses_legacy_qemu = uses_legacy_qemu
+    self._targets_fuchsia = targets_fuchsia
+    self._symbolize_tool = symbolize_tool
+    self._llvm_symbolizer = llvm_symbolizer
+    self._tests = tests
+    self._debug_symbol_gcs_bucket = debug_symbol_gcs_bucket
 
-  For full args list see Task.__init__ a few lines down.
-  """
+    # Test shards with the 'multiplied:' prefix come from
+    # tools/integration/testsharder/shard.go in fuchsia.git. They were
+    # specifically created to run a test or set of tests many times to look
+    # for flakes. It doesn't make sense to retry these when they fail--the
+    # goal is to see if they fail, not to get them to pass.
+    if name.startswith('multiplied:'):
+      self.max_attempts = 1
 
-  class Task(api.swarming_retry.TriggeredTask):
+  def process_result(self):
+    """Unpacks the results archive produced by a test shard."""
 
-    def __init__(self, api, name, request, uses_legacy_qemu, targets_fuchsia,
-                 symbolize_tool, llvm_symbolizer, tests,
-                 debug_symbol_gcs_bucket, *args, **kwargs):
-      super(Task, self).__init__(
-          api=api, name=name, request=request, *args, **kwargs)
-      self._uses_legacy_qemu = uses_legacy_qemu
-      self._targets_fuchsia = targets_fuchsia
-      self._symbolize_tool = symbolize_tool
-      self._llvm_symbolizer = llvm_symbolizer
-      self._tests = tests
-      self._debug_symbol_gcs_bucket = debug_symbol_gcs_bucket
+    attempt = self.attempts[-1]
+    assert attempt.result
+    result = attempt.result
 
-      # Test shards with the 'multiplied:' prefix come from
-      # tools/integration/testsharder/shard.go in fuchsia.git. They were
-      # specifically created to run a test or set of tests many times to look
-      # for flakes. It doesn't make sense to retry these when they fail--the
-      # goal is to see if they fail not to get them to pass.
-      if name.startswith('multiplied:'):
-        self.max_attempts = 1
+    if result.isolated_outputs:
+      attempt.task_outputs_link = result.isolated_outputs.url
 
-    def process_result(self):
-      """Unpacks the results archive produced by a test shard."""
+    if result.state == self._api.swarming.TaskState.TIMED_OUT:
+      attempt.failure_reason = 'timed out'
 
-      attempt = self.attempts[-1]
-      assert attempt.result
-      result = attempt.result
+    attempt.test_results = None
 
-      if result.isolated_outputs:
-        attempt.task_outputs_link = result.isolated_outputs.url
+    with self._api.step.nest(result.name):
+      attempt.symbolizer_output = result.output_dir.join(
+          self._api.symbolize.LOG)
+      attempt.symbolizer_json_output = self._api.path['cleanup'].join(
+          '%s-%d-%s' %
+          (self.name, attempt.index, self._api.symbolize.OUTPUT_JSON))
+      # Figure out what happened to the swarming task.
+      if result.output:
+        # Fuchsia crashes have to be symbolized on the host.
+        if self._targets_fuchsia:
+          attempt.logs['symbolized log'] = self._api.symbolize(
+              symbolize_tool=self._symbolize_tool,
+              debug_symbol_gcs_bucket=self._debug_symbol_gcs_bucket,
+              llvm_symbolizer=self._llvm_symbolizer,
+              data=result.output,
+              symbolizer_output=attempt.symbolizer_output,
+              json_output=attempt.symbolizer_json_output,
+          )
+        # Non-Fuchsia should already be symbolized, and attempting to use
+        # the symbolizer may fail, if e.g. it was built on Mac and this is
+        # running on Linux.
+        else:
+          attempt.logs['symbolized log'] = result.output
 
-      if result.state == self._api.swarming.TaskState.TIMED_OUT:
-        attempt.failure_reason = 'timed out'
+      if 'KERNEL PANIC' in result.output:
+        attempt.failure_reason = 'KERNEL PANIC'  # pragma: no cover
 
-      attempt.test_results = None
+      self._check_logs_for_failures(attempt)
 
-      with self._api.step.nest(result.name):
-        attempt.symbolizer_output = result.output_dir.join(
-            self._api.symbolize.LOG)
-        attempt.symbolizer_json_output = self._api.path['cleanup'].join(
-            '%s-%d-%s' %
-            (self.name, attempt.index, self._api.symbolize.OUTPUT_JSON))
-        # Figure out what happened to the swarming task.
-        if result.output:
-          # Fuchsia crashes have to be symbolized on the host.
-          if self._targets_fuchsia:
-            attempt.logs['symbolized log'] = self._api.symbolize(
-                symbolize_tool=self._symbolize_tool,
-                debug_symbol_gcs_bucket=self._debug_symbol_gcs_bucket,
-                llvm_symbolizer=self._llvm_symbolizer,
-                data=result.output,
-                symbolizer_output=attempt.symbolizer_output,
-                json_output=attempt.symbolizer_json_output,
-            )
-          # Non-Fuchsia should already be symbolized, and attempting to use
-          # the symbolizer may fail, if e.g. it was built on Mac and this is
-          # running on Linux.
-          else:
-            attempt.logs['symbolized log'] = result.output
+      if result.success:
+        self._process_outputs(attempt)
 
-        if 'KERNEL PANIC' in result.output:
-          attempt.failure_reason = 'KERNEL PANIC'  # pragma: no cover
+  def _process_outputs(self, attempt):
+    """Reads the test results and output files of a swarming TaskResult.
 
-        self._check_logs_for_failures(attempt)
+    Sets attempt.test_results if successful.
 
-        if result.success:
-          self._process_outputs(attempt)
+    Args:
+      attempt (swarming_retry.Attempt): the attempt to process
+    """
 
-    def _process_outputs(self, attempt):
-      """Reads the test results and output files of a swarming TaskResult.
+    assert attempt.result
+    result = attempt.result
 
-      Sets attempt.test_results if successful.
-
-      Args:
-        attempt (swarming_retry.Attempt): the attempt to process
-      """
-
-      assert attempt.result
-      result = attempt.result
-
-      # Extract results if the task was not subject to an infra failure;
-      # otherwise, a step failure will be raised on exiting the
-      # defer_results() scope.
-      attempt.test_results_archive = None
-      for relative_path, absolute_path in sorted(result.outputs.iteritems()):
-        if relative_path in [
-            self._api.testing_requests.TEST_RESULTS_ARCHIVE_NAME,
-            self._api.testing_requests.TEST_RESULTS_MINFS_NAME
-        ]:
-          attempt.test_results_archive = absolute_path
-
-      assert attempt.test_results_archive, (
-          'test archive not found amongst outputs of task %s' % result.name)
-      self._parse_test_results(attempt)
-
-      attempt.logs[TEST_SUMMARY_JSON] = attempt.test_results.summary_lines
-
-      # Delete the archive so it doesn't get uploaded with the other files in
-      # the swarming task's output directory.
-      self._api.file.remove(
-          'remove %s' % self._api.path.basename(attempt.test_results_archive),
-          attempt.test_results_archive)
-
-    def _parse_test_results(self, attempt):
-      """Parse test results from attempt into a FuchsiaTestResults object.
-
-      Args:
-        attempt (swarming_retry.Attempt): the attempt to parse
-      """
-      assert attempt.result
-      result = attempt.result
-
-      results_dir = self._api.testing.results_dir_on_host.join(result.id)
-      # pylint: disable=protected-access
-      self._api.testing._extract_test_results_archive(
-          step_name='extract',
-          archive_path=attempt.test_results_archive,
-          directory=results_dir,
-          is_minfs=self._uses_legacy_qemu,
-      )
-      # pylint: enable=protected-access
-
-      attempt.test_results = FuchsiaTestResults(
-          from_fuchsia=self._targets_fuchsia,
-          results_dir=results_dir,
-          swarming_task_id=attempt.task_id,
-          symbolizer_json_output=attempt.symbolizer_json_output,
-          env_name=result.name,
-          tests=self._tests,
-          legacy_qemu=self._uses_legacy_qemu,
-          api=api,
-          symbolizer_output=attempt.symbolizer_output,
-          output_dir=result.output_dir,
-      )
-
-      failed_tests = attempt.test_results.failed_tests
-      if failed_tests:
-        attempt.failure_reason = '%d test(s) failed' % len(failed_tests)
-
-    def _check_logs_for_failures(self, attempt):
-      """Check for failure strings in logs.
-
-      Args:
-        attempt (swarming_retry.Attempt): the attempt to check for logs in
-      """
-      # Check serial log for failure messages
-      # TODO(9936): Replace with running binary tool once created.
-      fail_strings = ['DEVICE SUSPEND TIMED OUT', 'ASSERT FAILED']
-      log_path = attempt.result.output_dir.join(
-          self._api.testing_requests.SERIAL_LOG_NAME)
-      self._api.path.mock_add_paths(log_path)
-      if self._api.path.exists(log_path):
-        log_name = self._api.path.basename(log_path)
-        with self._api.step.nest('check log %s' % log_name) as presentation:
-          contents = self._api.file.read_text('read', log_path)
-          for fail_str in fail_strings:
-            if fail_str in contents:
-              presentation.logs[log_name] = contents.splitlines()
-              presentation.status = self._api.step.FAILURE
-              presentation.step_summary_text = 'found "%s"' % fail_str
-              attempt.failure_reason = ('found "%s" in %s' %
-                                        (fail_str, log_name))
-
-    def present_status(self, parent_step, attempt, **kwargs):
-      """Present an Attempt while showing progress in launch/collect step.
-
-      Args:
-        parent_step (Step): will always be 'passed tasks' or 'failed tasks'
-        attempt (Attempt): the Attempt to present
-      """
-      del kwargs, parent_step  # Unused.
-      with api.step.nest('%s (%s)' % (self.name, attempt.name)) as presentation:
-        self._present(
-            presentation,
-            attempt,
-            show_failures_in_red=False,
-            show_passed=False)
-
-    def present_attempt(self, _, attempt, category=None):
-      """Present an Attempt when summarizing results at the end of the run.
-
-      Args:
-        attempt (Attempt): the Attempt to present
-        category (str): the group of tasks ('passes', 'failures', or
-          'flakes') that this attempt should be presented under
-      """
-      show_failures_in_red = True
-      # The 'passes' category includes all attempts of all tasks that
-      # eventually passed, so it includes some failures. Show those in
-      # green so people don't get confused and think the overall task
-      # failed.
-      # TODO(fxb/36647) after this bug is fixed show these steps in
-      # red, but show parent steps of those in green.
-      if category == 'passes':
-        show_failures_in_red = False
-
-      name = '%s (%s)' % (attempt.name, 'pass' if attempt.success else 'fail')
-      with api.step.nest(name) as presentation:
-        if show_failures_in_red and not attempt.success:
-          presentation.status = self._api.step.FAILURE
-        self._present(
-            presentation,
-            attempt,
-            show_failures_in_red=show_failures_in_red,
-            show_passed=True,
-        )
-
-    def _present(self, presentation, attempt, show_failures_in_red,
-                 show_passed):
-      """Present an Attempt.
-
-      Choosing to do largely the same thing for both kinds of presentations.
-
-      Args:
-        presentation (StepPresentation): where to present the attempt info
-        attempt (api.swarming_retry.Attempt): object to present
-        show_failures_in_red (bool): show failures in red (for final
-            'flakes' and 'failures' steps) or not (for 'launch/collect'
-            progress and 'passes' steps)
-        show_passed (bool): show the names of passed tests (only done for
-            the end)
-
-      Note: the 'passes' step can have failures underneath it because the
-      first attempt can fail but the retry passed.
-      """
-      if attempt.result.duration_secs:
-        presentation.step_text = nice_time(attempt.result.duration_secs)
-
-      presentation.presentation.links['swarming task'] = attempt.task_ui_link
-      if attempt.task_outputs_link:
-        presentation.links['task outputs'] = attempt.task_outputs_link
-
-      if attempt.failure_reason:
-        presentation.step_summary_text = attempt.failure_reason
-
-      for log, data in attempt.logs.iteritems():
-        presentation.logs[log] = data
-
-      if attempt.test_results:
-        test_results = attempt.test_results
-
-        # Log the contents of each output file mentioned in the summary.
-        # Note this assumes the outputs are all valid UTF-8 (See fxb/9500).
-        for name, path in test_results.summary.get('outputs', {}).iteritems():
-          presentation.logs[name] = test_results.get_output(path).split('\n')
-
-        test_results.present_tests(
-            show_failures_in_red=show_failures_in_red, show_passed=show_passed)
-
-      for log_name in [
-          self._api.testing_requests.SYSLOG_NAME,
-          self._api.testing_requests.SERIAL_LOG_NAME
+    # Extract results if the task was not subject to an infra failure;
+    # otherwise, a step failure will be raised on exiting the
+    # defer_results() scope.
+    attempt.test_results_archive = None
+    for relative_path, absolute_path in sorted(result.outputs.iteritems()):
+      if relative_path in [
+          self._api.testing_requests.TEST_RESULTS_ARCHIVE_NAME,
+          self._api.testing_requests.TEST_RESULTS_MINFS_NAME
       ]:
-        if log_name in attempt.result.outputs:
-          self._present_output_file(
-              name=log_name,
-              path=attempt.result.outputs[log_name],
-              step=presentation)
+        attempt.test_results_archive = absolute_path
 
-    def _present_output_file(self, name, path, step):
-      """Records file contents to the test results step's presentation."""
-      contents = self._api.file.read_text(
-          'read %s' % name,
-          path,
-          test_data='extra log contents',
+    assert attempt.test_results_archive, (
+        'test archive not found amongst outputs of task %s' % result.name)
+    self._parse_test_results(attempt)
+
+    attempt.logs[TEST_SUMMARY_JSON] = attempt.test_results.summary_lines
+
+    # Delete the archive so it doesn't get uploaded with the other files in
+    # the swarming task's output directory.
+    self._api.file.remove(
+        'remove %s' % self._api.path.basename(attempt.test_results_archive),
+        attempt.test_results_archive)
+
+  def _parse_test_results(self, attempt):
+    """Parse test results from attempt into a FuchsiaTestResults object.
+
+    Args:
+      attempt (swarming_retry.Attempt): the attempt to parse
+    """
+    assert attempt.result
+    result = attempt.result
+
+    results_dir = self._api.testing.results_dir_on_host.join(result.id)
+    # pylint: disable=protected-access
+    self._api.testing._extract_test_results_archive(
+        step_name='extract',
+        archive_path=attempt.test_results_archive,
+        directory=results_dir,
+        is_minfs=self._uses_legacy_qemu,
+    )
+    # pylint: enable=protected-access
+
+    attempt.test_results = FuchsiaTestResults(
+        from_fuchsia=self._targets_fuchsia,
+        results_dir=results_dir,
+        swarming_task_id=attempt.task_id,
+        symbolizer_json_output=attempt.symbolizer_json_output,
+        env_name=result.name,
+        tests=self._tests,
+        legacy_qemu=self._uses_legacy_qemu,
+        api=self._api,
+        symbolizer_output=attempt.symbolizer_output,
+        output_dir=result.output_dir,
+    )
+
+    failed_tests = attempt.test_results.failed_tests
+    if failed_tests:
+      attempt.failure_reason = '%d test(s) failed' % len(failed_tests)
+
+  def _check_logs_for_failures(self, attempt):
+    """Check for failure strings in logs.
+
+    Args:
+      attempt (swarming_retry.Attempt): the attempt to check for logs in
+    """
+    # Check serial log for failure messages
+    # TODO(9936): Replace with running binary tool once created.
+    fail_strings = ['DEVICE SUSPEND TIMED OUT', 'ASSERT FAILED']
+    log_path = attempt.result.output_dir.join(
+        self._api.testing_requests.SERIAL_LOG_NAME)
+    self._api.path.mock_add_paths(log_path)
+    if self._api.path.exists(log_path):
+      log_name = self._api.path.basename(log_path)
+      with self._api.step.nest('check log %s' % log_name) as presentation:
+        contents = self._api.file.read_text('read', log_path)
+        for fail_str in fail_strings:
+          if fail_str in contents:
+            presentation.logs[log_name] = contents.splitlines()
+            presentation.status = self._api.step.FAILURE
+            presentation.step_summary_text = 'found "%s"' % fail_str
+            attempt.failure_reason = ('found "%s" in %s' % (fail_str, log_name))
+
+  def present_status(self, parent_step, attempt, **kwargs):
+    """Present an Attempt while showing progress in launch/collect step.
+
+    Args:
+      parent_step (Step): will always be 'passed tasks' or 'failed tasks'
+      attempt (Attempt): the Attempt to present
+    """
+    del kwargs, parent_step  # Unused.
+    with self._api.step.nest('%s (%s)' %
+                             (self.name, attempt.name)) as presentation:
+      self._present(
+          presentation, attempt, show_failures_in_red=False, show_passed=False)
+
+  def present_attempt(self, _, attempt, category=None):
+    """Present an Attempt when summarizing results at the end of the run.
+
+    Args:
+      attempt (Attempt): the Attempt to present
+      category (str): the group of tasks ('passes', 'failures', or
+        'flakes') that this attempt should be presented under
+    """
+    show_failures_in_red = True
+    # The 'passes' category includes all attempts of all tasks that
+    # eventually passed, so it includes some failures. Show those in
+    # green so people don't get confused and think the overall task
+    # failed.
+    # TODO(fxb/36647) after this bug is fixed show these steps in
+    # red, but show parent steps of those in green.
+    if category == 'passes':
+      show_failures_in_red = False
+
+    name = '%s (%s)' % (attempt.name, 'pass' if attempt.success else 'fail')
+    with self._api.step.nest(name) as presentation:
+      if show_failures_in_red and not attempt.success:
+        presentation.status = self._api.step.FAILURE
+      self._present(
+          presentation,
+          attempt,
+          show_failures_in_red=show_failures_in_red,
+          show_passed=True,
       )
-      step.presentation.logs[name] = contents.splitlines()
 
-  return Task(*args, api=api, **kwargs)
+  def _present(self, presentation, attempt, show_failures_in_red, show_passed):
+    """Present an Attempt.
+
+    Choosing to do largely the same thing for both kinds of presentations.
+
+    Args:
+      presentation (StepPresentation): where to present the attempt info
+      attempt (api.swarming_retry.Attempt): object to present
+      show_failures_in_red (bool): show failures in red (for final
+          'flakes' and 'failures' steps) or not (for 'launch/collect'
+          progress and 'passes' steps)
+      show_passed (bool): show the names of passed tests (only done for
+          the end)
+
+    Note: the 'passes' step can have failures underneath it because the
+    first attempt can fail but the retry passed.
+    """
+    if attempt.result.duration_secs:
+      presentation.step_text = nice_time(attempt.result.duration_secs)
+
+    presentation.presentation.links['swarming task'] = attempt.task_ui_link
+    if attempt.task_outputs_link:
+      presentation.links['task outputs'] = attempt.task_outputs_link
+
+    if attempt.failure_reason:
+      presentation.step_summary_text = attempt.failure_reason
+
+    for log, data in attempt.logs.iteritems():
+      presentation.logs[log] = data
+
+    if attempt.test_results:
+      test_results = attempt.test_results
+
+      # Log the contents of each output file mentioned in the summary.
+      # Note this assumes the outputs are all valid UTF-8 (See fxb/9500).
+      for name, path in test_results.summary.get('outputs', {}).iteritems():
+        presentation.logs[name] = test_results.get_output(path).split('\n')
+
+      test_results.present_tests(
+          show_failures_in_red=show_failures_in_red, show_passed=show_passed)
+
+    for log_name in [
+        self._api.testing_requests.SYSLOG_NAME,
+        self._api.testing_requests.SERIAL_LOG_NAME
+    ]:
+      if log_name in attempt.result.outputs:
+        self._present_output_file(
+            name=log_name,
+            path=attempt.result.outputs[log_name],
+            step=presentation)
+
+  def _present_output_file(self, name, path, step):
+    """Records file contents to the test results step's presentation."""
+    contents = self._api.file.read_text(
+        'read %s' % name,
+        path,
+        test_data='extra log contents',
+    )
+    step.presentation.logs[name] = contents.splitlines()
 
 
 class _ShardedTestRunner(object):
@@ -581,7 +565,7 @@
       targets_fuchsia = shard_request.task_request[0].dimensions.get(
           'os', '').lower() not in ('linux', 'mac')
       self.tasks.append(
-          create_task(
+          Task(
               api=self._api,
               name=shard_request.task_request.name,
               request=shard_request.task_request,
diff --git a/recipes/recipes.py b/recipes/recipes.py
index 6407af0..26553fe 100644
--- a/recipes/recipes.py
+++ b/recipes/recipes.py
@@ -10,6 +10,7 @@
 from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
 from recipe_engine.recipe_api import Property
 
+from RECIPE_MODULES.fuchsia.swarming_retry import api as swarming_retry_api
 from RECIPE_MODULES.fuchsia.utils.api import memoize
 
 DEPS = [
@@ -185,16 +186,16 @@
   return build
 
 
+class Build(swarming_retry_api.LedTask):
+
+  def include_cl(self, cl):
+    self._led_data = self._led_data.then('edit-cr-cl', cl)
+
+  def include_recipe_bundle(self):
+    self._led_data = self._led_data.then('edit-recipe-bundle')
+
+
 def create_led_build(api, orig_build, selftest_cl):
-
-  class Build(api.swarming_retry.LedTask):
-
-    def include_cl(self, cl):
-      self._led_data = self._led_data.then('edit-cr-cl', cl)
-
-    def include_recipe_bundle(self):
-      self._led_data = self._led_data.then('edit-recipe-bundle')
-
   builder = orig_build.builder.builder
   led_data = api.led('get-build', orig_build.id)
   led_data.result['top_level']['name'] = 'recipes-cq:%s' % builder
diff --git a/recipes/zbi_test.py b/recipes/zbi_test.py
index 00c6f72..5fe009f 100644
--- a/recipes/zbi_test.py
+++ b/recipes/zbi_test.py
@@ -10,6 +10,8 @@
 from recipe_engine.post_process import StatusSuccess, StatusFailure
 from recipe_engine.recipe_api import Property
 
+from RECIPE_MODULES.fuchsia.swarming_retry import api as swarming_retry_api
+
 DEPS = [
     'fuchsia/build',
     'fuchsia/checkout',
@@ -83,220 +85,207 @@
   return checkout_root.join('prebuilt', *path)
 
 
-def create_task(api, **kwargs):
-  """Create a Task object.
+class Task(swarming_retry_api.TriggeredTask):
 
-  The base class of the class is inside the api object, so it can't be
-  top-level or otherwise defined at module load time. Defining it in this
-  function as an alternative.
+  def __init__(self, api, name, zbi_test, checkout_root, gn_results, target_cpu,
+               device_type, **kwargs):
+    super(Task, self).__init__(api=api, name=name, request=None, **kwargs)
+    self._checkout_root = checkout_root
+    self._gn_results = gn_results
+    self._target_cpu = target_cpu
+    self._zbi_test = copy.deepcopy(zbi_test)
+    self._device_type = device_type
 
-  For full args list see Task.__init__ a few lines down.
-  """
+    if api.emu.is_emulator_type(device_type):
+      self._create_emu_request()
+    else:
+      self._create_device_request()
 
-  class Task(api.swarming_retry.TriggeredTask):
+  def _create_emu_request(self):
+    isolate_tree = self._api.file.symlink_tree(
+        root=self._api.path.mkdtemp('isolate'))
+    isolate_tree.register_link(
+        target=self._gn_results.fuchsia_build_dir.join(self._zbi_test['path']),
+        linkname=isolate_tree.root.join(
+            os.path.basename(self._zbi_test['path'])))
 
-    def __init__(self, api, name, zbi_test, checkout_root, gn_results,
-                 target_cpu, device_type, **kwargs):
-      super(Task, self).__init__(api=api, name=name, request=None, **kwargs)
-      self._checkout_root = checkout_root
-      self._gn_results = gn_results
-      self._target_cpu = target_cpu
-      self._zbi_test = copy.deepcopy(zbi_test)
-      self._device_type = device_type
+    qemu_kernel = copy.deepcopy([
+        image for image in self._gn_results.image_manifest
+        if image['name'] == QEMU_KERNEL_NAME
+    ][0])
 
-      if api.emu.is_emulator_type(device_type):
-        self._create_emu_request()
-      else:
-        self._create_device_request()
+    qemu_kernel_basename = self._api.path.basename(qemu_kernel['path'])
+    isolate_tree.register_link(
+        target=self._gn_results.fuchsia_build_dir.join(qemu_kernel['path']),
+        linkname=isolate_tree.root.join(qemu_kernel_basename))
+    qemu_kernel['path'] = qemu_kernel_basename
 
-    def _create_emu_request(self):
-      isolate_tree = api.file.symlink_tree(
-          root=self._api.path.mkdtemp('isolate'))
+    isolate_tree.register_link(
+        target=self._gn_results.tool('botanist', self._target_cpu),
+        linkname=isolate_tree.root.join('botanist'),
+    )
+    isolate_tree.create_links('create tree of images')
+
+    # TODO(IN-1420) botanist expects test to be named 'zircon-a'
+    self._zbi_test['name'] = 'zircon-a'
+    self._zbi_test['path'] = os.path.basename(self._zbi_test['path'])
+
+    image_manifest_path = isolate_tree.root.join(IMAGES_JSON)
+    image_manifest = [self._zbi_test, qemu_kernel]
+    self._api.file.write_json(
+        'write image manifest', image_manifest_path, image_manifest, indent=2)
+
+    isolated = self._api.isolated.isolated(isolate_tree.root)
+    isolated.add_dir(isolate_tree.root)
+    isolated_hash = isolated.archive('isolate images')
+
+    ensure_file = self._api.cipd.EnsureFile()
+
+    if self._device_type == 'QEMU':
+      self._api.emu.add_qemu_to_ensure_file(ensure_file, subdir='qemu')
+
+    dimensions = {
+        'pool': 'fuchsia.tests',
+        'os': 'Debian',
+        'cpu': self._zbi_test['cpu'],
+        'kvm': '1',
+    }
+
+    cmd = [
+        './botanist',
+        '-level', BOTANIST_LOG_LEVEL,
+        'qemu',
+        '-type', '%s' % self._device_type.lower(),
+        '-qemu-dir', './%s/bin' % self._device_type.lower(),
+        '-images', IMAGES_JSON,
+        '-arch', self._zbi_test['cpu'],
+        '-use-kvm',
+    ]  # yapf: disable
+
+    env_name = '%s-%s' % (self._device_type, self._zbi_test['cpu'])
+    tags = {'test_environment_name': [env_name]}
+    request = self._api.swarming.task_request().with_name(
+        self.name).with_tags(tags)
+    self._request = request.with_slice(0, request[0]
+        .with_command(cmd)
+        .with_isolated(isolated_hash)
+        .with_dimensions(**dimensions)
+        .with_execution_timeout_secs(TEST_EXECUTION_TIMEOUT_SECS)
+        .with_expiration_secs(TEST_EXPIRATION_TIMEOUT_SECS)
+        .with_io_timeout_secs(TEST_IO_TIMEOUT_SECS)
+        .with_cipd_ensure_file(ensure_file)
+    )  # yapf: disable
+
+  def _create_device_request(self):
+    isolate_tree = self._api.file.symlink_tree(
+        root=self._api.path.mkdtemp('isolate'))
+    isolate_tree.register_link(
+        target=self._gn_results.fuchsia_build_dir.join(self._zbi_test['path']),
+        linkname=isolate_tree.root.join(
+            os.path.basename(self._zbi_test['path'])))
+    # We isolate the ZBI to the root of the directory to be isolated.
+    self._zbi_test['path'] = os.path.basename(self._zbi_test['path'])
+
+    for tool in ('botanist', 'seriallistener', 'bootserver_new'):
       isolate_tree.register_link(
-          target=self._gn_results.fuchsia_build_dir.join(
-              self._zbi_test['path']),
-          linkname=isolate_tree.root.join(
-              os.path.basename(self._zbi_test['path'])))
-
-      qemu_kernel = copy.deepcopy([
-          image for image in self._gn_results.image_manifest
-          if image['name'] == QEMU_KERNEL_NAME
-      ][0])
-
-      qemu_kernel_basename = api.path.basename(qemu_kernel['path'])
-      isolate_tree.register_link(
-          target=self._gn_results.fuchsia_build_dir.join(qemu_kernel['path']),
-          linkname=isolate_tree.root.join(qemu_kernel_basename))
-      qemu_kernel['path'] = qemu_kernel_basename
-
-      isolate_tree.register_link(
-          target=self._gn_results.tool('botanist', self._target_cpu),
-          linkname=isolate_tree.root.join('botanist'),
-      )
-      isolate_tree.create_links('create tree of images')
-
-      # TODO(IN-1420) botanist expects test to be named 'zircon-a'
-      self._zbi_test['name'] = 'zircon-a'
-      self._zbi_test['path'] = os.path.basename(self._zbi_test['path'])
-
-      image_manifest_path = isolate_tree.root.join(IMAGES_JSON)
-      image_manifest = [self._zbi_test, qemu_kernel]
-      self._api.file.write_json(
-          'write image manifest', image_manifest_path, image_manifest, indent=2)
-
-      isolated = self._api.isolated.isolated(isolate_tree.root)
-      isolated.add_dir(isolate_tree.root)
-      isolated_hash = isolated.archive('isolate images')
-
-      ensure_file = self._api.cipd.EnsureFile()
-
-      if self._device_type == 'QEMU':
-        self._api.emu.add_qemu_to_ensure_file(ensure_file, subdir='qemu')
-
-      dimensions = {
-          'pool': 'fuchsia.tests',
-          'os': 'Debian',
-          'cpu': self._zbi_test['cpu'],
-          'kvm': '1',
-      }
-
-      cmd = [
-          './botanist',
-          '-level', BOTANIST_LOG_LEVEL,
-          'qemu',
-          '-type', '%s' % self._device_type.lower(),
-          '-qemu-dir', './%s/bin' % self._device_type.lower(),
-          '-images', IMAGES_JSON,
-          '-arch', self._zbi_test['cpu'],
-          '-use-kvm',
-      ]  # yapf: disable
-
-      env_name = '%s-%s' % (self._device_type, self._zbi_test['cpu'])
-      tags = {'test_environment_name': [env_name]}
-      request = self._api.swarming.task_request().with_name(
-          self.name).with_tags(tags)
-      self._request = request.with_slice(0, request[0]
-          .with_command(cmd)
-          .with_isolated(isolated_hash)
-          .with_dimensions(**dimensions)
-          .with_execution_timeout_secs(TEST_EXECUTION_TIMEOUT_SECS)
-          .with_expiration_secs(TEST_EXPIRATION_TIMEOUT_SECS)
-          .with_io_timeout_secs(TEST_IO_TIMEOUT_SECS)
-          .with_cipd_ensure_file(ensure_file)
-      )  # yapf: disable
-
-    def _create_device_request(self):
-      isolate_tree = api.file.symlink_tree(
-          root=self._api.path.mkdtemp('isolate'))
-      isolate_tree.register_link(
-          target=self._gn_results.fuchsia_build_dir.join(
-              self._zbi_test['path']),
-          linkname=isolate_tree.root.join(
-              os.path.basename(self._zbi_test['path'])))
-      # We isolate the ZBI to the root of the directory to be isolated.
-      self._zbi_test['path'] = os.path.basename(self._zbi_test['path'])
-
-      for tool in ('botanist', 'seriallistener', 'bootserver_new'):
-        isolate_tree.register_link(
-            target=self._gn_results.tool(tool, 'x64'),
-            linkname=isolate_tree.root.join(tool),
-        )
-
-      image_manifest_path = isolate_tree.root.join(IMAGES_JSON)
-      image_manifest = [self._zbi_test]
-      self._api.file.write_json(
-          'write image manifest', image_manifest_path, image_manifest, indent=2)
-
-      ensure_file = self._api.cipd.EnsureFile()
-
-      config = BOTANIST_DEVICE_CONFIG
-      dimensions = {
-          'pool': 'fuchsia.tests',
-          'device_type': self._device_type,
-          'serial': '1',
-      }
-
-      # Construct the botanist command
-      cmd = [
-          './botanist',
-          '-level', BOTANIST_LOG_LEVEL,
-          'run',
-          '-images', IMAGES_JSON,
-          '-serial-log', self._api.testing_requests.SERIAL_LOG_NAME,
-          '-config', config,
-          '-netboot',
-          './seriallistener',
-          '-timeout', '5m',
-          '-stdout',
-          '-success-str', self._zbi_test['success_string'],
-      ] # yapf: disable
-
-      isolate_tree.create_links('create tree of images')
-      isolated = self._api.isolated.isolated(isolate_tree.root)
-      isolated.add_dir(isolate_tree.root)
-      isolated_hash = isolated.archive('isolate images')
-
-      outputs = [self._api.testing_requests.SERIAL_LOG_NAME]
-      env_name = '%s-%s' % (self._device_type, self._zbi_test['cpu'])
-      tags = {'test_environment_name': [env_name]}
-      request = api.swarming.task_request().with_name(self.name).with_tags(tags)
-      self._request = request.with_slice(0, request[0].
-          with_command(cmd).
-          with_isolated(isolated_hash).
-          with_dimensions(**dimensions).
-          with_execution_timeout_secs(TEST_EXECUTION_TIMEOUT_SECS).
-          with_expiration_secs(TEST_EXPIRATION_TIMEOUT_SECS).
-          with_io_timeout_secs(TEST_IO_TIMEOUT_SECS).
-          with_cipd_ensure_file(ensure_file).
-          with_outputs(outputs)
-      )  # yapf: disable
-
-    def process_result(self):
-      attempt = self.attempts[-1]
-      assert attempt.result
-      result = attempt.result
-
-      symbolize_tool = self._gn_results.tool('symbolize')
-      clang_dir = prebuilt_path(self._api, self._checkout_root, 'third_party',
-                                'clang')
-      llvm_symbolizer = self._gn_results.tool('llvm-symbolizer')
-      build_id_dirs = (
-          self._gn_results.zircon_build_dir.join('.build-id'),
-          clang_dir.join('lib', 'debug', '.build-id'),
+          target=self._gn_results.tool(tool, 'x64'),
+          linkname=isolate_tree.root.join(tool),
       )
 
-      with self._api.step.nest(result.name) as presentation:
-        attempt.logs['symbolized log'] = self._api.symbolize(
-            symbolize_tool=symbolize_tool,
-            build_id_dirs=build_id_dirs,
-            llvm_symbolizer=llvm_symbolizer,
-            data=result.output,
-            presentation=presentation)
+    image_manifest_path = isolate_tree.root.join(IMAGES_JSON)
+    image_manifest = [self._zbi_test]
+    self._api.file.write_json(
+        'write image manifest', image_manifest_path, image_manifest, indent=2)
 
-        # A kernel panic may be present in the logs even if the task timed
-        # out, so check for that first.
-        if 'KERNEL PANIC' in result.output:
-          attempt.failure_reason = 'kernel panic'
+    ensure_file = self._api.cipd.EnsureFile()
 
-        # Because of the way these tests run (they are the only user-mode
-        # process in the system, and then the system shuts down) we can't
-        # collect an exit code or nicely structured output, so we have to
-        # search the output for a hard-coded string to detect success.
-        is_emu_type = self._api.emu.is_emulator_type(self._device_type)
-        if is_emu_type and self._zbi_test['success_string'] not in result.output:
-          attempt.failure_reason = 'failed to find success string'
+    config = BOTANIST_DEVICE_CONFIG
+    dimensions = {
+        'pool': 'fuchsia.tests',
+        'device_type': self._device_type,
+        'serial': '1',
+    }
 
-    def present_attempt(self, task_step, attempt, **kwargs):
-      del task_step, kwargs  # Unused.
-      name = '%s (%s)' % (attempt.name, 'pass' if attempt.success else 'fail')
-      step = api.step(name, None)
-      step.presentation.step_summary_text = attempt.failure_reason
-      step.presentation.links['task UI'] = attempt.task_ui_link
+    # Construct the botanist command
+    cmd = [
+        './botanist',
+        '-level', BOTANIST_LOG_LEVEL,
+        'run',
+        '-images', IMAGES_JSON,
+        '-serial-log', self._api.testing_requests.SERIAL_LOG_NAME,
+        '-config', config,
+        '-netboot',
+        './seriallistener',
+        '-timeout', '5m',
+        '-stdout',
+        '-success-str', self._zbi_test['success_string'],
+    ] # yapf: disable
 
-      for log, data in attempt.logs.iteritems():
-        step.presentation.logs[log] = data
+    isolate_tree.create_links('create tree of images')
+    isolated = self._api.isolated.isolated(isolate_tree.root)
+    isolated.add_dir(isolate_tree.root)
+    isolated_hash = isolated.archive('isolate images')
 
-  return Task(api=api, **kwargs)
+    outputs = [self._api.testing_requests.SERIAL_LOG_NAME]
+    env_name = '%s-%s' % (self._device_type, self._zbi_test['cpu'])
+    tags = {'test_environment_name': [env_name]}
+    request = self._api.swarming.task_request().with_name(
+        self.name).with_tags(tags)
+    self._request = request.with_slice(0, request[0].
+        with_command(cmd).
+        with_isolated(isolated_hash).
+        with_dimensions(**dimensions).
+        with_execution_timeout_secs(TEST_EXECUTION_TIMEOUT_SECS).
+        with_expiration_secs(TEST_EXPIRATION_TIMEOUT_SECS).
+        with_io_timeout_secs(TEST_IO_TIMEOUT_SECS).
+        with_cipd_ensure_file(ensure_file).
+        with_outputs(outputs)
+    )  # yapf: disable
+
+  def process_result(self):
+    attempt = self.attempts[-1]
+    assert attempt.result
+    result = attempt.result
+
+    symbolize_tool = self._gn_results.tool('symbolize')
+    clang_dir = prebuilt_path(self._api, self._checkout_root, 'third_party',
+                              'clang')
+    llvm_symbolizer = self._gn_results.tool('llvm-symbolizer')
+    build_id_dirs = (
+        self._gn_results.zircon_build_dir.join('.build-id'),
+        clang_dir.join('lib', 'debug', '.build-id'),
+    )
+
+    with self._api.step.nest(result.name) as presentation:
+      attempt.logs['symbolized log'] = self._api.symbolize(
+          symbolize_tool=symbolize_tool,
+          build_id_dirs=build_id_dirs,
+          llvm_symbolizer=llvm_symbolizer,
+          data=result.output,
+          presentation=presentation)
+
+      # A kernel panic may be present in the logs even if the task timed
+      # out, so check for that first.
+      if 'KERNEL PANIC' in result.output:
+        attempt.failure_reason = 'kernel panic'
+
+      # Because of the way these tests run (they are the only user-mode
+      # process in the system, and then the system shuts down) we can't
+      # collect an exit code or nicely structured output, so we have to
+      # search the output for a hard-coded string to detect success.
+      is_emu_type = self._api.emu.is_emulator_type(self._device_type)
+      if is_emu_type and self._zbi_test['success_string'] not in result.output:
+        attempt.failure_reason = 'failed to find success string'
+
+  def present_attempt(self, task_step, attempt, **kwargs):
+    del task_step, kwargs  # Unused.
+    name = '%s (%s)' % (attempt.name, 'pass' if attempt.success else 'fail')
+    step = self._api.step(name, None)
+    step.presentation.step_summary_text = attempt.failure_reason
+    step.presentation.links['task UI'] = attempt.task_ui_link
+
+    for log, data in attempt.logs.iteritems():
+      step.presentation.logs[log] = data
 
 
 def RunSteps(api, manifest, remote, target_cpu, variants, allowed_device_types):
@@ -346,7 +335,7 @@
       task_name = '%s - %s' % (name, device_type)
       with api.step.nest('prepare test: %s' % task_name):
         tasks.append(
-            create_task(
+            Task(
                 api,
                 name=task_name,
                 zbi_test=zbi_test,