Adds tools for deploying Cobalt to Google Container Engine.

(1) Adds the following commands to cobaltb.py

deploy authenticate: Authenticate the local machine with GKE and
                     associate with a particular GKE cluster
deploy show : shows the state of the cluster
deploy build: Builds new Docker images
deploy push: Pushes Docker images to the repository
deploy start: Starts one of the jobs
deploy stop: Stops one of the jobs
deploy upload_secret_key: Copies an Analyzer private key to a secure key
                          storage in the cluster
deploy delete_secret_key: Deletes a previously uploaded private key

(2) Introduces the file personal_cluster.json. This is a file that each
developer must maintain in order to work with Cobalt on
Container Engine. It specifies details about their personal cluster.

(3) Adds the ability for the observation querier to query Cloud Bigtable.
This is useful for debugging Cobalt in the Cloud.

My next CL will be a large update to the Cobalt documentation so that other
people besides me will know how to do all of this.

Change-Id: I835f6a6d668f940700e334395f0526e09cb24023
diff --git a/.gitignore b/.gitignore
index 928de09..887fd24 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 *.pyc
 *.pem
+personal_cluster.json
 service_account_credentials.json
 /out/
 /sysroot/
diff --git a/cobaltb.py b/cobaltb.py
index e2f075b..d78cdd0 100755
--- a/cobaltb.py
+++ b/cobaltb.py
@@ -16,12 +16,14 @@
 """The Cobalt build system command-line interface."""
 
 import argparse
+import json
 import logging
 import os
 import shutil
 import subprocess
 import sys
 
+import tools.container_util as container_util
 import tools.cpplint as cpplint
 import tools.golint as golint
 import tools.process_starter as process_starter
@@ -34,24 +36,13 @@
 from tools.process_starter import DEFAULT_REPORT_MASTER_PORT
 from tools.process_starter import DEMO_CONFIG_DIR
 from tools.process_starter import SHUFFLER_DEMO_CONFIG_FILE
-from tools.process_starter import DEFAULT_ANALYZER_PUBLIC_KEY_PEM
 
 THIS_DIR = os.path.abspath(os.path.dirname(__file__))
 OUT_DIR = os.path.abspath(os.path.join(THIS_DIR, 'out'))
 SYSROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, 'sysroot'))
 SERVICE_ACCOUNT_CREDENTIALS_FILE = os.path.join(THIS_DIR,
     'service_account_credentials.json')
-
-IMAGES = ["analyzer", "shuffler"]
-
-GCE_PROJECT = "shuffler-test"
-GCE_CLUSTER = "cluster-1"
-GCE_TAG = "us.gcr.io/google.com/%s" % GCE_PROJECT
-
-A_BT_INSTANCE = "cobalt-analyzer"
-A_BT_TABLE = "observations"
-A_BT_TABLE_NAME = "projects/google.com:%s/instances/%s/tables/%s" \
-                % (GCE_PROJECT, A_BT_INSTANCE, A_BT_TABLE)
+PERSONAL_CLUSTER_JSON_FILE = os.path.join(THIS_DIR, 'personal_cluster.json')
 
 _logger = logging.getLogger()
 _verbose_count = 0
@@ -83,18 +74,6 @@
   if not os.path.exists(dir_path):
     os.makedirs(dir_path)
 
-def setGCEImages(args):
-  """Sets the list of GCE images to be built/deployed/started and stopped.
-
-  Args:
-    args{list} List of parsed command line arguments.
-  """
-  global IMAGES
-  if args.shuffler_gce:
-    IMAGES = ["shuffler"]
-  elif args.analyzer_gce:
-    IMAGES = ["analyzer"]
-
 def _setup(args):
   subprocess.check_call(["git", "submodule", "init"])
   subprocess.check_call(["git", "submodule", "update"])
@@ -149,9 +128,12 @@
   success = True
   print ("Will run tests in the following directories: %s." %
       ", ".join(test_dirs))
+
+  bigtable_project_name = ''
+  bigtable_instance_name = ''
   for test_dir in test_dirs:
     start_bt_emulator = ((test_dir in NEEDS_BT_EMULATOR)
-        and not args.bigtable_instance_name)
+        and not args.use_cloud_bt)
     start_cobalt_processes = (test_dir in NEEDS_COBALT_PROCESSES)
     test_args = None
     if (test_dir == 'gtests_cloud_bt'):
@@ -172,25 +154,32 @@
           "--bigtable_project_name=%s" % args.bigtable_project_name,
           "--bigtable_instance_name=%s" % args.bigtable_instance_name
       ]
+      bigtable_project_name = args.bigtable_project_name
+      bigtable_instance_name = args.bigtable_instance_name
     if (test_dir == 'e2e_tests'):
       test_args = [
           "-analyzer_uri=localhost:%d" % DEFAULT_ANALYZER_SERVICE_PORT,
           "-analyzer_pk_pem_file=%s" % E2E_TEST_ANALYZER_PUBLIC_KEY_PEM,
           "-shuffler_uri=localhost:%d" % DEFAULT_SHUFFLER_PORT,
           "-report_master_uri=localhost:%d" % DEFAULT_REPORT_MASTER_PORT,
-          "-bigtable_project_name=%s" % args.bigtable_project_name,
-          "-bigtable_instance_name=%s" % args.bigtable_instance_name,
           ("-observation_querier_path=%s" %
               process_starter.OBSERVATION_QUERIER_PATH),
           "-test_app_path=%s" % process_starter.TEST_APP_PATH,
           "-sub_process_v=%d"%_verbose_count
       ]
+      if args.use_cloud_bt:
+        test_args = test_args + [
+          "-bigtable_project_name=%s" % args.bigtable_project_name,
+          "-bigtable_instance_name=%s" % args.bigtable_instance_name,
+        ]
+        bigtable_project_name = args.bigtable_project_name
+        bigtable_instance_name = args.bigtable_instance_name
     print '********************************************************'
     success = (test_runner.run_all_tests(
         test_dir, start_bt_emulator=start_bt_emulator,
         start_cobalt_processes=start_cobalt_processes,
-        bigtable_project_name=args.bigtable_project_name,
-        bigtable_instance_name=args.bigtable_instance_name,
+        bigtable_project_name=bigtable_project_name,
+        bigtable_instance_name=bigtable_instance_name,
         verbose_count=_verbose_count,
         test_args=test_args) == 0) and success
 
@@ -263,7 +252,15 @@
       verbose_count=_verbose_count)
 
 def _start_observation_querier(args):
-  process_starter.start_observation_querier(verbose_count=_verbose_count)
+  bigtable_project_name = ''
+  bigtable_instance_name = ''
+  if args.use_cloud_bt:
+    bigtable_project_name = args.bigtable_project_name
+    bigtable_instance_name = args.bigtable_instance_name
+  process_starter.start_observation_querier(
+      bigtable_project_name=bigtable_project_name,
+      bigtable_instance_name=bigtable_instance_name,
+      verbose_count=_verbose_count)
 
 def _generate_keys(args):
   path = os.path.join(OUT_DIR, 'tools', 'key_generator', 'key_generator')
@@ -287,87 +284,86 @@
       "--bigtable_project_name", args.bigtable_project_name,
       "--bigtable_instance_name", args.bigtable_instance_name])
 
-def _gce_build(args):
-  setGCEImages(args)
+def _deploy_show(args):
+  container_util.display()
 
-  # Copy over the dependencies for the cobalt base image
-  cobalt = "%s/cobalt" % OUT_DIR
+def _deploy_authenticate(args):
+  container_util.authenticate(args.cluster_name, args.cloud_project_prefix,
+      args.cloud_project_name)
 
-  if not os.path.exists(cobalt):
-    os.mkdir(cobalt)
+def _deploy_build(args):
+  container_util.build_all_docker_images(
+      shuffler_config_file=args.shuffler_config_file)
 
-  for dep in ["lib/libprotobuf.so.10",
-              "lib/libgoogleapis.so",
-              "lib/libgrpc++.so.1",
-              "lib/libgrpc.so.1",
-              "lib/libunwind.so.1",
-              "share/grpc/roots.pem",
-             ]:
-    shutil.copy("%s/%s" % (SYSROOT_DIR, dep), cobalt)
+def _deploy_push(args):
+  if args.job == 'shuffler':
+    container_util.push_shuffler_to_container_registry(
+        args.cloud_project_prefix, args.cloud_project_name)
+  elif args.job == 'analyzer-service':
+    container_util.push_analyzer_service_to_container_registry(
+        args.cloud_project_prefix, args.cloud_project_name)
+  elif args.job == 'report-master':
+    container_util.push_report_master_to_container_registry(
+        args.cloud_project_prefix, args.cloud_project_name)
+  else:
+    print('Unknown job "%s". I only know how to push "shuffler", '
+          '"analyzer-service" and "report-master".' % args.job)
 
-  # Copy configuration files
-  for conf in ["registered_metrics.txt",
-               "registered_encodings.txt",
-               "registered_reports.txt"
-              ]:
-    shutil.copy("%s/config/demo/%s" % (THIS_DIR, conf),
-                "%s/analyzer/" % OUT_DIR)
+def _deploy_start(args):
+  if args.job == 'shuffler':
+    container_util.start_shuffler(args.cloud_project_prefix,
+                                  args.cloud_project_name,
+                                  args.gce_pd_name)
+  elif args.job == 'analyzer-service':
+    if args.bigtable_instance_name == '':
+        print '--bigtable_instance_name must be specified'
+        return
+    container_util.start_analyzer_service(
+        args.cloud_project_prefix, args.cloud_project_name,
+        args.bigtable_instance_name)
+  elif args.job == 'report-master':
+    if args.bigtable_instance_name == '':
+        print '--bigtable_instance_name must be specified'
+        return
+    container_util.start_report_master(
+        args.cloud_project_prefix, args.cloud_project_name,
+        args.bigtable_instance_name)
+  else:
+    print('Unknown job "%s". I only know how to start "shuffler", '
+          '"analyzer-service" and "report-master".' % args.job)
 
-  # Build all images
-  for i in ["cobalt"] + IMAGES:
-    # copy over the dockerfile
-    dstdir = "%s/%s" % (OUT_DIR, i)
-    shutil.copy("%s/docker/%s/Dockerfile" % (THIS_DIR, i), dstdir)
+def _deploy_stop(args):
+  if args.job == 'shuffler':
+    container_util.stop_shuffler()
+  elif args.job == 'analyzer-service':
+    container_util.stop_analyzer_service()
+  elif args.job == 'report-master':
+    container_util.stop_report_master()
+  else:
+    print('Unknown job "%s". I only know how to stop "shuffler", '
+          '"analyzer-service" and "report-master".' % args.job)
 
-    subprocess.check_call(["docker", "build", "-t", i, dstdir])
+def _deploy_upload_secret_key(args):
+  container_util.create_analyzer_private_key_secret()
 
-def _gce_push(args):
-  setGCEImages(args)
-
-  for i in IMAGES:
-    tag = "%s/%s" % (GCE_TAG, i)
-    subprocess.check_call(["docker", "tag", i, tag])
-    subprocess.check_call(["gcloud", "docker", "--", "push", tag])
-
-def kube_setup():
-  subprocess.check_call(["gcloud", "container", "clusters", "get-credentials",
-                         GCE_CLUSTER, "--project",
-                         "google.com:%s" % GCE_PROJECT])
-
-def _gce_start(args):
-  setGCEImages(args)
-
-  kube_setup()
-
-  for i in IMAGES:
-    print("Starting %s" % i)
-
-    if (i == "analyzer"):
-      args = ["-table", A_BT_TABLE_NAME,
-              "-metrics", "/etc/cobalt/registered_metrics.txt",
-              "-reports", "/etc/cobalt/registered_reports.txt",
-              "-encodings", "/etc/cobalt/registered_encodings.txt"]
-
-      subprocess.check_call(["kubectl", "run", i, "--image=%s/%s" % (GCE_TAG, i),
-                             "--port=8080", "--"] + args)
-    else:
-      subprocess.check_call(["kubectl", "run", i, "--image=%s/%s" % (GCE_TAG, i),
-                             "--port=50051"])
-
-    subprocess.check_call(["kubectl", "expose", "deployment", i,
-                           "--type=LoadBalancer"])
-
-def _gce_stop(args):
-  setGCEImages(args)
-
-  kube_setup()
-
-  for i in IMAGES:
-    print("Stopping %s" % i)
-
-    subprocess.check_call(["kubectl", "delete", "service,deployment", i,])
+def _deploy_delete_secret_key(args):
+  container_util.delete_analyzer_private_key_secret()
 
 def main():
+  personal_cluster_settings = {
+    'cloud_project_prefix': '',
+    'cloud_project_name': '',
+    'cluster_name': '',
+    'gce_pd_name': '',
+    'bigtable_project_name' : '',
+    'bigtable_instance_name': '',
+  }
+  if os.path.exists(PERSONAL_CLUSTER_JSON_FILE):
+    print ('Default deployment options will be taken from %s.' %
+           PERSONAL_CLUSTER_JSON_FILE)
+    with open(PERSONAL_CLUSTER_JSON_FILE) as f:
+      personal_cluster_settings = json.load(f)
+
   parser = argparse.ArgumentParser(description='The Cobalt command-line '
       'interface.')
 
@@ -418,17 +414,22 @@
   sub_parser.add_argument('--tests', choices=TEST_FILTERS,
       help='Specify a subset of tests to run. Default=all',
       default='all')
+  sub_parser.add_argument('-use_cloud_bt',
+      help='Causes the end-to-end tests to run against an instance of Cloud '
+      'Bigtable. Otherwise a local instance of the Bigtable Emulator will be '
+      'used.', action='store_true')
   sub_parser.add_argument('--bigtable_project_name',
       help='Specify a Cloud project against which to run some of the tests.'
-      ' Only used for the cloud_bt and e2e tests. Required for the former.'
-      ' The e2e tests will use the local Bigtable Emulator if not specified.',
-      default='')
+      ' Only used for the cloud_bt tests and e2e tests when -use_cloud_bt is'
+      ' specified.'
+      ' default=%s'%personal_cluster_settings['bigtable_project_name'],
+      default=personal_cluster_settings['bigtable_project_name'])
   sub_parser.add_argument('--bigtable_instance_name',
       help='Specify a Cloud Bigtable instance within the specified Cloud'
       ' project against which to run some of the tests.'
-      ' Only used for the cloud_bt and e2e tests. Required for the former.'
-      ' The e2e tests will use the local Bigtable Emulator if not specified.',
-      default='')
+      ' Only used for the cloud_bt tests and e2e tests in cloud mode.'
+      ' default=%s'%personal_cluster_settings['bigtable_instance_name'],
+      default=personal_cluster_settings['bigtable_instance_name'])
 
   sub_parser = subparsers.add_parser('clean', parents=[parent_parser],
     help='Deletes some or all of the build products.')
@@ -437,6 +438,9 @@
       help='Delete the entire "out" directory.',
       action='store_true')
 
+  ########################################################
+  # start command
+  ########################################################
   start_parser = subparsers.add_parser('start',
     help='Start one of the Cobalt processes running locally.')
   start_subparsers = start_parser.add_subparsers()
@@ -508,6 +512,20 @@
       parents=[parent_parser], help='Start the Cobalt ObservationStore '
                                     'querying tool.')
   sub_parser.set_defaults(func=_start_observation_querier)
+  sub_parser.add_argument('-use_cloud_bt',
+      help='Causes the query to be performed against an instance of Cloud '
+      'Bigtable. Otherwise a local instance of the Bigtable Emulator will be '
+      'used.', action='store_true')
+  sub_parser.add_argument('--bigtable_project_name',
+      help='Specify a Cloud project against which to query. '
+      'Only used if -use_cloud_bt is set. '
+      'default=%s'%personal_cluster_settings['bigtable_project_name'],
+      default=personal_cluster_settings['bigtable_project_name'])
+  sub_parser.add_argument('--bigtable_instance_name',
+      help='Specify a Cloud Bigtable instance within the specified Cloud '
+      'project against which to query. Only used if -use_cloud_bt is set. '
+      'default=%s'%personal_cluster_settings['bigtable_instance_name'],
+      default=personal_cluster_settings['bigtable_instance_name'])
 
   sub_parser = start_subparsers.add_parser('bigtable_emulator',
     parents=[parent_parser],
@@ -524,51 +542,139 @@
   sub_parser.set_defaults(func=_generate_keys)
   sub_parser.add_argument('--bigtable_project_name',
       help='Specify the Cloud project containing the Bigtable instance '
-      ' to be provisioned.', default='')
+      ' to be provisioned. '
+      'default=%s'%personal_cluster_settings['bigtable_project_name'],
+      default=personal_cluster_settings['bigtable_project_name'])
   sub_parser.add_argument('--bigtable_instance_name',
       help='Specify the Cloud Bigtable instance within the specified Cloud'
-      ' project that is to be provisioned.', default='')
+      ' project that is to be provisioned. '
+      'default=%s'%personal_cluster_settings['bigtable_instance_name'],
+      default=personal_cluster_settings['bigtable_instance_name'])
   sub_parser.set_defaults(func=_provision_bigtable)
 
-  sub_parser = subparsers.add_parser('gce_build', parents=[parent_parser],
-    help='Builds Docker images for GCE.')
-  sub_parser.set_defaults(func=_gce_build)
-  sub_parser.add_argument('--a',
-      help='Builds Analyzer Docker image for GCE.',
-      action='store_true', dest='analyzer_gce')
-  sub_parser.add_argument('--s',
-      help='Builds Shuffler Docker image for GCE.',
-      action='store_true', dest='shuffler_gce')
+  ########################################################
+  # deploy command
+  ########################################################
+  deploy_parser = subparsers.add_parser('deploy',
+    help='Build Docker containers. Push to Container Registry. Deploy to GKE.')
+  deploy_subparsers = deploy_parser.add_subparsers()
 
-  sub_parser = subparsers.add_parser('gce_push', parents=[parent_parser],
-    help='Push docker images to GCE.')
-  sub_parser.set_defaults(func=_gce_push)
-  sub_parser.add_argument('--a',
-      help='Push Analyzer Docker image to GCE.',
-      action='store_true', dest='analyzer_gce')
-  sub_parser.add_argument('--s',
-      help='Push Shuffler Docker image to GCE.',
-      action='store_true', dest='shuffler_gce')
+  sub_parser = deploy_subparsers.add_parser('show',
+      parents=[parent_parser], help='Display information about currently '
+      'deployed jobs on GKE, including their public URIs.')
+  sub_parser.set_defaults(func=_deploy_show)
 
-  sub_parser = subparsers.add_parser('gce_start', parents=[parent_parser],
-    help='Start GCE instances.')
-  sub_parser.set_defaults(func=_gce_start)
-  sub_parser.add_argument('--a',
-      help='Starts Analyzer GCE instance.',
-      action='store_true', dest='analyzer_gce')
-  sub_parser.add_argument('--s',
-      help='Starts Shuffler GCE instance.',
-      action='store_true', dest='shuffler_gce')
+  sub_parser = deploy_subparsers.add_parser('authenticate',
+      parents=[parent_parser], help='Refresh your authentication token if '
+      'necessary. Also associates your local computer with a particular '
+      'GKE cluster to which you will be deploying.')
+  sub_parser.set_defaults(func=_deploy_authenticate)
+  sub_parser.add_argument('--cloud_project_prefix',
+      help='The prefix part of the Cloud project name to which you are '
+           'deploying. This is usually an organization domain name if your '
+           'Cloud project is associated with one. Pass the empty string for no '
+           'prefix. '
+           'Default=%s.' %personal_cluster_settings['cloud_project_prefix'],
+      default=personal_cluster_settings['cloud_project_prefix'])
+  sub_parser.add_argument('--cloud_project_name',
+      help='The main part of the name of the Cloud project to which you are '
+           'deploying. This is the full project name if --cloud_project_prefix '
+           'is empty. Otherwise the full project name is '
+           '<cloud_project_prefix>:<cloud_project_name>. '
+           'Default=%s' % personal_cluster_settings['cloud_project_name'],
+      default=personal_cluster_settings['cloud_project_name'])
+  sub_parser.add_argument('--cluster_name',
+      help='The GKE "container cluster" within your Cloud project to which you '
+           'are deploying. '
+           'Default=%s' % personal_cluster_settings['cluster_name'],
+      default=personal_cluster_settings['cluster_name'])
 
-  sub_parser = subparsers.add_parser('gce_stop', parents=[parent_parser],
-    help='Stop GCE instances.')
-  sub_parser.set_defaults(func=_gce_stop)
-  sub_parser.add_argument('--a',
-      help='Stops Analyzer GCE instance.',
-      action='store_true', dest='analyzer_gce')
-  sub_parser.add_argument('--s',
-      help='Stops Shuffler GCE instance.',
-      action='store_true', dest='shuffler_gce')
+  sub_parser = deploy_subparsers.add_parser('build',
+      parents=[parent_parser], help='Rebuild all Docker images. '
+          'You must have the Docker daemon running.')
+  sub_parser.set_defaults(func=_deploy_build)
+  sub_parser.add_argument('--shuffler_config_file',
+      help='Path to the Shuffler configuration file. '
+           'Default=%s' % SHUFFLER_DEMO_CONFIG_FILE,
+      default=SHUFFLER_DEMO_CONFIG_FILE)
+
+  sub_parser = deploy_subparsers.add_parser('push',
+      parents=[parent_parser], help='Push a Docker image to the Google '
+          'Container Registry.')
+  sub_parser.set_defaults(func=_deploy_push)
+  sub_parser.add_argument('--job',
+      help='The job you wish to push. Valid choices are "shuffler", '
+           '"analyzer-service", "report-master". Required.')
+  sub_parser.add_argument('--cloud_project_prefix',
+      help='The prefix part of the Cloud project name to which you are '
+           'deploying. This is usually an organization domain name if your '
+           'Cloud project is associated with one. Pass the empty string for no '
+           'prefix. '
+           'Default=%s.' %personal_cluster_settings['cloud_project_prefix'],
+      default=personal_cluster_settings['cloud_project_prefix'])
+  sub_parser.add_argument('--cloud_project_name',
+      help='The main part of the name of the Cloud project to which you are '
+           'deploying. This is the full project name if --cloud_project_prefix '
+           'is empty. Otherwise the full project name is '
+           '<cloud_project_prefix>:<cloud_project_name>. '
+           'Default=%s' % personal_cluster_settings['cloud_project_name'],
+      default=personal_cluster_settings['cloud_project_name'])
+
+  sub_parser = deploy_subparsers.add_parser('start',
+      parents=[parent_parser], help='Start one of the jobs on GKE.')
+  sub_parser.set_defaults(func=_deploy_start)
+  sub_parser.add_argument('--job',
+      help='The job you wish to start. Valid choices are "shuffler", '
+           '"analyzer-service", "report-master". Required.')
+  sub_parser.add_argument('--bigtable_instance_name',
+      help='Specify a Cloud Bigtable instance within the specified Cloud '
+           'project that the Analyzer should connect to. This is required '
+           'if and only if you are starting one of the two Analyzer jobs. '
+           'Default=%s' % personal_cluster_settings['bigtable_instance_name'],
+      default=personal_cluster_settings['bigtable_instance_name'])
+  sub_parser.add_argument('--cloud_project_prefix',
+      help='The prefix part of the Cloud project name to which you are '
+           'deploying. This is usually an organization domain name if your '
+           'Cloud project is associated with one. Pass the empty string for no '
+           'prefix. '
+           'Default=%s.' %personal_cluster_settings['cloud_project_prefix'],
+      default=personal_cluster_settings['cloud_project_prefix'])
+  sub_parser.add_argument('--cloud_project_name',
+      help='The main part of the name of the Cloud project to which you are '
+           'deploying. This is the full project name if --cloud_project_prefix '
+           'is empty. Otherwise the full project name is '
+           '<cloud_project_prefix>:<cloud_project_name>. '
+           'Default=%s' % personal_cluster_settings['cloud_project_name'],
+      default=personal_cluster_settings['cloud_project_name'])
+  sub_parser.add_argument('--gce_pd_name',
+      help='The name of a GCE persistent disk. This is used only when starting '
+           'the Shuffler. The disk must already have been created in the same '
+           'Cloud project in which the Shuffler is being deployed. '
+           'Default=%s' % personal_cluster_settings['gce_pd_name'],
+      default=personal_cluster_settings['gce_pd_name'])
+
+  sub_parser = deploy_subparsers.add_parser('stop',
+      parents=[parent_parser], help='Stop one of the jobs on GKE.')
+  sub_parser.set_defaults(func=_deploy_stop)
+  sub_parser.add_argument('--job',
+      help='The job you wish to stop. Valid choices are "shuffler", '
+           '"analyzer-service", "report-master". Required.')
+
+  sub_parser = deploy_subparsers.add_parser('upload_secret_key',
+      parents=[parent_parser], help='Creates a |secret| object in the '
+      'cluster to store a private key for the Analyzer. The private key must '
+      'first be generated using the "generate_keys" command. This must be done '
+      'at least once before starting the Analyzer Service. To replace the '
+      'key first delete the old one using the "deploy delete_secret_key" '
+      'command.')
+  sub_parser.set_defaults(func=_deploy_upload_secret_key)
+
+  sub_parser = deploy_subparsers.add_parser('delete_secret_key',
+      parents=[parent_parser], help='Deletes a |secret| object in the '
+      'cluster that was created using the "deploy upload_secret_key" '
+      'command.')
+  sub_parser.set_defaults(func=_deploy_delete_secret_key)
+
 
   args = parser.parse_args()
   global _verbose_count
diff --git a/docker/README.md b/docker/README.md
deleted file mode 100644
index fb2c01a..0000000
--- a/docker/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-Docker images
-=============
-
-cobalt/     - The base cobalt image.
-
-analyzer/   - Imports the base cobalt image and adds the analyzer binaries.
-
-shuffler/   - Imports the base cobalt image and adds the shuffler binaries.
-
-To build the docker images run: ../cobaltb.py gce\_build
diff --git a/docker/shuffler/Dockerfile b/kubernetes/analyzer_service/Dockerfile
similarity index 76%
rename from docker/shuffler/Dockerfile
rename to kubernetes/analyzer_service/Dockerfile
index 87cd148..70da6f8 100644
--- a/docker/shuffler/Dockerfile
+++ b/kubernetes/analyzer_service/Dockerfile
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM cobalt
+FROM cobalt-common
 
-COPY shuffler /usr/local/bin/
+COPY analyzer_service /usr/local/bin/
 
-ENTRYPOINT ["shuffler"]
-EXPOSE 50051
+ENTRYPOINT ["analyzer_service"]
+# This should match the port specified in analyzer_service_deployment.yaml
+EXPOSE 6001
diff --git a/kubernetes/analyzer_service/analyzer_service_deployment.yaml b/kubernetes/analyzer_service/analyzer_service_deployment.yaml
new file mode 100644
index 0000000..239fe2f
--- /dev/null
+++ b/kubernetes/analyzer_service/analyzer_service_deployment.yaml
@@ -0,0 +1,82 @@
+# Copyright 2016 The Fuchsia Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file contains the definitions of the Analyzer Service's Deployment object
+# (which includes an embedded definition of its Pod object) and the
+# Analyzer Service's Service object.
+#
+# The definitions contain some parameters (indicated by $$PARAMETER_NAME$$)
+# that will be replaced by the script tools/container_util.py prior to being
+# passed to "kubectl create"
+
+# The definition of the Deployment and Pod
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: analyzer-service
+  labels:
+    name: analyzer-service
+# Pod object
+spec:
+  replicas: 1
+  template:
+    metadata:
+      name: analyzer-service
+      labels:
+        name: analyzer-service
+    spec:
+      containers:
+      - name: analyzer-service
+        # The URI of a Docker image in a Docker repository.
+        image: $$ANALYZER_SERVICE_IMAGE_URI$$
+        args:
+          - '-port'
+          - '6001'
+          - '-bigtable_project_name'
+          - $$BIGTABLE_PROJECT_NAME$$
+          - '-bigtable_instance_name'
+          - $$BIGTABLE_INSTANCE_NAME$$
+          - '-private_key_pem_file'
+          # The directory path must match mountPath below.
+          - '/var/lib/cobalt/$$ANALYZER_PRIVATE_PEM_NAME$$'
+          # TODO(rudominer) Eventually remove this.
+          - '-logtostderr'
+          - '-v=3'
+        ports:
+          - containerPort: 6001
+        volumeMounts:
+            # This name must match the volumes.name below.
+          - name: analyzer-key-storage
+            mountPath: /var/lib/cobalt
+            readOnly: true
+      volumes:
+        - name: analyzer-key-storage
+          secret:
+            secretName: $$ANALYZER_PRIVATE_KEY_SECRET_NAME$$
+---
+# The definition of the Service
+apiVersion: v1
+kind: Service
+metadata:
+  name: analyzer-service
+spec:
+  ports:
+    # The port that this service should serve on. This should match the port
+    # used several times above and the one specified in Dockerfile.
+  - port: 6001
+  selector:
+    # This must match the metadata.labels section of the Deployment above.
+    name: analyzer-service
+  type: LoadBalancer
+
diff --git a/docker/cobalt/Dockerfile b/kubernetes/cobalt_common/Dockerfile
similarity index 100%
rename from docker/cobalt/Dockerfile
rename to kubernetes/cobalt_common/Dockerfile
diff --git a/docker/analyzer/Dockerfile b/kubernetes/report_master/Dockerfile
similarity index 72%
rename from docker/analyzer/Dockerfile
rename to kubernetes/report_master/Dockerfile
index 1ace68e..2e5b8bc 100644
--- a/docker/analyzer/Dockerfile
+++ b/kubernetes/report_master/Dockerfile
@@ -12,12 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM cobalt
+FROM cobalt-common
 
-COPY analyzer /usr/local/bin/
+COPY analyzer_report_master /usr/local/bin/
+
+# The path to where we are copying the config files must match the path
+# specified in the -cobalt_config_dir flag in report_master_deployment.yaml.
 COPY registered_encodings.txt /etc/cobalt/
 COPY registered_metrics.txt /etc/cobalt/
 COPY registered_reports.txt /etc/cobalt/
 
-ENTRYPOINT ["analyzer"]
-EXPOSE 8080
+ENTRYPOINT ["analyzer_report_master"]
+EXPOSE 7001
diff --git a/kubernetes/report_master/report_master_deployment.yaml b/kubernetes/report_master/report_master_deployment.yaml
new file mode 100644
index 0000000..ffa86c0
--- /dev/null
+++ b/kubernetes/report_master/report_master_deployment.yaml
@@ -0,0 +1,74 @@
+# Copyright 2016 The Fuchsia Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file contains the definitions of the Report Master's Deployment object
+# (which includes an embedded definition of its Pod object) and the
+# Report Master's Service object.
+#
+# The definitions contain some parameters (indicated by $$PARAMETER_NAME$$)
+# that will be replaced by the script tools/container_util.py prior to being
+# passed to "kubectl create"
+
+# The definition of the Deployment and Pod
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: report-master
+  labels:
+    name: report-master
+# Pod object
+spec:
+  replicas: 1
+  template:
+    metadata:
+      name: report-master
+      labels:
+        name: report-master
+    spec:
+      containers:
+      - name: report-master
+        # The URI of a Docker image in a Docker repository.
+        image: $$REPORT_MASTER_IMAGE_URI$$
+        args:
+          - '-port'
+          - '7001'
+          - '-bigtable_project_name'
+          - $$BIGTABLE_PROJECT_NAME$$
+          - '-bigtable_instance_name'
+          - $$BIGTABLE_INSTANCE_NAME$$
+          - '-cobalt_config_dir'
+          # This path must match the path to where the config files are copied
+          # in Dockerfile.
+          - '/etc/cobalt'
+          # TODO(rudominer) Eventually remove this.
+          - '-logtostderr'
+          - '-v=3'
+        ports:
+          - containerPort: 7001
+---
+# The definition of the Service
+apiVersion: v1
+kind: Service
+metadata:
+  name: report-master
+spec:
+  ports:
+    # The port that this service should serve on. This should match the port
+    # used several times above and the one specified in Dockerfile.
+  - port: 7001
+  selector:
+    # This must match the metadata.labels section of the Deployment above.
+    name: report-master
+  type: LoadBalancer
+
diff --git a/docker/shuffler/Dockerfile b/kubernetes/shuffler/Dockerfile
similarity index 72%
copy from docker/shuffler/Dockerfile
copy to kubernetes/shuffler/Dockerfile
index 87cd148..f7b6f0c 100644
--- a/docker/shuffler/Dockerfile
+++ b/kubernetes/shuffler/Dockerfile
@@ -12,9 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM cobalt
+FROM cobalt-common
 
 COPY shuffler /usr/local/bin/
 
+ARG config_file
+# This path must match the -config_file flag in shuffler-deployment.yaml
+COPY ${config_file} /etc/cobalt/shuffler_config.txt
+
 ENTRYPOINT ["shuffler"]
-EXPOSE 50051
+# This port number must match the one used in shuffler_deployment.yaml
+EXPOSE 5001
diff --git a/kubernetes/shuffler/shuffler_deployment.yaml b/kubernetes/shuffler/shuffler_deployment.yaml
new file mode 100644
index 0000000..007df68
--- /dev/null
+++ b/kubernetes/shuffler/shuffler_deployment.yaml
@@ -0,0 +1,86 @@
+# Copyright 2016 The Fuchsia Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file contains the definitions of the Shuffler's Deployment object
+# (which includes an embedded definition of the Shuffler's Pod object) and the
+# Shuffler's Service object.
+#
+# The definitions contain some parameters (indicated by $$PARAMETER_NAME$$)
+# that will be replaced by the script tools/container_util.py prior to being
+# passed to "kubectl create"
+
+# The definition of the Deployment and Pod
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: shuffler
+  labels:
+    name: shuffler
+# Pod object
+spec:
+  replicas: 1
+  template:
+    metadata:
+      name: shuffler
+      labels:
+        name: shuffler
+    spec:
+      containers:
+      - name: shuffler
+        # The URI of a Docker image in a Docker repository.
+        image: $$SHUFFLER_IMAGE_URI$$
+        args:
+          - '-port'
+          - '5001'
+          - '-analyzer_uri'
+          # This must match the definition of the "analyzer-service" Service
+          # in analyzer_service_deployment.yaml
+          - 'analyzer-service:6001'
+          - '-config_file'
+          # This is the path to where the file is copied in Dockerfile.
+          -  '/etc/cobalt/shuffler_config.txt'
+          - '-db_dir'
+          # This path must match mountPath below.
+          - '/var/lib/cobalt'
+          # TODO(rudominer) Eventually remove this.
+          - '-logtostderr'
+          - '-v=3'
+        ports:
+          - containerPort: 5001
+        volumeMounts:
+            # This name must match the volumes.name below.
+          - name: shuffler-persistent-storage
+            mountPath: /var/lib/cobalt
+      volumes:
+        - name: shuffler-persistent-storage
+          gcePersistentDisk:
+            # The name of a GCE persistent disk that has already been created.
+            pdName: $$GCE_PERSISTENT_DISK_NAME$$
+            fsType: ext4
+---
+# The definition of the Service
+apiVersion: v1
+kind: Service
+metadata:
+  name: shuffler
+spec:
+  ports:
+    # The port that this service should serve on. This should match the
+    # port used several times above and the port specified in the Dockerfile.
+  - port: 5001
+  selector:
+    # This must match the metadata.labels section of the Deployment above.
+    name: shuffler
+  type: LoadBalancer
+
diff --git a/tools/container_util.py b/tools/container_util.py
new file mode 100755
index 0000000..d33d3a6
--- /dev/null
+++ b/tools/container_util.py
@@ -0,0 +1,344 @@
+#!/usr/bin/env python
+# Copyright 2017 The Fuchsia Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A library with functions to help work with Docker, Kubernetes and GKE."""
+
+import fileinput
+import os
+import shutil
+import subprocess
+import sys
+
+import process_starter
+from process_starter import ANALYZER_PRIVATE_KEY_PEM_NAME
+from process_starter import DEFAULT_ANALYZER_PRIVATE_KEY_PEM
+from process_starter import ANALYZER_SERVICE_PATH
+from process_starter import DEFAULT_ANALYZER_SERVICE_PORT
+from process_starter import DEFAULT_REPORT_MASTER_PORT
+from process_starter import DEFAULT_SHUFFLER_PORT
+from process_starter import REGISTERED_CONFIG_DIR
+from process_starter import REPORT_MASTER_PATH
+from process_starter import SHUFFLER_CONFIG_FILE
+from process_starter import SHUFFLER_PATH
+
+THIS_DIR = os.path.dirname(__file__)
+SRC_ROOT_DIR = os.path.join(THIS_DIR, os.pardir)
+OUT_DIR = os.path.abspath(os.path.join(SRC_ROOT_DIR, 'out'))
+SYS_ROOT_DIR = os.path.join(SRC_ROOT_DIR, 'sysroot')
+
+# The URI of the Google Container Registry.
+CONTAINER_REGISTRY_URI = 'us.gcr.io'
+
+# Dockerfile/Kubernetes source file paths
+KUBE_SRC_DIR = os.path.join(SRC_ROOT_DIR, 'kubernetes')
+COBALT_COMMON_DOCKER_FILE= os.path.join(KUBE_SRC_DIR, 'cobalt_common',
+    'Dockerfile')
+ANALYZER_SERVICE_DOCKER_FILE = os.path.join(KUBE_SRC_DIR, 'analyzer_service',
+    'Dockerfile')
+REPORT_MASTER_DOCKER_FILE = os.path.join(KUBE_SRC_DIR, 'report_master',
+    'Dockerfile')
+SHUFFLER_DOCKER_FILE = os.path.join(KUBE_SRC_DIR, 'shuffler',
+    'Dockerfile')
+
+# Kubernetes deployment yaml template files with replaceable tokens.
+ANALYZER_SERVICE_DEPLOYMENT_YAML = 'analyzer_service_deployment.yaml'
+ANALYZER_SERVICE_DEPLOYMENT_TEMPLATE_FILE = os.path.join(KUBE_SRC_DIR,
+    'analyzer_service', ANALYZER_SERVICE_DEPLOYMENT_YAML)
+REPORT_MASTER_DEPLOYMENT_YAML = 'report_master_deployment.yaml'
+REPORT_MASTER_DEPLOYMENT_TEMPLATE_FILE = os.path.join(KUBE_SRC_DIR,
+    'report_master', REPORT_MASTER_DEPLOYMENT_YAML)
+SHUFFLER_DEPLOYMENT_YAML = 'shuffler_deployment.yaml'
+SHUFFLER_DEPLOYMENT_TEMPLATE_FILE = os.path.join(KUBE_SRC_DIR, 'shuffler',
+    SHUFFLER_DEPLOYMENT_YAML)
+
+# Kubernetes output directory
+KUBE_OUT_DIR = os.path.join(OUT_DIR, 'kubernetes')
+
+# Post-processed kubernetes deployment yaml files. These have had their tokens
+# replaced and are ready to be used by "kubectl create"
+ANALYZER_SERVICE_DEPLOYMENT_FILE = os.path.join(KUBE_OUT_DIR,
+    ANALYZER_SERVICE_DEPLOYMENT_YAML)
+REPORT_MASTER_DEPLOYMENT_FILE = os.path.join(KUBE_OUT_DIR,
+    REPORT_MASTER_DEPLOYMENT_YAML)
+SHUFFLER_DEPLOYMENT_FILE = os.path.join(KUBE_OUT_DIR, SHUFFLER_DEPLOYMENT_YAML)
+
+# Docker image deployment directories
+COBALT_COMMON_DOCKER_BUILD_DIR = os.path.join(KUBE_OUT_DIR,
+    'cobalt_common')
+ANALYZER_SERVICE_DOCKER_BUILD_DIR = os.path.join(KUBE_OUT_DIR,
+    'analyzer_service')
+REPORT_MASTER_DOCKER_BUILD_DIR = os.path.join(KUBE_OUT_DIR,
+    'report_master')
+SHUFFLER_DOCKER_BUILD_DIR = os.path.join(KUBE_OUT_DIR,
+    'shuffler')
+
+# Docker Image Names
+COBALT_COMMON_IMAGE_NAME = "cobalt-common"
+ANALYZER_SERVICE_IMAGE_NAME = "analyzer-service"
+REPORT_MASTER_IMAGE_NAME = "report-master"
+SHUFFLER_IMAGE_NAME = "shuffler"
+
+COBALT_COMMON_SO_FILES = [os.path.join(SYS_ROOT_DIR, 'lib', f) for f in
+    ["libgoogleapis.so",
+     "libgrpc.so.1",
+     "libgrpc++.so.1",
+     "libprotobuf.so.10",
+     "libunwind.so.1",
+    ]]
+
+ROOTS_PEM = os.path.join(SYS_ROOT_DIR, 'share', 'grpc', 'roots.pem')
+
+ANALYZER_CONFIG_FILES = [os.path.join(REGISTERED_CONFIG_DIR, f) for f in
+    ["registered_encodings.txt",
+     "registered_metrics.txt",
+     "registered_reports.txt"
+    ]]
+
+ANALYZER_PRIVATE_KEY_SECRET_NAME = "analyzer-private-key"
+
+def _ensure_dir(dir_path):
+  """Ensures that the directory at |dir_path| exists. If not it is created.
+
+  Args:
+    dir_path {string}: The path to a directory. If it does not exist it will
+        be created.
+  """
+  if not os.path.exists(dir_path):
+    os.makedirs(dir_path)
+
+def _set_contents_of_dir(dir_name, files_to_copy):
+  """Deletes |dir_name| if it exists, recreates it, and copies in the files.
+
+  Args:
+    dir_name {string}: The directory whose contents will be replaced.
+    files_to_copy {list of string}: Paths of the files to copy into it.
+  """
+  shutil.rmtree(dir_name, ignore_errors=True)
+  os.makedirs(dir_name)
+  for f in files_to_copy:
+    shutil.copy(f, dir_name)
+
+def _build_cobalt_common_deploy_dir():
+  """Populates the cobalt-common Docker build directory with its Dockerfile,
+  roots.pem and the shared .so files.
+  """
+  files_to_copy = [COBALT_COMMON_DOCKER_FILE, ROOTS_PEM] +  \
+                  COBALT_COMMON_SO_FILES
+  _set_contents_of_dir(COBALT_COMMON_DOCKER_BUILD_DIR, files_to_copy)
+
+def _build_analyzer_service_deploy_dir():
+  """Populates the analyzer-service Docker build directory with its
+  Dockerfile and the analyzer_service binary.
+  """
+  files_to_copy = [ANALYZER_SERVICE_DOCKER_FILE, ANALYZER_SERVICE_PATH]
+  _set_contents_of_dir(ANALYZER_SERVICE_DOCKER_BUILD_DIR, files_to_copy)
+
+def _build_report_master_deploy_dir():
+  """Populates the report-master Docker build directory with its Dockerfile,
+  the report_master binary and the registered Cobalt config files.
+  """
+  files_to_copy = [REPORT_MASTER_DOCKER_FILE, REPORT_MASTER_PATH] + \
+                   ANALYZER_CONFIG_FILES
+  _set_contents_of_dir(REPORT_MASTER_DOCKER_BUILD_DIR, files_to_copy)
+
+def _build_shuffler_deploy_dir(config_file):
+  """Populates the shuffler Docker build directory with its Dockerfile, the
+  shuffler binary and the given config file.
+
+  Args:
+    config_file {string}: Path to the Shuffler config file to include.
+  """
+  files_to_copy = [SHUFFLER_DOCKER_FILE, SHUFFLER_PATH, config_file]
+  _set_contents_of_dir(SHUFFLER_DOCKER_BUILD_DIR, files_to_copy)
+
+def _build_docker_image(image_name, deploy_dir, extra_args=None):
+  """Runs "docker build" on |deploy_dir|, tagging the result |image_name|.
+
+  Args:
+    image_name {string}: The tag to give the built image.
+    deploy_dir {string}: The directory containing the Dockerfile and its
+        build context.
+    extra_args {list of string}: Optional extra "docker build" arguments,
+        for example ["--build-arg", "key=value"].
+  """
+  cmd = ["docker", "build"]
+  if extra_args:
+    cmd = cmd + extra_args
+  cmd = cmd + ["-t", image_name, deploy_dir]
+  subprocess.check_call(cmd)
+
+def build_all_docker_images(shuffler_config_file=SHUFFLER_CONFIG_FILE):
+  """Builds the cobalt-common, analyzer-service, report-master and shuffler
+  Docker images from the current build outputs.
+
+  Args:
+    shuffler_config_file {string}: Path to the Shuffler config file to be
+        copied into the Shuffler's image.
+  """
+  _build_cobalt_common_deploy_dir()
+  _build_docker_image(COBALT_COMMON_IMAGE_NAME,
+                      COBALT_COMMON_DOCKER_BUILD_DIR)
+
+  _build_analyzer_service_deploy_dir()
+  _build_docker_image(ANALYZER_SERVICE_IMAGE_NAME,
+                      ANALYZER_SERVICE_DOCKER_BUILD_DIR)
+
+  _build_report_master_deploy_dir()
+  _build_docker_image(REPORT_MASTER_IMAGE_NAME,
+                      REPORT_MASTER_DOCKER_BUILD_DIR)
+
+  # Pass the full path of the config file to be copied into the deploy dir.
+  _build_shuffler_deploy_dir(shuffler_config_file)
+
+  # But pass only the basename to be found by Docker and copied into the image.
+  config_file_name = os.path.basename(shuffler_config_file)
+  _build_docker_image(SHUFFLER_IMAGE_NAME, SHUFFLER_DOCKER_BUILD_DIR,
+      extra_args=["--build-arg", "config_file=%s"%config_file_name])
+
+def _image_registry_uri(cloud_project_prefix, cloud_project_name, image_name):
+  """Returns the URI of |image_name| in the Google Container Registry for the
+  given cloud project. The prefix segment is omitted when it is empty.
+  """
+  if not cloud_project_prefix:
+    return "%s/%s/%s" % (CONTAINER_REGISTRY_URI, cloud_project_name, image_name)
+  return "%s/%s/%s/%s" % (CONTAINER_REGISTRY_URI, cloud_project_prefix,
+                          cloud_project_name, image_name)
+
+def _push_to_container_registry(cloud_project_prefix, cloud_project_name,
+                                image_name):
+  """Tags the local Docker image |image_name| with its registry URI and
+  pushes it to the Google Container Registry via "gcloud docker -- push".
+  """
+  registry_tag = _image_registry_uri(cloud_project_prefix, cloud_project_name,
+                                     image_name)
+  subprocess.check_call(["docker", "tag", image_name, registry_tag])
+  subprocess.check_call(["gcloud", "docker", "--", "push", registry_tag])
+
+def push_analyzer_service_to_container_registry(cloud_project_prefix,
+                                               cloud_project_name):
+  _push_to_container_registry(cloud_project_prefix, cloud_project_name,
+                              ANALYZER_SERVICE_IMAGE_NAME)
+
+def push_report_master_to_container_registry(cloud_project_prefix,
+                                             cloud_project_name):
+  """Tags and pushes the report-master image to the container registry."""
+  _push_to_container_registry(cloud_project_prefix, cloud_project_name,
+                              REPORT_MASTER_IMAGE_NAME)
+
+def push_shuffler_to_container_registry(cloud_project_prefix,
+                                        cloud_project_name):
+  """Tags and pushes the shuffler image to the container registry."""
+  _push_to_container_registry(cloud_project_prefix, cloud_project_name,
+                              SHUFFLER_IMAGE_NAME)
+
+def _replace_tokens_in_template(template_file, out_file, token_replacements):
+  """Writes a copy of |template_file| to |out_file|, replacing every
+  occurrence of each key of |token_replacements| with its value. The output
+  directory is created if necessary.
+  """
+  _ensure_dir(os.path.dirname(out_file))
+  with open(out_file, 'w+b') as f:
+    for line in fileinput.input(template_file):
+      for token in token_replacements:
+        line = line.replace(token, token_replacements[token])
+      f.write(line)
+
+def _compound_project_name(cloud_project_prefix, cloud_project_name):
+  """Returns the full cloud project name, e.g. "google.com:shuffler-test",
+  or just |cloud_project_name| when the prefix is empty.
+  """
+  if not cloud_project_prefix:
+    return cloud_project_name
+  return "%s:%s"%(cloud_project_prefix, cloud_project_name)
+
+
+def _create_secret_from_file(secret_name, data_key, file_path):
+  """Creates a Kubernetes secret named |secret_name| whose data is the
+  contents of the file at |file_path|, stored under the key |data_key|.
+  """
+  subprocess.check_call(["kubectl", "create", "secret", "generic", secret_name,
+    "--from-file", "%s=%s"%(data_key, file_path)])
+
+def _delete_secret(secret_name):
+  subprocess.check_call(["kubectl", "delete", "secret",  secret_name])
+
+def create_analyzer_private_key_secret(
+    path_to_pem=DEFAULT_ANALYZER_PRIVATE_KEY_PEM):
+  """Uploads the Analyzer's private key PEM file to secure storage in the
+  cluster as a Kubernetes secret.
+  """
+  _create_secret_from_file(ANALYZER_PRIVATE_KEY_SECRET_NAME,
+                           ANALYZER_PRIVATE_KEY_PEM_NAME,
+                           path_to_pem)
+
+def delete_analyzer_private_key_secret():
+  """Deletes the previously uploaded Analyzer private key secret."""
+  _delete_secret(ANALYZER_PRIVATE_KEY_SECRET_NAME)
+
+def _start_gke_service(deployment_template_file, deployment_file,
+                       token_substitutions):
+  """Generates a deployment file from a template and runs "kubectl create".
+
+  Args:
+    deployment_template_file {string}: Path to a yaml template file.
+    deployment_file {string}: Path where the generated yaml file is written.
+    token_substitutions {dict}: Maps template tokens to replacement strings.
+  """
+  # Generate the kubernetes deployment file by performing token replacement.
+  _replace_tokens_in_template(deployment_template_file, deployment_file,
+                              token_substitutions)
+
+  # Invoke "kubectl create" on the deployment file we just generated.
+  subprocess.check_call(["kubectl", "create", "-f", deployment_file])
+
+def start_analyzer_service(cloud_project_prefix,
+                           cloud_project_name,
+                           bigtable_instance_name):
+  """ Starts the analyzer-service deployment and service.
+  cloud_project_prefix {string}: For example "google.com"
+  cloud_project_name {string}: For example "shuffler-test". The prefix and
+      name are used when forming the URI to the image in the registry and
+      also the bigtable project name.
+  bigtable_instance_name {string}: The name of the instance of Cloud Bigtable
+      within the specified project to be used by the Analyzer Service.
+  """
+  image_uri = _image_registry_uri(cloud_project_prefix, cloud_project_name,
+                                  ANALYZER_SERVICE_IMAGE_NAME)
+
+  bigtable_project_name = _compound_project_name(cloud_project_prefix,
+                                                 cloud_project_name)
+
+  # These are the token replacements that must be made inside the deployment
+  # template file.
+  token_substitutions = {
+      '$$ANALYZER_SERVICE_IMAGE_URI$$' : image_uri,
+      '$$BIGTABLE_PROJECT_NAME$$' : bigtable_project_name,
+      '$$BIGTABLE_INSTANCE_NAME$$' :bigtable_instance_name,
+      '$$ANALYZER_PRIVATE_PEM_NAME$$' : ANALYZER_PRIVATE_KEY_PEM_NAME,
+      '$$ANALYZER_PRIVATE_KEY_SECRET_NAME$$' : ANALYZER_PRIVATE_KEY_SECRET_NAME}
+  _start_gke_service(ANALYZER_SERVICE_DEPLOYMENT_TEMPLATE_FILE,
+                     ANALYZER_SERVICE_DEPLOYMENT_FILE,
+                     token_substitutions)
+
+def start_report_master(cloud_project_prefix,
+                        cloud_project_name,
+                        bigtable_instance_name):
+  """ Starts the report-master deployment and service.
+  cloud_project_prefix {string}: For example "google.com"
+  cloud_project_name {string}: For example "shuffler-test". The prefix and
+      name are used when forming the URI to the image in the registry and
+      also the bigtable project name.
+  bigtable_instance_name {string}: The name of the instance of Cloud Bigtable
+      within the specified project to be used by the Report Master.
+  """
+  image_uri = _image_registry_uri(cloud_project_prefix, cloud_project_name,
+                                  REPORT_MASTER_IMAGE_NAME)
+
+  bigtable_project_name = _compound_project_name(cloud_project_prefix,
+                                                 cloud_project_name)
+
+  # These are the token replacements that must be made inside the deployment
+  # template file.
+  token_substitutions = {'$$REPORT_MASTER_IMAGE_URI$$' : image_uri,
+                         '$$BIGTABLE_PROJECT_NAME$$' : bigtable_project_name,
+                         '$$BIGTABLE_INSTANCE_NAME$$' :bigtable_instance_name}
+  _start_gke_service(REPORT_MASTER_DEPLOYMENT_TEMPLATE_FILE,
+                     REPORT_MASTER_DEPLOYMENT_FILE,
+                     token_substitutions)
+
+def start_shuffler(cloud_project_prefix,
+                   cloud_project_name,
+                   gce_pd_name):
+  """ Starts the shuffler deployment and service.
+  cloud_project_prefix {string}: For example "google.com"
+  cloud_project_name {string}: For example "shuffler-test". The prefix and
+      name are used when forming the URI to the image in the registry.
+  gce_pd_name: {string} The name of a GCE persistent disk. This must have
+      already been created. The shuffler will use this disk for its LevelDB
+      storage so that the data persists between Shuffler updates.
+  """
+  image_uri = _image_registry_uri(cloud_project_prefix, cloud_project_name,
+                                  SHUFFLER_IMAGE_NAME)
+  # These are the token replacements that must be made inside the deployment
+  # template file.
+  token_substitutions = {'$$SHUFFLER_IMAGE_URI$$' : image_uri,
+                         '$$GCE_PERSISTENT_DISK_NAME$$' : gce_pd_name}
+  _start_gke_service(SHUFFLER_DEPLOYMENT_TEMPLATE_FILE,
+                     SHUFFLER_DEPLOYMENT_FILE,
+                     token_substitutions)
+
+def _stop_gke_service(name):
+  """Deletes the Kubernetes service and deployment named |name|."""
+  subprocess.check_call(["kubectl", "delete", "service,deployment", name])
+
+def stop_analyzer_service():
+  """Stops the analyzer-service deployment and service."""
+  _stop_gke_service(ANALYZER_SERVICE_IMAGE_NAME)
+
+def stop_report_master():
+  """Stops the report-master deployment and service."""
+  _stop_gke_service(REPORT_MASTER_IMAGE_NAME)
+
+def stop_shuffler():
+  """Stops the shuffler deployment and service."""
+  _stop_gke_service(SHUFFLER_IMAGE_NAME)
+
+def authenticate(cluster_name,
+                 cloud_project_prefix,
+                 cloud_project_name):
+  """Authenticates kubectl with the specified GKE cluster via
+  "gcloud container clusters get-credentials".
+  """
+  subprocess.check_call(["gcloud", "container", "clusters", "get-credentials",
+      cluster_name, "--project",
+      _compound_project_name(cloud_project_prefix, cloud_project_name)])
+
+def display():
+   subprocess.check_call(["kubectl", "get", "services"])
+
+def main():
+  _process_shuffler_yaml_file('cloud_project_prefix', 'cloud_project_name',
+                              'gce_pd_name')
+
+if __name__ == '__main__':
+  main()
+
diff --git a/tools/cpplint.py b/tools/cpplint.py
index 88fa9e9..64914a9 100755
--- a/tools/cpplint.py
+++ b/tools/cpplint.py
@@ -28,7 +28,7 @@
 # tree looking for C++ files to be linted. We also skip directories starting
 # with a "." such as ".git"
 SKIP_LINT_DIRS = [
-    os.path.join(SRC_ROOT_DIR, 'docker'),
+    os.path.join(SRC_ROOT_DIR, 'kubernetes'),
     os.path.join(SRC_ROOT_DIR, 'out'),
     os.path.join(SRC_ROOT_DIR, 'prototype'),
     os.path.join(SRC_ROOT_DIR, 'shuffler'),
diff --git a/tools/process_starter.py b/tools/process_starter.py
index 0fbcf7a..399752b 100644
--- a/tools/process_starter.py
+++ b/tools/process_starter.py
@@ -26,22 +26,32 @@
 
 DEMO_CONFIG_DIR = os.path.abspath(os.path.join(SRC_ROOT_DIR, 'config',
     'demo'))
+# Note(rudominer) Currently we don't have any real customers so our registered
+# config is our demo config
+REGISTERED_CONFIG_DIR = DEMO_CONFIG_DIR
 SHUFFLER_DEMO_CONFIG_FILE = os.path.abspath(os.path.join(SRC_ROOT_DIR,
     'shuffler', 'src', 'config', 'config_demo.txt'))
 SHUFFLER_DB_DIR = os.path.join("/tmp/cobalt_shuffler")
 
+SHUFFLER_CONFIG_DIR = os.path.abspath(os.path.join(SRC_ROOT_DIR, 'shuffler',
+    'src', 'config'))
+SHUFFLER_CONFIG_FILE = os.path.join(SHUFFLER_CONFIG_DIR, 'config_v0.txt')
+SHUFFLER_DEMO_CONFIG_FILE = os.path.join(SHUFFLER_CONFIG_DIR, 'config_demo.txt')
+SHUFFLER_TMP_DB_DIR = os.path.join("/tmp/cobalt_shuffler")
+
 DEFAULT_SHUFFLER_PORT=5001
 DEFAULT_ANALYZER_SERVICE_PORT=6001
 DEFAULT_REPORT_MASTER_PORT=7001
 
 DEFAULT_ANALYZER_PUBLIC_KEY_PEM=os.path.join(SRC_ROOT_DIR,
-                                             "analyzer_public_key.pem")
+                                             "analyzer_public.pem")
+ANALYZER_PRIVATE_KEY_PEM_NAME="analyzer_private.pem"
 DEFAULT_ANALYZER_PRIVATE_KEY_PEM=os.path.join(SRC_ROOT_DIR,
-                                             "analyzer_private_key.pem")
+                                             ANALYZER_PRIVATE_KEY_PEM_NAME)
 DEFAULT_SHUFFLER_PUBLIC_KEY_PEM=os.path.join(SRC_ROOT_DIR,
-                                             "shuffler_public_key.pem")
+                                             "shuffler_public.pem")
 DEFAULT_SHUFFLER_PRIVATE_KEY_PEM=os.path.join(SRC_ROOT_DIR,
-                                             "shuffler_private_key.pem")
+                                             "shuffler_private.pem")
 
 
 def kill_process(process, name):
@@ -93,10 +103,11 @@
   cmd = [path]
   return execute_command(cmd, wait)
 
-# If db_dir is not set then the shuffler will use the MemStore.
+SHUFFLER_PATH = os.path.abspath(os.path.join(OUT_DIR, 'shuffler', 'shuffler'))
 def start_shuffler(port=DEFAULT_SHUFFLER_PORT,
     analyzer_uri='localhost:%d' % DEFAULT_ANALYZER_SERVICE_PORT,
-    use_memstore=False, erase_db=True, config_file=SHUFFLER_DEMO_CONFIG_FILE,
+    use_memstore=False, erase_db=True, db_dir=SHUFFLER_TMP_DB_DIR,
+    config_file=SHUFFLER_DEMO_CONFIG_FILE,
     verbose_count=0, wait=True):
   """Starts the Shuffler.
 
@@ -109,8 +120,7 @@
     config_file {string} The path to the Shuffler's config file.
   """
   print
-  path = os.path.abspath(os.path.join(OUT_DIR, 'shuffler', 'shuffler'))
-  cmd = [path,
+  cmd = [SHUFFLER_PATH,
         "-port", str(port),
         "-analyzer_uri", analyzer_uri,
         "-config_file", config_file,
@@ -120,15 +130,17 @@
   if use_memstore:
     cmd.append("-use_memstore")
   else:
-    cmd = cmd + ["-db_dir", SHUFFLER_DB_DIR]
+    cmd = cmd + ["-db_dir", db_dir]
     if erase_db:
-      print "Erasing Shuffler's LevelDB store at %s." % SHUFFLER_DB_DIR
-      shutil.rmtree(SHUFFLER_DB_DIR, ignore_errors=True)
+      print "Erasing Shuffler's LevelDB store at %s." % db_dir
+      shutil.rmtree(db_dir, ignore_errors=True)
 
   print "Starting the shuffler..."
   print
   return execute_command(cmd, wait)
 
+ANALYZER_SERVICE_PATH = os.path.abspath(os.path.join(OUT_DIR, 'analyzer',
+    'analyzer_service', 'analyzer_service'))
 def start_analyzer_service(port=DEFAULT_ANALYZER_SERVICE_PORT,
     bigtable_project_name='', bigtable_instance_name='',
     private_key_pem_file=DEFAULT_ANALYZER_PRIVATE_KEY_PEM,
@@ -137,9 +149,7 @@
   print
   print "Starting the analyzer service..."
   print
-  path = os.path.abspath(os.path.join(OUT_DIR, 'analyzer', 'analyzer_service',
-      'analyzer_service'))
-  cmd = [path,
+  cmd = [ANALYZER_SERVICE_PATH,
       "-port", str(port),
       "-private_key_pem_file", private_key_pem_file,
       "-logtostderr"]
@@ -157,6 +167,8 @@
     cmd.append("-v=%d"%verbose_count)
   return execute_command(cmd, wait)
 
+REPORT_MASTER_PATH = os.path.abspath(os.path.join(OUT_DIR, 'analyzer',
+    'report_master', 'analyzer_report_master'))
 def start_report_master(port=DEFAULT_REPORT_MASTER_PORT,
                         bigtable_project_name='', bigtable_instance_name='',
                         cobalt_config_dir=DEMO_CONFIG_DIR,
@@ -164,9 +176,7 @@
   print
   print "Starting the analyzer ReportMaster service..."
   print
-  path = os.path.abspath(os.path.join(OUT_DIR, 'analyzer', 'report_master',
-      'analyzer_report_master'))
-  cmd = [path,
+  cmd = [REPORT_MASTER_PATH,
       "-port", str(port),
       "-cobalt_config_dir", cobalt_config_dir,
       "-logtostderr"]
@@ -212,10 +222,16 @@
 OBSERVATION_QUERIER_PATH = os.path.abspath(os.path.join(OUT_DIR, 'tools',
                                            'observation_querier',
                                            'query_observations'))
-def start_observation_querier(verbose_count=0):
+def start_observation_querier(bigtable_project_name='',
+                              bigtable_instance_name='',
+                              verbose_count=0):
   cmd = [OBSERVATION_QUERIER_PATH,
-      "-for_testing_only_use_bigtable_emulator",
       "-logtostderr"]
+  if not bigtable_project_name or not bigtable_instance_name:
+    cmd.append("-for_testing_only_use_bigtable_emulator")
+  else:
+    cmd = cmd + ["-bigtable_project_name", bigtable_project_name,
+                 "-bigtable_instance_name", bigtable_instance_name]
   if verbose_count > 0:
     cmd.append("-v=%d"%verbose_count)
   return execute_command(cmd, wait=True)