Initial commit of Bazel rules
diff --git a/BUILD b/BUILD
new file mode 100644
index 0000000..a397411
--- /dev/null
+++ b/BUILD
@@ -0,0 +1,5 @@
+exports_files([
+    "__init__.py",
+    "debug.bzl",
+    "subpar.bzl",
+])
diff --git a/WORKSPACE b/WORKSPACE
new file mode 100644
index 0000000..6be5ead
--- /dev/null
+++ b/WORKSPACE
@@ -0,0 +1,7 @@
+workspace(name = "subpar")
+
+# Used by integration tests
+local_repository(
+    name = "test_workspace",
+    path = __workspace_dir__ + "/test/test_workspace",
+)
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/__init__.py
diff --git a/debug.bzl b/debug.bzl
new file mode 100644
index 0000000..f6f3fad
--- /dev/null
+++ b/debug.bzl
@@ -0,0 +1,120 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def dump(obj, obj_name):
+    """Debugging method that recursively prints object fields to stderr
+
+    Args:
+      obj: Object to dump
+      obj_name: Name to print for that object
+
+    Example Usage:
+    ```
+    load("debug", "dump")
+    ...
+    dump(ctx, "ctx")
+    ```
+
+    Example Output:
+    ```
+    WARNING: /code/rrrrr/subpar/debug.bzl:11:5:
+    ctx[ctx]:
+        action[string]: <getattr(action) failed>
+        attr[struct]:
+            _action_listener[list]: []
+            _compiler[RuleConfiguredTarget]:
+                data_runfiles[runfiles]:
+    ```
+    """
+
+    s = '\n' + _dumpstr(obj, obj_name)
+    print(s)
+
+def _dumpstr(root_obj, root_obj_name):
+    """Helper method for dump() to just generate the string
+
+    Some fields always raise errors if we getattr() on them.  We
+    manually blacklist them here.  Other fields raise errors only if
+    we getattr() without a default.  Those are handled below.
+
+    A bug was filed against Bazel, but it got fixed in a way that
+    didn't actually fix this.
+
+    """
+    BLACKLIST = [
+        "InputFileConfiguredTarget.output_group",
+        "Label.Label",
+        "Label.relative",
+        "License.to_json",
+        "RuleConfiguredTarget.output_group",
+        "ctx.action",
+        "ctx.check_placeholders",
+        "ctx.empty_action",
+        "ctx.expand",
+        "ctx.expand_location",
+        "ctx.expand_make_variables",
+        "ctx.file_action",
+        "ctx.middle_man",
+        "ctx.new_file",
+        "ctx.resolve_command",
+        "ctx.rule",
+        "ctx.runfiles",
+        "ctx.template_action",
+        "ctx.tokenize",
+        "fragments.apple",
+        "fragments.cpp",
+        "fragments.java",
+        "fragments.jvm",
+        "fragments.objc",
+        "runfiles.symlinks",
+        "struct.output_licenses",
+        "struct.to_json",
+        "struct.to_proto",
+    ]
+    MAXLINES = 4000
+    ROOT_MAXDEPTH = 5
+
+    # List of printable lines
+    lines = []
+
+    # Bazel doesn't allow a function to recursively call itself, so
+    # use an explicit stack
+    stack = [(root_obj, root_obj_name, 0, ROOT_MAXDEPTH)]
+    # Bazel has no while loops, so use a for loop over a large range
+    for _ in range(MAXLINES):
+        if len(stack) == 0:
+            break
+        obj, obj_name, indent, maxdepth = stack.pop()
+
+        obj_type = type(obj)
+        indent_str = ' '*indent
+        line = '{indent_str}{obj_name}[{obj_type}]:'.format(
+            indent_str=indent_str, obj_name=obj_name, obj_type=obj_type)
+
+        if maxdepth == 0 or obj_type in ['dict', 'list', 'set', 'string']:
+            # Dump value as string, inline
+            line += ' ' + str(obj)
+        else:
+            # Dump all of value's fields on separate lines
+            attrs = dir(obj)
+            # Push each field onto the stack in reverse order, so they
+            # pop in sorted order
+            for attr in reversed(attrs):
+                if "%s.%s" % (obj_type, attr) in BLACKLIST:
+                    value = '<blacklisted attr (%s)>' % attr
+                else:
+                    value = getattr(obj, attr, '<getattr(%s) failed>' % attr)
+                stack.append((value, attr, indent+4, maxdepth-1))
+        lines.append(line)
+    return '\n'.join(lines)
diff --git a/subpar.bzl b/subpar.bzl
new file mode 100644
index 0000000..ad68d30
--- /dev/null
+++ b/subpar.bzl
@@ -0,0 +1,155 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Build self-contained python executables."""
+
+load("//:debug.bzl", "dump")
+
+def _parfile_impl(ctx):
+    """Implementation of parfile() rule"""
+    # Find the main entry point
+    py_files = ctx.files.main
+    if len(py_files) == 0:
+        fail('Expected exactly one .py file, found none', 'main')
+    elif len(py_files) > 1:
+        fail('Expected exactly one .py file, found these: [%s]' % py_files, 'main')
+    main_py_file = py_files[0]
+    if main_py_file not in ctx.attr.src.data_runfiles.files:
+        fail('Main entry point [%s] not listed in srcs' % main_py_file, 'main')
+
+    # Find the list of things that must be built before this thing is built
+    # TODO: also handle ctx.attr.src.data_runfiles.symlinks
+    inputs = list(ctx.attr.src.default_runfiles.files)
+
+    # Make a manifest of files to store in the .par file.  The
+    # runfiles manifest is not quite right, so we make our own.
+    sources_map = {}
+    # First, add the zero-length __init__.py files
+    for empty in ctx.attr.src.default_runfiles.empty_filenames:
+        stored_path = _prepend_workspace(empty, ctx)
+        local_path = ''
+        sources_map[stored_path] = local_path
+    # Now add the regular (source and generated) files
+    for input_file in inputs:
+        stored_path = _prepend_workspace(input_file.short_path, ctx)
+        local_path = input_file.path
+        sources_map[stored_path] = local_path
+    # Now make a nice sorted list
+    sources_lines = []
+    for k, v in sorted(sources_map.items()):
+        sources_lines.append('%s %s' % (k, v))
+    sources_content = '\n'.join(sources_lines) + '\n'
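+    # A sketch of the resulting manifest content (workspace and file
+    # names here are hypothetical): each line maps the path stored in
+    # the .par file to the path of the file on disk, and the empty
+    # __init__.py entries have an empty local path:
+    #
+    #   myworkspace/foo/__init__.py
+    #   myworkspace/foo/bar.py foo/bar.py
+    #   myworkspace/foo/hello.py foo/hello.py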
+
+    # Write the list to the manifest file
+    sources_file = ctx.new_file(ctx.label.name + '_SOURCES')
+    ctx.file_action(
+        output=sources_file,
+        content=sources_content,
+        executable=False)
+
+    # Find the list of directories to add to sys.path
+    # TODO(b/29227737): Use 'imports' provider from Bazel
+    stub_file = ctx.attr.src.files_to_run.executable.path
+
+    # Inputs to the action that don't actually get stored in the .par file
+    extra_inputs = [
+        sources_file,
+        ctx.attr.src.files_to_run.executable,
+        ctx.attr.src.files_to_run.runfiles_manifest,
+        ]
+
+    # Assemble command line for .par compiler
+    args = [
+        '--imports_from_stub', stub_file,
+        '--manifest_file', sources_file.path,
+        '--outputpar', ctx.outputs.executable.path,
+        main_py_file.path,
+    ]
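+    # For illustration only, the assembled command line looks roughly
+    # like this (compiler binary name and paths are hypothetical):
+    #   compiler --imports_from_stub .../hello \
+    #       --manifest_file .../hello.par_SOURCES \
+    #       --outputpar .../hello.par hello.py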
+    ctx.action(
+        inputs=inputs + extra_inputs,
+        outputs=[ctx.outputs.executable],
+        progress_message='Building par file %s' % ctx.label,
+        executable=ctx.executable._compiler,
+        arguments=args,
+        mnemonic="PythonCompile",
+    )
+
+    # The .par file itself has no runfiles and no providers
+    return struct()
+
+def _prepend_workspace(path, ctx):
+    """Given a path, prepend the workspace name as the parent directory"""
+    # It feels like there should be an easier, less fragile way.
+    if path.startswith('../'):
+        # External workspace
+        stored_path = path[len('../'):]
+    else:
+        # Main workspace
+        stored_path = ctx.workspace_name + '/' + path
+    return stored_path
+
+# Rule to create a parfile given a py_binary() as input
+parfile = rule(
+    attrs = {
+        "src": attr.label(mandatory = True),
+        "main": attr.label(
+            mandatory = True,
+            allow_files = True,
+            single_file = True,
+        ),
+        "imports": attr.string_list(default = []),
+        "_compiler": attr.label(
+            default = Label("//compiler"),
+            executable = True,
+        ),
+    },
+    executable = True,
+    implementation = _parfile_impl,
+)
+
+"""A self-contained, single-file Python program, with a .par file extension.
+
+You probably want to use par_binary() instead of this.
+
+Args:
+  src: A py_binary() target
+  main: The name of the source file that is the main entry point of
+    the application.
+
+    See [py_binary.main](http://www.bazel.io/docs/be/python.html#py_binary.main)
+
+  imports: List of import directories to be added to the PYTHONPATH.
+
+    See [py_binary.imports](http://www.bazel.io/docs/be/python.html#py_binary.imports)
+
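+Example of wrapping an existing py_binary() (a rough sketch; target and
+file names are hypothetical):
+
+```
+py_binary(
+    name = "hello",
+    srcs = ["hello.py"],
+)
+
+parfile(
+    name = "hello.par",
+    src = ":hello",
+    main = "hello.py",
+)
+```
+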
+TODO(b/27502830): A directory foo.par.runfiles is also created. This
+is a bug; don't use or depend on it.
+
+"""
+
+def par_binary(name, **kwargs):
+    """An executable Python program.
+
+    par_binary() is a drop-in replacement for py_binary() that also
+    builds a self-contained, single-file executable for the
+    application, with a .par file extension.
+
+    See [py_binary](http://www.bazel.io/docs/be/python.html#py_binary)
+    for arguments and usage.
+
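+    Example BUILD usage (a rough sketch; target and file names are
+    hypothetical, and the load label assumes the rule is used from
+    this workspace):
+
+    ```
+    load("//:subpar.bzl", "par_binary")
+
+    par_binary(
+        name = "hello",
+        srcs = ["hello.py"],
+    )
+    ```
+
+    Building ':hello.par' produces the self-contained .par executable
+    in addition to the regular py_binary() outputs.
+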
+    """
+    native.py_binary(name=name, **kwargs)
+    main = kwargs.get('main', name + '.py')
+    imports = kwargs.get('imports')
+    parfile(name=name + '.par', src=name, main=main, imports=imports)