Merge remote-tracking branch 'origin/master' into bump_v1.7.0

* origin/master: (999 commits)
  Review feedback:     - Match verbiage with other output     - Remove dead code and clearer flow
  Vendoring in libnetwork 2da2dc055de5a474c8540871ad88a48213b0994f
  Restore the stripped registry version number
  Use SELinux labels for volumes
  apply selinux labels volume patch on volumes refactor
  Modify volume mounts SELinux labels on the fly based on :Z or :z
  Remove unused code
  Remove redundant set header
  Return err if we got err on parseForm
  script cleaned up
  Fix unregister stats on when rm running container
  Fix container unmount networkMounts
  Windows: Set default exec driver to windows
  Fixes title, line wrap, and Adds install area Tibor's comment Updating with the new plugins Entering comments from Seb
  Add regression test to make sure we can load old containers with volumes.
  Do not force `syscall.Unmount` on container cleanup.
  Revert "Add docker exec run a command in privileged mode"
  Cleanup container rm funcs
  Allow mirroring only for the official index
  Registry v2 mirror support.
  ...

Conflicts:
	CHANGELOG.md
	VERSION
	api/client/commands.go
	api/client/utils.go
	api/server/server.go
	api/server/server_linux.go
	builder/shell_parser.go
	builder/words
	daemon/config.go
	daemon/container.go
	daemon/daemon.go
	daemon/delete.go
	daemon/execdriver/execdrivers/execdrivers_linux.go
	daemon/execdriver/lxc/driver.go
	daemon/execdriver/native/driver.go
	daemon/graphdriver/aufs/aufs.go
	daemon/graphdriver/driver.go
	daemon/logger/syslog/syslog.go
	daemon/networkdriver/bridge/driver.go
	daemon/networkdriver/portallocator/portallocator.go
	daemon/networkdriver/portmapper/mapper.go
	daemon/networkdriver/portmapper/mapper_test.go
	daemon/volumes.go
	docs/Dockerfile
	docs/man/docker-create.1.md
	docs/man/docker-login.1.md
	docs/man/docker-logout.1.md
	docs/man/docker-run.1.md
	docs/man/docker.1.md
	docs/mkdocs.yml
	docs/s3_website.json
	docs/sources/installation/windows.md
	docs/sources/reference/api/docker_remote_api_v1.18.md
	docs/sources/reference/api/registry_api_client_libraries.md
	docs/sources/reference/builder.md
	docs/sources/reference/run.md
	docs/sources/release-notes.md
	graph/graph.go
	graph/push.go
	hack/install.sh
	hack/vendor.sh
	integration-cli/docker_cli_build_test.go
	integration-cli/docker_cli_pull_test.go
	integration-cli/docker_cli_run_test.go
	pkg/archive/changes.go
	pkg/broadcastwriter/broadcastwriter.go
	pkg/ioutils/readers.go
	pkg/ioutils/readers_test.go
	pkg/progressreader/progressreader.go
	registry/auth.go
	vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
	vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
	vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
	vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
	vendor/src/github.com/docker/libcontainer/container_linux.go
	vendor/src/github.com/docker/libcontainer/init_linux.go
	vendor/src/github.com/docker/libcontainer/integration/exec_test.go
	vendor/src/github.com/docker/libcontainer/integration/utils_test.go
	vendor/src/github.com/docker/libcontainer/nsinit/README.md
	vendor/src/github.com/docker/libcontainer/process.go
	vendor/src/github.com/docker/libcontainer/rootfs_linux.go
	vendor/src/github.com/docker/libcontainer/update-vendor.sh
	vendor/src/github.com/docker/libnetwork/portallocator/portallocator_test.go
diff --git a/.gitignore b/.gitignore
index 9696b9a..61a00fe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,33 +1,34 @@
 # Docker project generated files to ignore
 #  if you want to ignore files created by your editor/tools,
 #  please consider a global .gitignore https://help.github.com/articles/ignoring-files
-.vagrant*
-bin
-docker/docker
 *.exe
-.*.swp
-a.out
 *.orig
-build_src
-.flymake*
-.idea
+*.rej
+*.test
+.*.swp
 .DS_Store
+.bashrc
+.dotcloud
+.flymake*
+.git/
+.gopath/
+.hg/
+.idea
+.vagrant*
+Vagrantfile
+a.out
+autogen/
+bin
+build_src
+bundles/
+docker/docker
+docs/AWS_S3_BUCKET
+docs/GITCOMMIT
+docs/GIT_BRANCH
+docs/VERSION
 docs/_build
 docs/_static
 docs/_templates
-.gopath/
-.dotcloud
-*.test
-bundles/
-.hg/
-.git/
-vendor/pkg/
-pyenv
-Vagrantfile
-docs/AWS_S3_BUCKET
-docs/GIT_BRANCH
-docs/VERSION
-docs/GITCOMMIT
 docs/changed-files
-autogen/
-.bashrc
+pyenv
+vendor/pkg/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 28d9569..0894d62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,10 +21,10 @@
 
 #### Builder
 + Building images from an image ID
-+ build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...`
++ Build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...`
 + `commit --change` to apply specified Dockerfile instructions while committing the image
 + `import --change` to apply specified Dockerfile instructions while importing the image
-+ basic build cancellation
++ Builds no longer continue in the background when canceled with CTRL-C
 
 #### Client
 + Windows Support
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e6bf6ad..d9068d1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -129,12 +129,12 @@
   <col width="45%">
   <col width="65%">
   <tr>
-    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</th>
+    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
     <td>
       <p>
         IRC a direct line to our most knowledgeable Docker users; we have
-        both the  <code>#docker</code> and <code>#docker-dev</code> group on 
-        <strong>irc.freenode.net</strong>.  
+        both the  <code>#docker</code> and <code>#docker-dev</code> group on
+        <strong>irc.freenode.net</strong>.
         IRC is a rich chat protocol but it can overwhelm new users. You can search
         <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
       </p>
@@ -146,9 +146,9 @@
     <td>
       There are two groups.
       <a href="https://groups.google.com/forum/#!forum/docker-user" target="_blank">Docker-user</a>
-      is for people using Docker containers. 
-      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a> 
-      group is for contributors and other people contributing to the Docker 
+      is for people using Docker containers.
+      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
+      group is for contributors and other people contributing to the Docker
       project.
     </td>
   </tr>
@@ -156,15 +156,15 @@
     <td>Twitter</td>
     <td>
       You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
-      to get updates on our products. You can also tweet us questions or just 
+      to get updates on our products. You can also tweet us questions or just
       share blogs or stories.
     </td>
   </tr>
   <tr>
     <td>Stack Overflow</td>
     <td>
-      Stack Overflow has over 7000K Docker questions listed. We regularly 
-      monitor <a href="http://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
+      Stack Overflow has over 7000K Docker questions listed. We regularly
+      monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
       and so do many other knowledgeable Docker users.
     </td>
   </tr>
diff --git a/Dockerfile b/Dockerfile
index b064076..a74712a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -26,11 +26,15 @@
 FROM ubuntu:14.04
 MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
 
+RUN	apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61
+RUN	echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
+
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
 	apparmor \
 	aufs-tools \
 	automake \
+	bash-completion \
 	btrfs-tools \
 	build-essential \
 	curl \
@@ -49,6 +53,8 @@
 	ruby1.9.1 \
 	ruby1.9.1-dev \
 	s3cmd=1.1.0* \
+	ubuntu-zfs \
+	libzfs-dev \
 	--no-install-recommends
 
 # Get lvm2 source for compiling statically
@@ -97,12 +103,19 @@
 			./make.bash --no-clean 2>&1; \
 	done
 
-# We still support compiling with older Go, so need to grab older "gofmt"
-ENV GOFMT_VERSION 1.3.3
-RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt
+# This has been commented out and kept as reference because we don't support compiling with older Go anymore.
+# ENV GOFMT_VERSION 1.3.3
+# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt
 
+# Update this sha when we upgrade to go 1.5.0
+ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9
 # Grab Go's cover tool for dead-simple code coverage testing
-RUN go get golang.org/x/tools/cmd/cover
+# Grab Go's vet tool for examining go code to find suspicious constructs
+# and help prevent errors that the compiler might not catch
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \
+	&& go install -v golang.org/x/tools/cmd/cover \
+	&& go install -v golang.org/x/tools/cmd/vet
 
 # TODO replace FPM with some very minimal debhelper stuff
 RUN gem install --no-rdoc --no-ri fpm --version 1.3.2
@@ -113,7 +126,8 @@
 	&& git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \
 	&& (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \
 	&& GOPATH=/go/src/github.com/docker/distribution/Godeps/_workspace:/go \
-		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry
+		go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& rm -rf /go/src/github.com/docker/distribution/
 
 # Get the "docker-py" source so we can run their integration tests
 ENV DOCKER_PY_COMMIT 91985b239764fe54714fa0a93d52aa362357d251
@@ -137,32 +151,37 @@
 
 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion
+ENV DOCKER_BUILDTAGS apparmor selinux
 
 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
 
+# Register Docker's bash completion.
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
+
 # Get useful and necessary Hub images so we can "docker load" locally instead of pulling
 COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/
 RUN ./contrib/download-frozen-image.sh /docker-frozen-images \
 	busybox:latest@4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 \
-	hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5
+	hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5 \
+	jess/unshare@5c9f6ea50341a2a8eb6677527f2bdedbf331ae894a41714fda770fb130f3314d
 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
 
-# Install man page generator
-COPY vendor /go/src/github.com/docker/docker/vendor
-# (copy vendor/ because go-md2man needs golang.org/x/net)
+# Download man page generator
 RUN set -x \
 	&& git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \
-	&& git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday \
-	&& go install -v github.com/cpuguy83/go-md2man
+	&& git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday
 
-# install toml validator
+# Download toml validator
 ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
 RUN set -x \
 	&& git clone https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \
-	&& (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT) \
-	&& go install -v github.com/BurntSushi/toml/cmd/tomlv
+	&& (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT)
+
+# copy vendor/ because go-md2man needs golang.org/x/net
+COPY vendor /go/src/github.com/docker/docker/vendor
+RUN go install -v github.com/cpuguy83/go-md2man \
+	github.com/BurntSushi/toml/cmd/tomlv
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
diff --git a/LICENSE b/LICENSE
index 508036e..c7a3f0c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,7 +1,7 @@
 
                                  Apache License
                            Version 2.0, January 2004
-                        http://www.apache.org/licenses/
+                        https://www.apache.org/licenses/
 
    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
@@ -182,7 +182,7 @@
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
 
-       http://www.apache.org/licenses/LICENSE-2.0
+       https://www.apache.org/licenses/LICENSE-2.0
 
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/MAINTAINERS b/MAINTAINERS
index 04951bf..0e06d88 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -37,7 +37,7 @@
 		text = """
 Docker follows the timeless, highly efficient and totally unfair system
 known as [Benevolent dictator for
-life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with
+life](https://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with
 yours truly, Solomon Hykes, in the role of BDFL. This means that all
 decisions are made, by default, by Solomon. Since making every decision
 myself would be highly un-scalable, in practice decisions are spread
@@ -113,7 +113,7 @@
 manner.
 
 * If the change affects areas of the code which are not part of a subsystem,
-or if subsystem maintainers are unable to reach a timely decision, it must be approved by 
+or if subsystem maintainers are unable to reach a timely decision, it must be approved by
 the core maintainers.
 
 * If the change affects the UI or public APIs, or if it represents a major change in architecture,
@@ -131,7 +131,7 @@
 """
 
 		# Triage
-		[Rules.review.states.0-triage]
+		[Rules.review.states.0-needs-triage]
 
 			# Maintainers are expected to triage new incoming pull requests by removing
 			# the `0-triage` label and adding the correct labels (e.g. `1-design-review`)
@@ -149,7 +149,7 @@
 			1-design-review = "general case"
 
 		# Design review
-		[Rules.review.states.1-design-review]
+		[Rules.review.states.1-needs-design-review]
 
 			# Maintainers are expected to comment on the design of the pull request.
 			# Review of documentation is expected only in the context of design validation,
@@ -166,7 +166,7 @@
 			2-code-review = "general case"
 
 		# Code review
-		[Rules.review.states.2-code-review]
+		[Rules.review.states.2-needs-code-review]
 
 			# Maintainers are expected to review the code and ensure that it is good
 			# quality and in accordance with the documentation in the PR.
@@ -184,7 +184,7 @@
 			3-docs-review = "general case"
 
 		# Docs review
-		[Rules.review.states.3-docs-review]
+		[Rules.review.states.3-needs-docs-review]
 
 			# Maintainers are expected to review the documentation in its bigger context,
 			# ensuring consistency, completeness, validity, and breadth of coverage across
@@ -193,6 +193,11 @@
 			# They should ask for any editorial change that makes the documentation more
 			# consistent and easier to understand.
 			#
+			# Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of
+			# two docs sub-project maintainers. If the docs change originates with a docs
+			# maintainer, only one additional LGTM is required (since we assume a docs maintainer
+			# approves of their own PR).
+			#
 			# Once documentation is approved (see below), a maintainer should make sure to remove this
 			# label and add the next one.
 
@@ -200,14 +205,9 @@
 			2-code-review = "requires more code changes"
 			1-design-review = "raises design concerns"
 			4-merge = "general case"
-			
-		# Docs approval
-		[Rules.review.docs-approval]
-			# Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs sub-project maintainers.
-			# If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR). 	
 
 		# Merge
-		[Rules.review.states.4-merge]
+		[Rules.review.states.4-needs-merge]
 
 			# Maintainers are expected to merge this pull request as soon as possible.
 			# They can ask for a rebase, or carry the pull request themselves.
@@ -268,7 +268,7 @@
 
 	# The chief architect is responsible for the overall integrity of the technical architecture
 	# across all subsystems, and the consistency of APIs and UI.
-	# 
+	#
 	# Changes to UI, public APIs and overall architecture (for example a plugin system) must
 	# be approved by the chief architect.
 	"Chief Architect" = "shykes"
@@ -296,7 +296,9 @@
 		[Org.Operators.security]
 
 			people = [
-				"erw"
+				"erw",
+				"diogomonica",
+				"nathanmccauley"
 			]
 
 		[Org.Operators."monthly meetings"]
@@ -312,13 +314,29 @@
 				"jfrazelle",
 				"crosbymichael"
 			]
+		
+		[Org.Operators.community]
+			people = [
+				"theadactyl"
+			]
 
 	# The chief maintainer is responsible for all aspects of quality for the project including
-	# code reviews, usability, stability, security, performance, etc. 
+	# code reviews, usability, stability, security, performance, etc.
 	# The most important function of the chief maintainer is to lead by example. On the first
 	# day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll
 	# be fine".
 	"Chief Maintainer" = "crosbymichael"
+	
+	# The community manager is responsible for serving the project community, including users, 
+	# contributors and partners. This involves:
+	#	- facilitating communication between maintainers, contributors and users
+	#	- organizing contributor and maintainer events
+	#	- helping new contributors get involved
+	#	- anything the project community needs to be successful
+	#
+	# The community manager is a point of contact for any contributor who has questions, concerns 
+	# or feedback about project operations. 
+	"Community Manager" = "theadactyl"
 
 	[Org."Core maintainers"]
 
@@ -339,14 +357,15 @@
 
 
 		people = [
-			"unclejack",
 			"crosbymichael",
 			"erikh",
 			"estesp",
 			"icecrime",
 			"jfrazelle",
 			"lk4d4",
+			"runcom",
 			"tibor",
+			"unclejack",
 			"vbatts",
 			"vieux",
 			"vishh"
@@ -359,80 +378,81 @@
 	# has a dedicated group of maintainers, which are dedicated to that subsytem and responsible
 	# for its quality.
 	# This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows.
-	# 
+	#
 	# The maintainers of each subsytem are responsible for:
-	# 
+	#
 	# 1. Exposing a clear road map for improving their subsystem.
 	# 2. Deliver prompt feedback and decisions on pull requests affecting their subsystem.
 	# 3. Be available to anyone with questions, bug reports, criticism etc.
-	#   on their component. This includes IRC, GitHub requests and the mailing
-	#   list.
+	#	on their component. This includes IRC, GitHub requests and the mailing
+	#	list.
 	# 4. Make sure their subsystem respects the philosophy, design and
-	#   road map of the project.
+	#	road map of the project.
 	#
 	# #### How to review patches to your subsystem
-	# 
+	#
 	# Accepting pull requests:
-	# 
-	#   - If the pull request appears to be ready to merge, give it a `LGTM`, which
-	#     stands for "Looks Good To Me".
-	#   - If the pull request has some small problems that need to be changed, make
-	#     a comment adressing the issues.
-	#   - If the changes needed to a PR are small, you can add a "LGTM once the
-	#     following comments are adressed..." this will reduce needless back and
-	#     forth.
-	#   - If the PR only needs a few changes before being merged, any MAINTAINER can
-	#     make a replacement PR that incorporates the existing commits and fixes the
-	#     problems before a fast track merge.
-	# 
+	#
+	#	- If the pull request appears to be ready to merge, give it a `LGTM`, which
+	#	  stands for "Looks Good To Me".
+	#	- If the pull request has some small problems that need to be changed, make
+	#	  a comment adressing the issues.
+	#	- If the changes needed to a PR are small, you can add a "LGTM once the
+	#	  following comments are adressed..." this will reduce needless back and
+	#	  forth.
+	#	- If the PR only needs a few changes before being merged, any MAINTAINER can
+	#	  make a replacement PR that incorporates the existing commits and fixes the
+	#	  problems before a fast track merge.
+	#
 	# Closing pull requests:
-	# 
-	#   - If a PR appears to be abandoned, after having attempted to contact the
-	#     original contributor, then a replacement PR may be made.  Once the
-	#     replacement PR is made, any contributor may close the original one.
-	#   - If you are not sure if the pull request implements a good feature or you
-	#     do not understand the purpose of the PR, ask the contributor to provide
-	#     more documentation.  If the contributor is not able to adequately explain
-	#     the purpose of the PR, the PR may be closed by any MAINTAINER.
-	#   - If a MAINTAINER feels that the pull request is sufficiently architecturally
-	#     flawed, or if the pull request needs significantly more design discussion
-	#     before being considered, the MAINTAINER should close the pull request with
-	#     a short explanation of what discussion still needs to be had.  It is
-	#     important not to leave such pull requests open, as this will waste both the
-	#     MAINTAINER's time and the contributor's time.  It is not good to string a
-	#     contributor on for weeks or months, having them make many changes to a PR
-	#     that will eventually be rejected.
+	#
+	#	- If a PR appears to be abandoned, after having attempted to contact the
+	#	  original contributor, then a replacement PR may be made. Once the
+	#	  replacement PR is made, any contributor may close the original one.
+	#	- If you are not sure if the pull request implements a good feature or you
+	#	  do not understand the purpose of the PR, ask the contributor to provide
+	#	  more documentation.  If the contributor is not able to adequately explain
+	#	  the purpose of the PR, the PR may be closed by any MAINTAINER.
+	#	- If a MAINTAINER feels that the pull request is sufficiently architecturally
+	#	  flawed, or if the pull request needs significantly more design discussion
+	#	  before being considered, the MAINTAINER should close the pull request with
+	#	  a short explanation of what discussion still needs to be had.  It is
+	#	  important not to leave such pull requests open, as this will waste both the
+	#	  MAINTAINER's time and the contributor's time.  It is not good to string a
+	#	  contributor on for weeks or months, having them make many changes to a PR
+	#	  that will eventually be rejected.
 
 		[Org.Subsystems.Documentation]
 
 			people = [
 				"fredlf",
 				"james",
-				"sven",
+				"moxiegirl",
+				"thaJeztah",
+				"jamtur01",
 				"spf13",
-				"mary"
+				"sven"
 			]
 
 		[Org.Subsystems.libcontainer]
 
 			people = [
 				"crosbymichael",
-				"vmarmol",
-				"mpatel",
 				"jnagal",
-				"lk4d4"
+				"lk4d4",
+				"mpatel",
+				"vmarmol"
 			]
 
 		[Org.Subsystems.registry]
 
 			people = [
+				"dmcg",
 				"dmp42",
-				"vbatts",
-				"joffrey",
+				"jlhawn",
 				"samalba",
 				"sday",
-				"jlhawn",
-				"dmcg"
+				"vbatts"
 			]
 
 		[Org.Subsystems."build tools"]
@@ -471,11 +491,27 @@
 		[Org.Subsystem.builder]
 
 			people = [
+				"duglin",
 				"erikh",
-				"tibor",
-				"duglin"
+				"tibor"
 			]
 
+	[Org.Curators]
+
+	# The curators help ensure that incoming issues and pull requests are properly triaged and
+	# that our various contribution and reviewing processes are respected. With their knowledge of
+	# the repository activity, they can also guide contributors to relevant material or
+	# discussions.
+	#
+	# They are neither code nor docs reviewers, so they are never expected to merge. They can
+	# however:
+	# - close an issue or pull request when it's an exact duplicate
+	# - close an issue or pull request when it's inappropriate or off-topic
+
+	people = [
+		"thajeztah"
+	]
+
 
 [people]
 
@@ -500,11 +536,21 @@
 	Email = "ben@firshman.co.uk"
 	GitHub = "bfirsh"
 
+	[people.cpuguy83]
+	Name = "Brian Goff"
+	Email = "cpuguy83@gmail.com"
+	Github = "cpuguy83"
+
 	[people.crosbymichael]
 	Name = "Michael Crosby"
 	Email = "crosbymichael@gmail.com"
 	GitHub = "crosbymichael"
 
+	[people.diogomonica]
+	Name = "Diogo Monica"
+	Email = "diogo@docker.com"
+	GitHub = "diogomonica"
+
 	[people.duglin]
 	Name = "Doug Davis"
 	Email = "dug@us.ibm.com"
@@ -552,7 +598,7 @@
 
 	[people.jfrazelle]
 	Name = "Jessie Frazelle"
-	Email = "jess@docker.com"
+	Email = "j@docker.com"
 	GitHub = "jfrazelle"
 
 	[people.jlhawn]
@@ -560,21 +606,26 @@
 	Email = "josh.hawn@docker.com"
 	Github = "jlhawn"
 
-	[people.joffrey]
-	Name = "Joffrey Fuhrer"
-	Email = "joffrey@docker.com"
-	Github = "shin-"
-
 	[people.lk4d4]
 	Name = "Alexander Morozov"
 	Email = "lk4d4@docker.com"
 	GitHub = "lk4d4"
 
-	[people.mary]
+	[people.moxiegirl]
 	Name = "Mary Anthony"
 	Email = "mary.anthony@docker.com"
 	GitHub = "moxiegirl"
 
+	[people.nathanmccauley]
+	Name = "Nathan McCauley"
+	Email = "nathan.mccauley@docker.com"
+	GitHub = "nathanmccauley"
+
+	[people.runcom]
+	Name = "Antonio Murdaca"
+	Email = "me@runcom.ninja"
+	GitHub = "runcom"
+
 	[people.sday]
 	Name = "Stephen Day"
 	Email = "stephen.day@docker.com"
@@ -584,17 +635,27 @@
 	Name = "Solomon Hykes"
 	Email = "solomon@docker.com"
 	GitHub = "shykes"
-    
+
 	[people.spf13]
 	Name = "Steve Francia"
 	Email = "steve.francia@gmail.com"
 	GitHub = "spf13"
-	
+
 	[people.sven]
 	Name = "Sven Dowideit"
 	Email = "SvenDowideit@home.org.au"
 	GitHub = "SvenDowideit"
 
+	[people.thajeztah]
+	Name = "Sebastiaan van Stijn"
+	Email = "github@gone.nl"
+	GitHub = "thaJeztah"
+	
+	[people.theadactyl]
+	Name = "Thea Lamkin"
+	Email = "thea@docker.com"
+	GitHub = "theadactyl"
+
 	[people.tianon]
 	Name = "Tianon Gravi"
 	Email = "admwiggin@gmail.com"
diff --git a/Makefile b/Makefile
index 9bf1b16..d73c8c1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration-cli test-docker-py validate
 
 # env vars passed through directly to Docker's build scripts
 # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
@@ -7,7 +7,10 @@
 	-e BUILDFLAGS \
 	-e DOCKER_CLIENTONLY \
 	-e DOCKER_EXECDRIVER \
+	-e DOCKER_EXPERIMENTAL \
 	-e DOCKER_GRAPHDRIVER \
+	-e DOCKER_STORAGE_OPTS \
+	-e DOCKER_USERLANDPROXY \
 	-e TESTDIRS \
 	-e TESTFLAGS \
 	-e TIMEOUT
@@ -26,7 +29,7 @@
 DOCSPORT := 8000
 
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 
 DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
@@ -62,14 +65,11 @@
 	$(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh
 
 test: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-docker-py
+	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration-cli test-docker-py
 
 test-unit: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
 
-test-integration: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
-
 test-integration-cli: build
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
 
@@ -77,7 +77,7 @@
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py
 
 validate: build
-	$(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco validate-toml
+	$(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-gofmt validate-test validate-toml validate-vet
 
 shell: build
 	$(DOCKER_RUN_DOCKER) bash
diff --git a/NOTICE b/NOTICE
index 435ace7..6e6f469 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,7 +1,7 @@
 Docker
 Copyright 2012-2015 Docker, Inc.
 
-This product includes software developed at Docker, Inc. (http://www.docker.com).
+This product includes software developed at Docker, Inc. (https://www.docker.com).
 
 This product contains software (https://github.com/kr/pty) developed
 by Keith Rarick, licensed under the MIT License.
@@ -10,10 +10,10 @@
 
 
 Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.  
+United States and other governments.
 It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws. 
+violate applicable laws.
 
-For more information, please see http://www.bis.doc.gov
+For more information, please see https://www.bis.doc.gov
 
-See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/README.md b/README.md
index 079713c..ad15e56 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 ==================================
 
 Docker is an open source project to pack, ship and run any application
-as a lightweight container
+as a lightweight container.
 
 Docker containers are both *hardware-agnostic* and *platform-agnostic*.
 This means they can run anywhere, from your laptop to the largest
@@ -13,7 +13,7 @@
 or provider.
 
 Docker began as an open-source implementation of the deployment engine which
-powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service.
+powers [dotCloud](https://dotcloud.com), a popular Platform-as-a-Service.
 It benefits directly from the experience accumulated over several years
 of large-scale operation and support of hundreds of thousands of
 applications and databases.
@@ -56,12 +56,12 @@
 *containerization*. Unlike traditional virtualization, containerization
 takes place at the kernel level. Most modern operating system kernels
 now support the primitives necessary for containerization, including
-Linux with [openvz](http://openvz.org),
+Linux with [openvz](https://openvz.org),
 [vserver](http://linux-vserver.org) and more recently
 [lxc](http://lxc.sourceforge.net), Solaris with
-[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
+[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
 and FreeBSD with
-[Jails](http://www.freebsd.org/doc/handbook/jails.html).
+[Jails](https://www.freebsd.org/doc/handbook/jails.html).
 
 Docker builds on top of these low-level primitives to offer developers a
 portable format and runtime environment that solves all four problems.
@@ -105,7 +105,7 @@
     these situations with various degrees of ease - but they all
     handle them in different and incompatible ways, which again forces
     the developer to do extra work.
-  
+
   * *Custom dependencies*. A developer may need to prepare a custom
     version of their application's dependency. Some packaging systems
     can handle custom versions of a dependency, others can't - and all
@@ -115,7 +115,7 @@
 Docker solves the problem of dependency hell by giving the developer a simple
 way to express *all* their application's dependencies in one place, while
 streamlining the process of assembling them. If this makes you think of
-[XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't
+[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
 *replace* your favorite packaging systems. It simply orchestrates
 their use in a simple and repeatable way. How does it do that? With
 layers.
@@ -147,19 +147,19 @@
 bare metal and virtualized.  It is available as a binary on most modern
 Linux systems, or as a VM on Windows, Mac and other systems.
 
-We also offer an [interactive tutorial](http://www.docker.com/tryit/)
+We also offer an [interactive tutorial](https://www.docker.com/tryit/)
 for quickly learning the basics of using Docker.
 
-For up-to-date install instructions, see the [Docs](http://docs.docker.com).
+For up-to-date install instructions, see the [Docs](https://docs.docker.com).
 
 Usage examples
 ==============
 
 Docker can be used to run short-lived commands, long-running daemons
-(app servers, databases etc.), interactive shell sessions, etc.
+(app servers, databases, etc.), interactive shell sessions, etc.
 
 You can find a [list of real-world
-examples](http://docs.docker.com/examples/) in the
+examples](https://docs.docker.com/examples/) in the
 documentation.
 
 Under the hood
@@ -171,15 +171,15 @@
   [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c)
   and
   [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part)
-  capabilities of the Linux kernel;
-* The [Go](http://golang.org) programming language.
-* The [Docker Image Specification] (https://github.com/docker/docker/blob/master/image/spec/v1.md)
-* The [Libcontainer Specification] (https://github.com/docker/libcontainer/blob/master/SPEC.md)
+  capabilities of the Linux kernel
+* The [Go](https://golang.org) programming language
+* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
+* The [Libcontainer Specification](https://github.com/docker/libcontainer/blob/master/SPEC.md)
 
 Contributing to Docker
 ======================
 
-[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker)
+[![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
 [![Jenkins Build Status](https://jenkins.dockerproject.com/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.com/job/Docker%20Master/)
 
 Want to hack on Docker? Awesome! We have [instructions to help you get
@@ -207,17 +207,65 @@
 
 We are always open to suggestions on process improvements, and are always looking for more maintainers.
 
+### Talking to other Docker users and contributors
+
+<table class="tg">
+  <col width="45%">
+  <col width="65%">
+  <tr>
+    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
+    <td>
+      <p>
+        IRC a direct line to our most knowledgeable Docker users; we have
+        both the  <code>#docker</code> and <code>#docker-dev</code> group on
+        <strong>irc.freenode.net</strong>.
+        IRC is a rich chat protocol but it can overwhelm new users. You can search
+        <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
+      </p>
+      Read our <a href="https://docs.docker.com/project/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
+    </td>
+  </tr>
+  <tr>
+    <td>Google Groups</td>
+    <td>
+      There are two groups.
+      <a href="https://groups.google.com/forum/#!forum/docker-user" target="_blank">Docker-user</a>
+      is for people using Docker containers.
+      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
+      group is for contributors and other people contributing to the Docker
+      project.
+    </td>
+  </tr>
+  <tr>
+    <td>Twitter</td>
+    <td>
+      You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
+      to get updates on our products. You can also tweet us questions or just
+      share blogs or stories.
+    </td>
+  </tr>
+  <tr>
+    <td>Stack Overflow</td>
+    <td>
+      Stack Overflow has over 7000K Docker questions listed. We regularly
+      monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
+      and so do many other knowledgeable Docker users.
+    </td>
+  </tr>
+</table>
+
 ### Legal
 
 *Brought to you courtesy of our legal counsel. For more context,
-please see the "NOTICE" document in this repo.*
+please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
 
 Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.  
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws. 
+United States and other governments.
 
-For more information, please see http://www.bis.doc.gov
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
 
 
 Licensing
@@ -230,17 +278,18 @@
 =============================
 There are a number of projects under development that are based on Docker's
 core technology. These projects expand the tooling built around the
-Docker platform to broaden its application and utility. 
-
-If you know of another project underway that should be listed here, please help
-us keep this list up-to-date by submitting a PR.
+Docker platform to broaden its application and utility.
 
 * [Docker Registry](https://github.com/docker/distribution): Registry 
 server for Docker (hosting/delivery of repositories and images)
 * [Docker Machine](https://github.com/docker/machine): Machine management 
-for a container-centric world 
+for a container-centric world
 * [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering 
-system 
+system
 * [Docker Compose](https://github.com/docker/compose) (formerly Fig): 
 Define and run multi-container apps
+* [Kitematic](https://github.com/kitematic/kitematic): The easiest way to use 
+Docker on a Mac
 
+If you know of another project underway that should be listed here, please help 
+us keep this list up-to-date by submitting a PR.
diff --git a/VERSION b/VERSION
index fdd3be6..de023c9 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.6.2
+1.7.0-dev
diff --git a/api/client/attach.go b/api/client/attach.go
new file mode 100644
index 0000000..8ab3248
--- /dev/null
+++ b/api/client/attach.go
@@ -0,0 +1,83 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/signal"
+)
+
+// CmdAttach attaches to a running container.
+//
+// Usage: docker attach [OPTIONS] CONTAINER
+func (cli *DockerCli) CmdAttach(args ...string) error {
+	var (
+		cmd     = cli.Subcmd("attach", "CONTAINER", "Attach to a running container", true)
+		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
+		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process")
+	)
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+	name := cmd.Arg(0)
+
+	stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	var c types.ContainerJSON
+	if err := json.NewDecoder(stream).Decode(&c); err != nil {
+		return err
+	}
+
+	if !c.State.Running {
+		return fmt.Errorf("You cannot attach to a stopped container, start it first")
+	}
+
+	if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil {
+		return err
+	}
+
+	if c.Config.Tty && cli.isTerminalOut {
+		if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
+			logrus.Debugf("Error monitoring TTY size: %s", err)
+		}
+	}
+
+	var in io.ReadCloser
+
+	v := url.Values{}
+	v.Set("stream", "1")
+	if !*noStdin && c.Config.OpenStdin {
+		v.Set("stdin", "1")
+		in = cli.in
+	}
+
+	v.Set("stdout", "1")
+	v.Set("stderr", "1")
+
+	if *proxy && !c.Config.Tty {
+		sigc := cli.forwardAllSignals(cmd.Arg(0))
+		defer signal.StopCatch(sigc)
+	}
+
+	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), c.Config.Tty, in, cli.out, cli.err, nil, nil); err != nil {
+		return err
+	}
+
+	_, status, err := getExitCode(cli, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+	if status != 0 {
+		return StatusError{StatusCode: status}
+	}
+
+	return nil
+}
diff --git a/api/client/build.go b/api/client/build.go
new file mode 100644
index 0000000..b095971
--- /dev/null
+++ b/api/client/build.go
@@ -0,0 +1,310 @@
+package client
+
+import (
+	"bufio"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/graph/tags"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/jsonmessage"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/units"
+	"github.com/docker/docker/pkg/urlutil"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+const (
+	tarHeaderSize = 512
+)
+
+// CmdBuild builds a new image from the source code at a given path.
+//
+// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
+//
+// Usage: docker build [OPTIONS] PATH | URL | -
+func (cli *DockerCli) CmdBuild(args ...string) error {
+	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
+	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
+	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
+	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
+	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
+	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
+	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
+	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
+	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
+	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
+	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+	flCpuPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
+	flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
+	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
+	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
+	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
+
+	cmd.Require(flag.Exact, 1)
+	cmd.ParseFlags(args, true)
+
+	var (
+		context  archive.Archive
+		isRemote bool
+		err      error
+	)
+
+	_, err = exec.LookPath("git")
+	hasGit := err == nil
+	if cmd.Arg(0) == "-" {
+		// As a special case, 'docker build -' will build from either an empty context with the
+		// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
+		buf := bufio.NewReader(cli.in)
+		magic, err := buf.Peek(tarHeaderSize)
+		if err != nil && err != io.EOF {
+			return fmt.Errorf("failed to peek context header from STDIN: %v", err)
+		}
+		if !archive.IsArchive(magic) {
+			dockerfile, err := ioutil.ReadAll(buf)
+			if err != nil {
+				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
+			}
+
+			// -f option has no meaning when we're reading it from stdin,
+			// so just use our default Dockerfile name
+			*dockerfileName = api.DefaultDockerfileName
+			context, err = archive.Generate(*dockerfileName, string(dockerfile))
+		} else {
+			context = ioutil.NopCloser(buf)
+		}
+	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
+		isRemote = true
+	} else {
+		root := cmd.Arg(0)
+		if urlutil.IsGitURL(root) {
+			root, err = utils.GitClone(root)
+			if err != nil {
+				return err
+			}
+			defer os.RemoveAll(root)
+		}
+		if _, err := os.Stat(root); err != nil {
+			return err
+		}
+
+		absRoot, err := filepath.Abs(root)
+		if err != nil {
+			return err
+		}
+
+		filename := *dockerfileName // path to Dockerfile
+
+		if *dockerfileName == "" {
+			// No -f/--file was specified so use the default
+			*dockerfileName = api.DefaultDockerfileName
+			filename = filepath.Join(absRoot, *dockerfileName)
+
+			// Just to be nice ;-) look for 'dockerfile' too but only
+			// use it if we found it, otherwise ignore this check
+			if _, err = os.Lstat(filename); os.IsNotExist(err) {
+				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
+				if _, err = os.Lstat(tmpFN); err == nil {
+					*dockerfileName = strings.ToLower(*dockerfileName)
+					filename = tmpFN
+				}
+			}
+		}
+
+		origDockerfile := *dockerfileName // used for error msg
+		if filename, err = filepath.Abs(filename); err != nil {
+			return err
+		}
+
+		// Verify that 'filename' is within the build context
+		filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
+		if err != nil {
+			return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
+		}
+
+		// Now reset the dockerfileName to be relative to the build context
+		*dockerfileName, err = filepath.Rel(absRoot, filename)
+		if err != nil {
+			return err
+		}
+		// And canonicalize dockerfile name to a platform-independent one
+		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
+		if err != nil {
+			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", *dockerfileName, err)
+		}
+
+		if _, err = os.Lstat(filename); os.IsNotExist(err) {
+			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
+		}
+		var includes = []string{"."}
+
+		excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore"))
+		if err != nil {
+			return err
+		}
+
+		// If .dockerignore mentions .dockerignore or the Dockerfile
+		// then make sure we send both files over to the daemon
+		// because Dockerfile is, obviously, needed no matter what, and
+		// .dockerignore is needed to know if either one needs to be
+		// removed.  The deamon will remove them for us, if needed, after it
+		// parses the Dockerfile.
+		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
+		keepThem2, _ := fileutils.Matches(*dockerfileName, excludes)
+		if keepThem1 || keepThem2 {
+			includes = append(includes, ".dockerignore", *dockerfileName)
+		}
+
+		if err := utils.ValidateContextDirectory(root, excludes); err != nil {
+			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
+		}
+		options := &archive.TarOptions{
+			Compression:     archive.Uncompressed,
+			ExcludePatterns: excludes,
+			IncludeFiles:    includes,
+		}
+		context, err = archive.TarWithOptions(root, options)
+		if err != nil {
+			return err
+		}
+	}
+
+	// windows: show error message about modified file permissions
+	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
+	if runtime.GOOS == "windows" {
+		fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
+	}
+
+	var body io.Reader
+	// Setup an upload progress bar
+	// FIXME: ProgressReader shouldn't be this annoying to use
+	if context != nil {
+		sf := streamformatter.NewStreamFormatter()
+		body = progressreader.New(progressreader.Config{
+			In:        context,
+			Out:       cli.out,
+			Formatter: sf,
+			NewLines:  true,
+			ID:        "",
+			Action:    "Sending build context to Docker daemon",
+		})
+	}
+
+	var memory int64
+	if *flMemoryString != "" {
+		parsedMemory, err := units.RAMInBytes(*flMemoryString)
+		if err != nil {
+			return err
+		}
+		memory = parsedMemory
+	}
+
+	var memorySwap int64
+	if *flMemorySwap != "" {
+		if *flMemorySwap == "-1" {
+			memorySwap = -1
+		} else {
+			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
+			if err != nil {
+				return err
+			}
+			memorySwap = parsedMemorySwap
+		}
+	}
+	// Send the build context
+	v := &url.Values{}
+
+	//Check if the given image name can be resolved
+	if *tag != "" {
+		repository, tag := parsers.ParseRepositoryTag(*tag)
+		if err := registry.ValidateRepositoryName(repository); err != nil {
+			return err
+		}
+		if len(tag) > 0 {
+			if err := tags.ValidateTagName(tag); err != nil {
+				return err
+			}
+		}
+	}
+
+	v.Set("t", *tag)
+
+	if *suppressOutput {
+		v.Set("q", "1")
+	}
+	if isRemote {
+		v.Set("remote", cmd.Arg(0))
+	}
+	if *noCache {
+		v.Set("nocache", "1")
+	}
+	if *rm {
+		v.Set("rm", "1")
+	} else {
+		v.Set("rm", "0")
+	}
+
+	if *forceRm {
+		v.Set("forcerm", "1")
+	}
+
+	if *pull {
+		v.Set("pull", "1")
+	}
+
+	v.Set("cpusetcpus", *flCPUSetCpus)
+	v.Set("cpusetmems", *flCPUSetMems)
+	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
+	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
+	v.Set("cpuperiod", strconv.FormatInt(*flCpuPeriod, 10))
+	v.Set("memory", strconv.FormatInt(memory, 10))
+	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
+	v.Set("cgroupparent", *flCgroupParent)
+
+	v.Set("dockerfile", *dockerfileName)
+
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(cli.configFile.AuthConfigs)
+	if err != nil {
+		return err
+	}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+
+	if context != nil {
+		headers.Set("Content-Type", "application/tar")
+	}
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          body,
+		out:         cli.out,
+		headers:     headers,
+	}
+	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
+	if jerr, ok := err.(*jsonmessage.JSONError); ok {
+		// If no error code is set, default to 1
+		if jerr.Code == 0 {
+			jerr.Code = 1
+		}
+		return StatusError{Status: jerr.Message, StatusCode: jerr.Code}
+	}
+	return err
+}
diff --git a/api/client/cli.go b/api/client/cli.go
index e0ab419..f78827d 100644
--- a/api/client/cli.go
+++ b/api/client/cli.go
@@ -6,39 +6,53 @@
 	"errors"
 	"fmt"
 	"io"
-	"net"
 	"net/http"
 	"os"
+	"path/filepath"
 	"reflect"
 	"strings"
 	"text/template"
-	"time"
 
+	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/pkg/homedir"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/term"
-	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
 )
 
+// DockerCli represents the docker command line client.
+// Instances of the client can be returned from NewDockerCli.
 type DockerCli struct {
-	proto      string
-	addr       string
-	configFile *registry.ConfigFile
-	in         io.ReadCloser
-	out        io.Writer
-	err        io.Writer
-	keyFile    string
-	tlsConfig  *tls.Config
-	scheme     string
-	// inFd holds file descriptor of the client's STDIN, if it's a valid file
+	// proto holds the client protocol i.e. unix.
+	proto string
+	// addr holds the client address.
+	addr string
+
+	// configFile has the client configuration file
+	configFile *cliconfig.ConfigFile
+	// in holds the input stream and closer (io.ReadCloser) for the client.
+	in io.ReadCloser
+	// out holds the output stream (io.Writer) for the client.
+	out io.Writer
+	// err holds the error stream (io.Writer) for the client.
+	err io.Writer
+	// keyFile holds the key file as a string.
+	keyFile string
+	// tlsConfig holds the TLS configuration for the client, and will
+	// set the scheme to https in NewDockerCli if present.
+	tlsConfig *tls.Config
+	// scheme holds the scheme of the client i.e. https.
+	scheme string
+	// inFd holds the file descriptor of the client's STDIN (if valid).
 	inFd uintptr
-	// outFd holds file descriptor of the client's STDOUT, if it's a valid file
+	// outFd holds file descriptor of the client's STDOUT (if valid).
 	outFd uintptr
-	// isTerminalIn describes if client's STDIN is a TTY
+	// isTerminalIn indicates whether the client's STDIN is a TTY
 	isTerminalIn bool
-	// isTerminalOut describes if client's STDOUT is a TTY
+	// isTerminalOut dindicates whether the client's STDOUT is a TTY
 	isTerminalOut bool
-	transport     *http.Transport
+	// transport holds the client transport instance.
+	transport *http.Transport
 }
 
 var funcMap = template.FuncMap{
@@ -48,6 +62,14 @@
 	},
 }
 
+func (cli *DockerCli) Out() io.Writer {
+	return cli.out
+}
+
+func (cli *DockerCli) Err() io.Writer {
+	return cli.err
+}
+
 func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) {
 	camelArgs := make([]string, len(args))
 	for i, s := range args {
@@ -64,7 +86,7 @@
 	return method.Interface().(func(...string) error), true
 }
 
-// Cmd executes the specified command
+// Cmd executes the specified command.
 func (cli *DockerCli) Cmd(args ...string) error {
 	if len(args) > 1 {
 		method, exists := cli.getMethod(args[:2]...)
@@ -75,14 +97,18 @@
 	if len(args) > 0 {
 		method, exists := cli.getMethod(args[0])
 		if !exists {
-			fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
-			os.Exit(1)
+			return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0])
 		}
 		return method(args[1:]...)
 	}
 	return cli.CmdHelp()
 }
 
+// Subcmd is a subcommand of the main "docker" command.
+// A subcommand represents an action that can be performed
+// from the Docker command line client.
+//
+// To see all available subcommands, run "docker --help".
 func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bool) *flag.FlagSet {
 	var errorHandling flag.ErrorHandling
 	if exitOnError {
@@ -107,14 +133,8 @@
 	return flags
 }
 
-func (cli *DockerCli) LoadConfigFile() (err error) {
-	cli.configFile, err = registry.LoadConfig(homedir.Get())
-	if err != nil {
-		fmt.Fprintf(cli.err, "WARNING: %s\n", err)
-	}
-	return err
-}
-
+// CheckTtyInput checks if we are trying to attach to a container tty
+// from a non-tty client input stream, and if so, returns an error.
 func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
 	// In order to attach to a container tty, input stream for the client must
 	// be a tty itself: redirecting or piping the client standard input is
@@ -125,6 +145,10 @@
 	return nil
 }
 
+// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
+// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config
+// is set the client scheme will be set to https.
+// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035).
 func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, addr string, tlsConfig *tls.Config) *DockerCli {
 	var (
 		inFd          uintptr
@@ -149,27 +173,21 @@
 		err = out
 	}
 
-	// The transport is created here for reuse during the client session
+	// The transport is created here for reuse during the client session.
 	tr := &http.Transport{
 		TLSClientConfig: tlsConfig,
 	}
+	utils.ConfigureTCPTransport(tr, proto, addr)
 
-	// Why 32? See issue 8035
-	timeout := 32 * time.Second
-	if proto == "unix" {
-		// no need in compressing for local communications
-		tr.DisableCompression = true
-		tr.Dial = func(_, _ string) (net.Conn, error) {
-			return net.DialTimeout(proto, addr, timeout)
-		}
-	} else {
-		tr.Proxy = http.ProxyFromEnvironment
-		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
+	configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker"))
+	if e != nil {
+		fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e)
 	}
 
 	return &DockerCli{
 		proto:         proto,
 		addr:          addr,
+		configFile:    configFile,
 		in:            in,
 		out:           out,
 		err:           err,
diff --git a/api/client/client.go b/api/client/client.go
new file mode 100644
index 0000000..3170881
--- /dev/null
+++ b/api/client/client.go
@@ -0,0 +1,17 @@
+// Package client provides a command-line interface for Docker.
+//
+// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand.
+// See https://docs.docker.com/installation/ for instructions on installing Docker.
+package client
+
+import "fmt"
+
+// An StatusError reports an unsuccessful exit by a command.
+type StatusError struct {
+	Status     string
+	StatusCode int
+}
+
+func (e StatusError) Error() string {
+	return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
+}
diff --git a/api/client/commands.go b/api/client/commands.go
deleted file mode 100644
index 84e426e..0000000
--- a/api/client/commands.go
+++ /dev/null
@@ -1,2888 +0,0 @@
-package client
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"text/tabwriter"
-	"text/template"
-	"time"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/graph"
-	"github.com/docker/docker/nat"
-	"github.com/docker/docker/opts"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/common"
-	"github.com/docker/docker/pkg/fileutils"
-	"github.com/docker/docker/pkg/homedir"
-	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/docker/pkg/networkfs/resolvconf"
-	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/parsers/filters"
-	"github.com/docker/docker/pkg/progressreader"
-	"github.com/docker/docker/pkg/promise"
-	"github.com/docker/docker/pkg/signal"
-	"github.com/docker/docker/pkg/symlink"
-	"github.com/docker/docker/pkg/term"
-	"github.com/docker/docker/pkg/timeutils"
-	"github.com/docker/docker/pkg/units"
-	"github.com/docker/docker/pkg/urlutil"
-	"github.com/docker/docker/registry"
-	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
-)
-
-const (
-	tarHeaderSize = 512
-)
-
-func (cli *DockerCli) CmdHelp(args ...string) error {
-	if len(args) > 1 {
-		method, exists := cli.getMethod(args[:2]...)
-		if exists {
-			method("--help")
-			return nil
-		}
-	}
-	if len(args) > 0 {
-		method, exists := cli.getMethod(args[0])
-		if !exists {
-			fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0])
-			os.Exit(1)
-		} else {
-			method("--help")
-			return nil
-		}
-	}
-
-	flag.Usage()
-
-	return nil
-}
-
-func (cli *DockerCli) CmdBuild(args ...string) error {
-	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
-	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
-	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
-	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
-	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
-	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
-	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
-	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
-	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
-	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
-	flCpuShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
-	flCpuSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
-
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		context  archive.Archive
-		isRemote bool
-		err      error
-	)
-
-	_, err = exec.LookPath("git")
-	hasGit := err == nil
-	if cmd.Arg(0) == "-" {
-		// As a special case, 'docker build -' will build from either an empty context with the
-		// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
-		buf := bufio.NewReader(cli.in)
-		magic, err := buf.Peek(tarHeaderSize)
-		if err != nil && err != io.EOF {
-			return fmt.Errorf("failed to peek context header from STDIN: %v", err)
-		}
-		if !archive.IsArchive(magic) {
-			dockerfile, err := ioutil.ReadAll(buf)
-			if err != nil {
-				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
-			}
-
-			// -f option has no meaning when we're reading it from stdin,
-			// so just use our default Dockerfile name
-			*dockerfileName = api.DefaultDockerfileName
-			context, err = archive.Generate(*dockerfileName, string(dockerfile))
-		} else {
-			context = ioutil.NopCloser(buf)
-		}
-	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
-		isRemote = true
-	} else {
-		root := cmd.Arg(0)
-		if urlutil.IsGitURL(root) {
-			remoteURL := cmd.Arg(0)
-			if !urlutil.IsGitTransport(remoteURL) {
-				remoteURL = "https://" + remoteURL
-			}
-
-			root, err = ioutil.TempDir("", "docker-build-git")
-			if err != nil {
-				return err
-			}
-			defer os.RemoveAll(root)
-
-			if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
-				return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
-			}
-		}
-		if _, err := os.Stat(root); err != nil {
-			return err
-		}
-
-		absRoot, err := filepath.Abs(root)
-		if err != nil {
-			return err
-		}
-
-		filename := *dockerfileName // path to Dockerfile
-
-		if *dockerfileName == "" {
-			// No -f/--file was specified so use the default
-			*dockerfileName = api.DefaultDockerfileName
-			filename = filepath.Join(absRoot, *dockerfileName)
-
-			// Just to be nice ;-) look for 'dockerfile' too but only
-			// use it if we found it, otherwise ignore this check
-			if _, err = os.Lstat(filename); os.IsNotExist(err) {
-				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
-				if _, err = os.Lstat(tmpFN); err == nil {
-					*dockerfileName = strings.ToLower(*dockerfileName)
-					filename = tmpFN
-				}
-			}
-		}
-
-		origDockerfile := *dockerfileName // used for error msg
-		if filename, err = filepath.Abs(filename); err != nil {
-			return err
-		}
-
-		// Verify that 'filename' is within the build context
-		filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
-		if err != nil {
-			return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
-		}
-
-		// Now reset the dockerfileName to be relative to the build context
-		*dockerfileName, err = filepath.Rel(absRoot, filename)
-		if err != nil {
-			return err
-		}
-		// And canonicalize dockerfile name to a platform-independent one
-		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
-		if err != nil {
-			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", dockerfileName, err)
-		}
-
-		if _, err = os.Lstat(filename); os.IsNotExist(err) {
-			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
-		}
-		var includes = []string{"."}
-
-		excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore"))
-		if err != nil {
-			return err
-		}
-
-		// If .dockerignore mentions .dockerignore or the Dockerfile
-		// then make sure we send both files over to the daemon
-		// because Dockerfile is, obviously, needed no matter what, and
-		// .dockerignore is needed to know if either one needs to be
-		// removed.  The deamon will remove them for us, if needed, after it
-		// parses the Dockerfile.
-		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
-		keepThem2, _ := fileutils.Matches(*dockerfileName, excludes)
-		if keepThem1 || keepThem2 {
-			includes = append(includes, ".dockerignore", *dockerfileName)
-		}
-
-		if err = utils.ValidateContextDirectory(root, excludes); err != nil {
-			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
-		}
-		options := &archive.TarOptions{
-			Compression:     archive.Uncompressed,
-			ExcludePatterns: excludes,
-			IncludeFiles:    includes,
-		}
-		context, err = archive.TarWithOptions(root, options)
-		if err != nil {
-			return err
-		}
-	}
-
-	// windows: show error message about modified file permissions
-	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
-	if runtime.GOOS == "windows" {
-		log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
-	}
-
-	var body io.Reader
-	// Setup an upload progress bar
-	// FIXME: ProgressReader shouldn't be this annoying to use
-	if context != nil {
-		sf := utils.NewStreamFormatter(false)
-		body = progressreader.New(progressreader.Config{
-			In:        context,
-			Out:       cli.out,
-			Formatter: sf,
-			NewLines:  true,
-			ID:        "",
-			Action:    "Sending build context to Docker daemon",
-		})
-	}
-
-	var memory int64
-	if *flMemoryString != "" {
-		parsedMemory, err := units.RAMInBytes(*flMemoryString)
-		if err != nil {
-			return err
-		}
-		memory = parsedMemory
-	}
-
-	var memorySwap int64
-	if *flMemorySwap != "" {
-		if *flMemorySwap == "-1" {
-			memorySwap = -1
-		} else {
-			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
-			if err != nil {
-				return err
-			}
-			memorySwap = parsedMemorySwap
-		}
-	}
-	// Send the build context
-	v := &url.Values{}
-
-	//Check if the given image name can be resolved
-	if *tag != "" {
-		repository, tag := parsers.ParseRepositoryTag(*tag)
-		if err := registry.ValidateRepositoryName(repository); err != nil {
-			return err
-		}
-		if len(tag) > 0 {
-			if err := graph.ValidateTagName(tag); err != nil {
-				return err
-			}
-		}
-	}
-
-	v.Set("t", *tag)
-
-	if *suppressOutput {
-		v.Set("q", "1")
-	}
-	if isRemote {
-		v.Set("remote", cmd.Arg(0))
-	}
-	if *noCache {
-		v.Set("nocache", "1")
-	}
-	if *rm {
-		v.Set("rm", "1")
-	} else {
-		v.Set("rm", "0")
-	}
-
-	if *forceRm {
-		v.Set("forcerm", "1")
-	}
-
-	if *pull {
-		v.Set("pull", "1")
-	}
-
-	v.Set("cpusetcpus", *flCpuSetCpus)
-	v.Set("cpushares", strconv.FormatInt(*flCpuShares, 10))
-	v.Set("memory", strconv.FormatInt(memory, 10))
-	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
-
-	v.Set("dockerfile", *dockerfileName)
-
-	cli.LoadConfigFile()
-
-	headers := http.Header(make(map[string][]string))
-	buf, err := json.Marshal(cli.configFile)
-	if err != nil {
-		return err
-	}
-	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
-
-	if context != nil {
-		headers.Set("Content-Type", "application/tar")
-	}
-	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
-	if jerr, ok := err.(*utils.JSONError); ok {
-		// If no error code is set, default to 1
-		if jerr.Code == 0 {
-			jerr.Code = 1
-		}
-		return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
-	}
-	return err
-}
-
-// 'docker login': login / register a user to registry service.
-func (cli *DockerCli) CmdLogin(args ...string) error {
-	cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
-	cmd.Require(flag.Max, 1)
-
-	var username, password, email string
-
-	cmd.StringVar(&username, []string{"u", "-username"}, "", "Username")
-	cmd.StringVar(&password, []string{"p", "-password"}, "", "Password")
-	cmd.StringVar(&email, []string{"e", "-email"}, "", "Email")
-
-	utils.ParseFlags(cmd, args, true)
-
-	serverAddress := registry.IndexServerAddress()
-	if len(cmd.Args()) > 0 {
-		serverAddress = cmd.Arg(0)
-	}
-
-	promptDefault := func(prompt string, configDefault string) {
-		if configDefault == "" {
-			fmt.Fprintf(cli.out, "%s: ", prompt)
-		} else {
-			fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
-		}
-	}
-
-	readInput := func(in io.Reader, out io.Writer) string {
-		reader := bufio.NewReader(in)
-		line, _, err := reader.ReadLine()
-		if err != nil {
-			fmt.Fprintln(out, err.Error())
-			os.Exit(1)
-		}
-		return string(line)
-	}
-
-	cli.LoadConfigFile()
-	authconfig, ok := cli.configFile.Configs[serverAddress]
-	if !ok {
-		authconfig = registry.AuthConfig{}
-	}
-
-	if username == "" {
-		promptDefault("Username", authconfig.Username)
-		username = readInput(cli.in, cli.out)
-		username = strings.Trim(username, " ")
-		if username == "" {
-			username = authconfig.Username
-		}
-	}
-	// Assume that a different username means they may not want to use
-	// the password or email from the config file, so prompt them
-	if username != authconfig.Username {
-		if password == "" {
-			oldState, err := term.SaveState(cli.inFd)
-			if err != nil {
-				return err
-			}
-			fmt.Fprintf(cli.out, "Password: ")
-			term.DisableEcho(cli.inFd, oldState)
-
-			password = readInput(cli.in, cli.out)
-			fmt.Fprint(cli.out, "\n")
-
-			term.RestoreTerminal(cli.inFd, oldState)
-			if password == "" {
-				return fmt.Errorf("Error : Password Required")
-			}
-		}
-
-		if email == "" {
-			promptDefault("Email", authconfig.Email)
-			email = readInput(cli.in, cli.out)
-			if email == "" {
-				email = authconfig.Email
-			}
-		}
-	} else {
-		// However, if they don't override the username use the
-		// password or email from the cmd line if specified. IOW, allow
-		// then to change/override them.  And if not specified, just
-		// use what's in the config file
-		if password == "" {
-			password = authconfig.Password
-		}
-		if email == "" {
-			email = authconfig.Email
-		}
-	}
-	authconfig.Username = username
-	authconfig.Password = password
-	authconfig.Email = email
-	authconfig.ServerAddress = serverAddress
-	cli.configFile.Configs[serverAddress] = authconfig
-
-	stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], nil)
-	if statusCode == 401 {
-		delete(cli.configFile.Configs, serverAddress)
-		registry.SaveConfig(cli.configFile)
-		return err
-	}
-	if err != nil {
-		return err
-	}
-	var out2 engine.Env
-	err = out2.Decode(stream)
-	if err != nil {
-		cli.configFile, _ = registry.LoadConfig(homedir.Get())
-		return err
-	}
-	registry.SaveConfig(cli.configFile)
-	fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s.\n", path.Join(homedir.Get(), registry.CONFIGFILE))
-
-	if out2.Get("Status") != "" {
-		fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
-	}
-	return nil
-}
-
-// log out from a Docker registry
-func (cli *DockerCli) CmdLogout(args ...string) error {
-	cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
-	cmd.Require(flag.Max, 1)
-
-	utils.ParseFlags(cmd, args, false)
-	serverAddress := registry.IndexServerAddress()
-	if len(cmd.Args()) > 0 {
-		serverAddress = cmd.Arg(0)
-	}
-
-	cli.LoadConfigFile()
-	if _, ok := cli.configFile.Configs[serverAddress]; !ok {
-		fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress)
-	} else {
-		fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress)
-		delete(cli.configFile.Configs, serverAddress)
-
-		if err := registry.SaveConfig(cli.configFile); err != nil {
-			return fmt.Errorf("Failed to save docker config: %v", err)
-		}
-	}
-	return nil
-}
-
-// 'docker wait': block until a container stops
-func (cli *DockerCli) CmdWait(args ...string) error {
-	cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.", true)
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		status, err := waitForExit(cli, name)
-		if err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
-		} else {
-			fmt.Fprintf(cli.out, "%d\n", status)
-		}
-	}
-	return encounteredError
-}
-
-// 'docker version': show version information
-func (cli *DockerCli) CmdVersion(args ...string) error {
-	cmd := cli.Subcmd("version", "", "Show the Docker version information.", true)
-	cmd.Require(flag.Exact, 0)
-
-	utils.ParseFlags(cmd, args, false)
-
-	if dockerversion.VERSION != "" {
-		fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
-	}
-	fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION)
-	fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
-	if dockerversion.GITCOMMIT != "" {
-		fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
-	}
-	fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
-
-	body, _, err := readBody(cli.call("GET", "/version", nil, nil))
-	if err != nil {
-		return err
-	}
-
-	out := engine.NewOutput()
-	remoteVersion, err := out.AddEnv()
-	if err != nil {
-		log.Errorf("Error reading remote version: %s", err)
-		return err
-	}
-	if _, err := out.Write(body); err != nil {
-		log.Errorf("Error reading remote version: %s", err)
-		return err
-	}
-	out.Close()
-	fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
-	if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
-		fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion)
-	}
-	fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
-	fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
-	fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", remoteVersion.Get("Os"), remoteVersion.Get("Arch"))
-	return nil
-}
-
-// 'docker info': display system-wide information.
-func (cli *DockerCli) CmdInfo(args ...string) error {
-	cmd := cli.Subcmd("info", "", "Display system-wide information", true)
-	cmd.Require(flag.Exact, 0)
-	utils.ParseFlags(cmd, args, false)
-
-	body, _, err := readBody(cli.call("GET", "/info", nil, nil))
-	if err != nil {
-		return err
-	}
-
-	out := engine.NewOutput()
-	remoteInfo, err := out.AddEnv()
-	if err != nil {
-		return err
-	}
-
-	if _, err := out.Write(body); err != nil {
-		log.Errorf("Error reading remote info: %s", err)
-		return err
-	}
-	out.Close()
-
-	if remoteInfo.Exists("Containers") {
-		fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
-	}
-	if remoteInfo.Exists("Images") {
-		fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
-	}
-	if remoteInfo.Exists("Driver") {
-		fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver"))
-	}
-	if remoteInfo.Exists("DriverStatus") {
-		var driverStatus [][2]string
-		if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
-			return err
-		}
-		for _, pair := range driverStatus {
-			fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
-		}
-	}
-	if remoteInfo.Exists("ExecutionDriver") {
-		fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
-	}
-	if remoteInfo.Exists("KernelVersion") {
-		fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
-	}
-	if remoteInfo.Exists("OperatingSystem") {
-		fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem"))
-	}
-	if remoteInfo.Exists("NCPU") {
-		fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU"))
-	}
-	if remoteInfo.Exists("MemTotal") {
-		fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal"))))
-	}
-	if remoteInfo.Exists("Name") {
-		fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name"))
-	}
-	if remoteInfo.Exists("ID") {
-		fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID"))
-	}
-
-	if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
-		if remoteInfo.Exists("Debug") {
-			fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
-		}
-		fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
-		if remoteInfo.Exists("NFd") {
-			fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
-		}
-		if remoteInfo.Exists("NGoroutines") {
-			fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
-		}
-		if remoteInfo.Exists("SystemTime") {
-			t, err := remoteInfo.GetTime("SystemTime")
-			if err != nil {
-				log.Errorf("Error reading system time: %v", err)
-			} else {
-				fmt.Fprintf(cli.out, "System Time: %s\n", t.Format(time.UnixDate))
-			}
-		}
-		if remoteInfo.Exists("NEventsListener") {
-			fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
-		}
-		if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
-			fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
-		}
-		if initPath := remoteInfo.Get("InitPath"); initPath != "" {
-			fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
-		}
-		if root := remoteInfo.Get("DockerRootDir"); root != "" {
-			fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root)
-		}
-	}
-	if remoteInfo.Exists("HttpProxy") {
-		fmt.Fprintf(cli.out, "Http Proxy: %s\n", remoteInfo.Get("HttpProxy"))
-	}
-	if remoteInfo.Exists("HttpsProxy") {
-		fmt.Fprintf(cli.out, "Https Proxy: %s\n", remoteInfo.Get("HttpsProxy"))
-	}
-	if remoteInfo.Exists("NoProxy") {
-		fmt.Fprintf(cli.out, "No Proxy: %s\n", remoteInfo.Get("NoProxy"))
-	}
-	if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
-		cli.LoadConfigFile()
-		u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
-		if len(u) > 0 {
-			fmt.Fprintf(cli.out, "Username: %v\n", u)
-			fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
-		}
-	}
-	if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") {
-		fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
-	}
-	if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") {
-		fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
-	}
-	if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") {
-		fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
-	}
-	if remoteInfo.Exists("Labels") {
-		fmt.Fprintln(cli.out, "Labels:")
-		for _, attribute := range remoteInfo.GetList("Labels") {
-			fmt.Fprintf(cli.out, " %s\n", attribute)
-		}
-	}
-
-	return nil
-}
-
-func (cli *DockerCli) CmdStop(args ...string) error {
-	cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true)
-	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it")
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	v := url.Values{}
-	v.Set("t", strconv.Itoa(*nSeconds))
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil))
-		if err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
-		} else {
-			fmt.Fprintf(cli.out, "%s\n", name)
-		}
-	}
-	return encounteredError
-}
-
-func (cli *DockerCli) CmdRestart(args ...string) error {
-	cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container", true)
-	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container")
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	v := url.Values{}
-	v.Set("t", strconv.Itoa(*nSeconds))
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil))
-		if err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
-		} else {
-			fmt.Fprintf(cli.out, "%s\n", name)
-		}
-	}
-	return encounteredError
-}
-
-func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
-	sigc := make(chan os.Signal, 128)
-	signal.CatchAll(sigc)
-	go func() {
-		for s := range sigc {
-			if s == signal.SIGCHLD {
-				continue
-			}
-			var sig string
-			for sigStr, sigN := range signal.SignalMap {
-				if sigN == s {
-					sig = sigStr
-					break
-				}
-			}
-			if sig == "" {
-				log.Errorf("Unsupported signal: %v. Discarding.", s)
-			}
-			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil {
-				log.Debugf("Error sending signal: %s", err)
-			}
-		}
-	}()
-	return sigc
-}
-
-func (cli *DockerCli) CmdStart(args ...string) error {
-	var (
-		cErr chan error
-		tty  bool
-
-		cmd       = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Start one or more stopped containers", true)
-		attach    = cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals")
-		openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
-	)
-
-	cmd.Require(flag.Min, 1)
-	utils.ParseFlags(cmd, args, true)
-
-	if *attach || *openStdin {
-		if cmd.NArg() > 1 {
-			return fmt.Errorf("You cannot start and attach multiple containers at once.")
-		}
-
-		stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil)
-		if err != nil {
-			return err
-		}
-
-		env := engine.Env{}
-		if err := env.Decode(stream); err != nil {
-			return err
-		}
-		config := env.GetSubEnv("Config")
-		tty = config.GetBool("Tty")
-
-		if !tty {
-			sigc := cli.forwardAllSignals(cmd.Arg(0))
-			defer signal.StopCatch(sigc)
-		}
-
-		var in io.ReadCloser
-
-		v := url.Values{}
-		v.Set("stream", "1")
-
-		if *openStdin && config.GetBool("OpenStdin") {
-			v.Set("stdin", "1")
-			in = cli.in
-		}
-
-		v.Set("stdout", "1")
-		v.Set("stderr", "1")
-
-		hijacked := make(chan io.Closer)
-		// Block the return until the chan gets closed
-		defer func() {
-			log.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
-			if _, ok := <-hijacked; ok {
-				log.Errorf("Hijack did not finish (chan still open)")
-			}
-			cli.in.Close()
-		}()
-		cErr = promise.Go(func() error {
-			return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil)
-		})
-
-		// Acknowledge the hijack before starting
-		select {
-		case closer := <-hijacked:
-			// Make sure that the hijack gets closed when returning (results
-			// in closing the hijack chan and freeing server's goroutines)
-			if closer != nil {
-				defer closer.Close()
-			}
-		case err := <-cErr:
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil))
-		if err != nil {
-			if !*attach && !*openStdin {
-				// attach and openStdin is false means it could be starting multiple containers
-				// when a container start failed, show the error message and start next
-				fmt.Fprintf(cli.err, "%s\n", err)
-				encounteredError = fmt.Errorf("Error: failed to start one or more containers")
-			} else {
-				encounteredError = err
-			}
-		} else {
-			if !*attach && !*openStdin {
-				fmt.Fprintf(cli.out, "%s\n", name)
-			}
-		}
-	}
-
-	if encounteredError != nil {
-		return encounteredError
-	}
-
-	if *openStdin || *attach {
-		if tty && cli.isTerminalOut {
-			if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
-				log.Errorf("Error monitoring TTY size: %s", err)
-			}
-		}
-		if attchErr := <-cErr; attchErr != nil {
-			return attchErr
-		}
-		_, status, err := getExitCode(cli, cmd.Arg(0))
-		if err != nil {
-			return err
-		}
-		if status != 0 {
-			return &utils.StatusError{StatusCode: status}
-		}
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdUnpause(args ...string) error {
-	cmd := cli.Subcmd("unpause", "CONTAINER [CONTAINER...]", "Unpause all processes within a container", true)
-	cmd.Require(flag.Min, 1)
-	utils.ParseFlags(cmd, args, false)
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name)
-		} else {
-			fmt.Fprintf(cli.out, "%s\n", name)
-		}
-	}
-	return encounteredError
-}
-
-func (cli *DockerCli) CmdPause(args ...string) error {
-	cmd := cli.Subcmd("pause", "CONTAINER [CONTAINER...]", "Pause all processes within a container", true)
-	cmd.Require(flag.Min, 1)
-	utils.ParseFlags(cmd, args, false)
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to pause container named %s", name)
-		} else {
-			fmt.Fprintf(cli.out, "%s\n", name)
-		}
-	}
-	return encounteredError
-}
-
-func (cli *DockerCli) CmdRename(args ...string) error {
-	cmd := cli.Subcmd("rename", "OLD_NAME NEW_NAME", "Rename a container", true)
-	if err := cmd.Parse(args); err != nil {
-		return nil
-	}
-
-	if cmd.NArg() != 2 {
-		cmd.Usage()
-		return nil
-	}
-	old_name := cmd.Arg(0)
-	new_name := cmd.Arg(1)
-
-	if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", old_name, new_name), nil, nil)); err != nil {
-		fmt.Fprintf(cli.err, "%s\n", err)
-		return fmt.Errorf("Error: failed to rename container named %s", old_name)
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdInspect(args ...string) error {
-	cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true)
-	tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var tmpl *template.Template
-	if *tmplStr != "" {
-		var err error
-		if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil {
-			fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
-			return &utils.StatusError{StatusCode: 64,
-				Status: "Template parsing error: " + err.Error()}
-		}
-	}
-
-	indented := new(bytes.Buffer)
-	indented.WriteByte('[')
-	status := 0
-
-	for _, name := range cmd.Args() {
-		obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, nil))
-		if err != nil {
-			if strings.Contains(err.Error(), "Too many") {
-				fmt.Fprintf(cli.err, "Error: %v", err)
-				status = 1
-				continue
-			}
-
-			obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil))
-			if err != nil {
-				if strings.Contains(err.Error(), "No such") {
-					fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
-				} else {
-					fmt.Fprintf(cli.err, "%s", err)
-				}
-				status = 1
-				continue
-			}
-		}
-
-		if tmpl == nil {
-			if err = json.Indent(indented, obj, "", "    "); err != nil {
-				fmt.Fprintf(cli.err, "%s\n", err)
-				status = 1
-				continue
-			}
-		} else {
-			// Has template, will render
-			var value interface{}
-			if err := json.Unmarshal(obj, &value); err != nil {
-				fmt.Fprintf(cli.err, "%s\n", err)
-				status = 1
-				continue
-			}
-			if err := tmpl.Execute(cli.out, value); err != nil {
-				return err
-			}
-			cli.out.Write([]byte{'\n'})
-		}
-		indented.WriteString(",")
-	}
-
-	if indented.Len() > 1 {
-		// Remove trailing ','
-		indented.Truncate(indented.Len() - 1)
-	}
-	indented.WriteString("]\n")
-
-	if tmpl == nil {
-		if _, err := io.Copy(cli.out, indented); err != nil {
-			return err
-		}
-	}
-
-	if status != 0 {
-		return &utils.StatusError{StatusCode: status}
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdTop(args ...string) error {
-	cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container", true)
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	val := url.Values{}
-	if cmd.NArg() > 1 {
-		val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
-	}
-
-	stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, nil)
-	if err != nil {
-		return err
-	}
-	var procs engine.Env
-	if err := procs.Decode(stream); err != nil {
-		return err
-	}
-	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
-	fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t"))
-	processes := [][]string{}
-	if err := procs.GetJson("Processes", &processes); err != nil {
-		return err
-	}
-	for _, proc := range processes {
-		fmt.Fprintln(w, strings.Join(proc, "\t"))
-	}
-	w.Flush()
-	return nil
-}
-
-func (cli *DockerCli) CmdPort(args ...string) error {
-	cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true)
-	cmd.Require(flag.Min, 1)
-	utils.ParseFlags(cmd, args, true)
-
-	stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil)
-	if err != nil {
-		return err
-	}
-
-	env := engine.Env{}
-	if err := env.Decode(stream); err != nil {
-		return err
-	}
-	ports := nat.PortMap{}
-	if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil {
-		return err
-	}
-
-	if cmd.NArg() == 2 {
-		var (
-			port  = cmd.Arg(1)
-			proto = "tcp"
-			parts = strings.SplitN(port, "/", 2)
-		)
-
-		if len(parts) == 2 && len(parts[1]) != 0 {
-			port = parts[0]
-			proto = parts[1]
-		}
-		natPort := port + "/" + proto
-		if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
-			for _, frontend := range frontends {
-				fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
-			}
-			return nil
-		}
-		return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0))
-	}
-
-	for from, frontends := range ports {
-		for _, frontend := range frontends {
-			fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort)
-		}
-	}
-
-	return nil
-}
-
-// 'docker rmi IMAGE' removes all images with the name IMAGE
-func (cli *DockerCli) CmdRmi(args ...string) error {
-	var (
-		cmd     = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images", true)
-		force   = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image")
-		noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
-	)
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	v := url.Values{}
-	if *force {
-		v.Set("force", "1")
-	}
-	if *noprune {
-		v.Set("noprune", "1")
-	}
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil))
-		if err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to remove one or more images")
-		} else {
-			outs := engine.NewTable("Created", 0)
-			if _, err := outs.ReadListFrom(body); err != nil {
-				fmt.Fprintf(cli.err, "%s\n", err)
-				encounteredError = fmt.Errorf("Error: failed to remove one or more images")
-				continue
-			}
-			for _, out := range outs.Data {
-				if out.Get("Deleted") != "" {
-					fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted"))
-				} else {
-					fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged"))
-				}
-			}
-		}
-	}
-	return encounteredError
-}
-
-func (cli *DockerCli) CmdHistory(args ...string) error {
-	cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true)
-	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
-	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil))
-	if err != nil {
-		return err
-	}
-
-	outs := engine.NewTable("Created", 0)
-	if _, err := outs.ReadListFrom(body); err != nil {
-		return err
-	}
-
-	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
-	if !*quiet {
-		fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
-	}
-
-	for _, out := range outs.Data {
-		outID := out.Get("Id")
-		if !*quiet {
-			if *noTrunc {
-				fmt.Fprintf(w, "%s\t", outID)
-			} else {
-				fmt.Fprintf(w, "%s\t", common.TruncateID(outID))
-			}
-
-			fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
-
-			if *noTrunc {
-				fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
-			} else {
-				fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
-			}
-			fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("Size"))))
-		} else {
-			if *noTrunc {
-				fmt.Fprintln(w, outID)
-			} else {
-				fmt.Fprintln(w, common.TruncateID(outID))
-			}
-		}
-	}
-	w.Flush()
-	return nil
-}
-
-func (cli *DockerCli) CmdRm(args ...string) error {
-	cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers", true)
-	v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
-	link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link")
-	force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	val := url.Values{}
-	if *v {
-		val.Set("v", "1")
-	}
-	if *link {
-		val.Set("link", "1")
-	}
-
-	if *force {
-		val.Set("force", "1")
-	}
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil))
-		if err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
-		} else {
-			fmt.Fprintf(cli.out, "%s\n", name)
-		}
-	}
-	return encounteredError
-}
-
-// 'docker kill NAME' kills a running container
-func (cli *DockerCli) CmdKill(args ...string) error {
-	cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal", true)
-	signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var encounteredError error
-	for _, name := range cmd.Args() {
-		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil {
-			fmt.Fprintf(cli.err, "%s\n", err)
-			encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
-		} else {
-			fmt.Fprintf(cli.out, "%s\n", name)
-		}
-	}
-	return encounteredError
-}
-
-func (cli *DockerCli) CmdImport(args ...string) error {
-	cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then\noptionally tag it.", true)
-	flChanges := opts.NewListOpts(nil)
-	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		v          = url.Values{}
-		src        = cmd.Arg(0)
-		repository = cmd.Arg(1)
-	)
-
-	v.Set("fromSrc", src)
-	v.Set("repo", repository)
-	for _, change := range flChanges.GetAll() {
-		v.Add("changes", change)
-	}
-	if cmd.NArg() == 3 {
-		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
-		v.Set("tag", cmd.Arg(2))
-	}
-
-	if repository != "" {
-		//Check if the given image name can be resolved
-		repo, _ := parsers.ParseRepositoryTag(repository)
-		if err := registry.ValidateRepositoryName(repo); err != nil {
-			return err
-		}
-	}
-
-	var in io.Reader
-
-	if src == "-" {
-		in = cli.in
-	}
-
-	return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
-}
-
-func (cli *DockerCli) CmdPush(args ...string) error {
-	cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry", true)
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	name := cmd.Arg(0)
-
-	cli.LoadConfigFile()
-
-	remote, tag := parsers.ParseRepositoryTag(name)
-
-	// Resolve the Repository name from fqn to RepositoryInfo
-	repoInfo, err := registry.ParseRepositoryInfo(remote)
-	if err != nil {
-		return err
-	}
-	// Resolve the Auth config relevant for this server
-	authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index)
-	// If we're not using a custom registry, we know the restrictions
-	// applied to repository names and can warn the user in advance.
-	// Custom repositories can have different rules, and we must also
-	// allow pushing by image ID.
-	if repoInfo.Official {
-		username := authConfig.Username
-		if username == "" {
-			username = "<user>"
-		}
-		return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository to <user>/<repo> (ex: %s/%s)", username, repoInfo.LocalName)
-	}
-
-	v := url.Values{}
-	v.Set("tag", tag)
-
-	_, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push")
-	return err
-}
-
-func (cli *DockerCli) CmdPull(args ...string) error {
-	cmd := cli.Subcmd("pull", "NAME[:TAG|@DIGEST]", "Pull an image or a repository from the registry", true)
-	allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		v         = url.Values{}
-		remote    = cmd.Arg(0)
-		newRemote = remote
-	)
-	taglessRemote, tag := parsers.ParseRepositoryTag(remote)
-	if tag == "" && !*allTags {
-		newRemote = utils.ImageReference(taglessRemote, graph.DEFAULTTAG)
-	}
-	if tag != "" && *allTags {
-		return fmt.Errorf("tag can't be used with --all-tags/-a")
-	}
-
-	v.Set("fromImage", newRemote)
-
-	// Resolve the Repository name from fqn to RepositoryInfo
-	repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
-	if err != nil {
-		return err
-	}
-
-	cli.LoadConfigFile()
-
-	_, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull")
-	return err
-}
-
-func (cli *DockerCli) CmdImages(args ...string) error {
-	cmd := cli.Subcmd("images", "[REPOSITORY]", "List images", true)
-	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
-	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
-	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
-	showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")
-	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
-	flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
-	flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
-
-	flFilter := opts.NewListOpts(nil)
-	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
-	cmd.Require(flag.Max, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	// Consolidate all filter flags, and sanity check them early.
-	// They'll get process in the daemon/server.
-	imageFilterArgs := filters.Args{}
-	for _, f := range flFilter.GetAll() {
-		var err error
-		imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
-		if err != nil {
-			return err
-		}
-	}
-
-	matchName := cmd.Arg(0)
-	// FIXME: --viz and --tree are deprecated. Remove them in a future version.
-	if *flViz || *flTree {
-		v := url.Values{
-			"all": []string{"1"},
-		}
-		if len(imageFilterArgs) > 0 {
-			filterJson, err := filters.ToParam(imageFilterArgs)
-			if err != nil {
-				return err
-			}
-			v.Set("filters", filterJson)
-		}
-
-		body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, nil))
-		if err != nil {
-			return err
-		}
-
-		outs := engine.NewTable("Created", 0)
-		if _, err := outs.ReadListFrom(body); err != nil {
-			return err
-		}
-
-		var (
-			printNode  func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)
-			startImage *engine.Env
-
-			roots    = engine.NewTable("Created", outs.Len())
-			byParent = make(map[string]*engine.Table)
-		)
-
-		for _, image := range outs.Data {
-			if image.Get("ParentId") == "" {
-				roots.Add(image)
-			} else {
-				if children, exists := byParent[image.Get("ParentId")]; exists {
-					children.Add(image)
-				} else {
-					byParent[image.Get("ParentId")] = engine.NewTable("Created", 1)
-					byParent[image.Get("ParentId")].Add(image)
-				}
-			}
-
-			if matchName != "" {
-				if matchName == image.Get("Id") || matchName == common.TruncateID(image.Get("Id")) {
-					startImage = image
-				}
-
-				for _, repotag := range image.GetList("RepoTags") {
-					if repotag == matchName {
-						startImage = image
-					}
-				}
-			}
-		}
-
-		if *flViz {
-			fmt.Fprintf(cli.out, "digraph docker {\n")
-			printNode = (*DockerCli).printVizNode
-		} else {
-			printNode = (*DockerCli).printTreeNode
-		}
-
-		if startImage != nil {
-			root := engine.NewTable("Created", 1)
-			root.Add(startImage)
-			cli.WalkTree(*noTrunc, root, byParent, "", printNode)
-		} else if matchName == "" {
-			cli.WalkTree(*noTrunc, roots, byParent, "", printNode)
-		}
-		if *flViz {
-			fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
-		}
-	} else {
-		v := url.Values{}
-		if len(imageFilterArgs) > 0 {
-			filterJson, err := filters.ToParam(imageFilterArgs)
-			if err != nil {
-				return err
-			}
-			v.Set("filters", filterJson)
-		}
-
-		if cmd.NArg() == 1 {
-			// FIXME rename this parameter, to not be confused with the filters flag
-			v.Set("filter", matchName)
-		}
-		if *all {
-			v.Set("all", "1")
-		}
-
-		body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, nil))
-
-		if err != nil {
-			return err
-		}
-
-		outs := engine.NewTable("Created", 0)
-		if _, err := outs.ReadListFrom(body); err != nil {
-			return err
-		}
-
-		w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
-		if !*quiet {
-			if *showDigests {
-				fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
-			} else {
-				fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
-			}
-		}
-
-		for _, out := range outs.Data {
-			outID := out.Get("Id")
-			if !*noTrunc {
-				outID = common.TruncateID(outID)
-			}
-
-			repoTags := out.GetList("RepoTags")
-			repoDigests := out.GetList("RepoDigests")
-
-			if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
-				// dangling image - clear out either repoTags or repoDigsts so we only show it once below
-				repoDigests = []string{}
-			}
-
-			// combine the tags and digests lists
-			tagsAndDigests := append(repoTags, repoDigests...)
-			for _, repoAndRef := range tagsAndDigests {
-				repo, ref := parsers.ParseRepositoryTag(repoAndRef)
-				// default tag and digest to none - if there's a value, it'll be set below
-				tag := "<none>"
-				digest := "<none>"
-				if utils.DigestReference(ref) {
-					digest = ref
-				} else {
-					tag = ref
-				}
-
-				if !*quiet {
-					if *showDigests {
-						fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize"))))
-					} else {
-						fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize"))))
-					}
-				} else {
-					fmt.Fprintln(w, outID)
-				}
-			}
-		}
-
-		if !*quiet {
-			w.Flush()
-		}
-	}
-	return nil
-}
-
-// FIXME: --viz and --tree are deprecated. Remove them in a future version.
-func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) {
-	length := images.Len()
-	if length > 1 {
-		for index, image := range images.Data {
-			if index+1 == length {
-				printNode(cli, noTrunc, image, prefix+"└─")
-				if subimages, exists := byParent[image.Get("Id")]; exists {
-					cli.WalkTree(noTrunc, subimages, byParent, prefix+"  ", printNode)
-				}
-			} else {
-				printNode(cli, noTrunc, image, prefix+"\u251C─")
-				if subimages, exists := byParent[image.Get("Id")]; exists {
-					cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode)
-				}
-			}
-		}
-	} else {
-		for _, image := range images.Data {
-			printNode(cli, noTrunc, image, prefix+"└─")
-			if subimages, exists := byParent[image.Get("Id")]; exists {
-				cli.WalkTree(noTrunc, subimages, byParent, prefix+"  ", printNode)
-			}
-		}
-	}
-}
-
-// FIXME: --viz and --tree are deprecated. Remove them in a future version.
-func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
-	var (
-		imageID  string
-		parentID string
-	)
-	if noTrunc {
-		imageID = image.Get("Id")
-		parentID = image.Get("ParentId")
-	} else {
-		imageID = common.TruncateID(image.Get("Id"))
-		parentID = common.TruncateID(image.Get("ParentId"))
-	}
-	if parentID == "" {
-		fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
-	} else {
-		fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
-	}
-	if image.GetList("RepoTags")[0] != "<none>:<none>" {
-		fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
-			imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n"))
-	}
-}
-
-// FIXME: --viz and --tree are deprecated. Remove them in a future version.
-func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
-	var imageID string
-	if noTrunc {
-		imageID = image.Get("Id")
-	} else {
-		imageID = common.TruncateID(image.Get("Id"))
-	}
-
-	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize"))))
-	if image.GetList("RepoTags")[0] != "<none>:<none>" {
-		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
-	} else {
-		fmt.Fprint(cli.out, "\n")
-	}
-}
-
-func (cli *DockerCli) CmdPs(args ...string) error {
-	var (
-		err error
-
-		psFilterArgs = filters.Args{}
-		v            = url.Values{}
-
-		cmd      = cli.Subcmd("ps", "", "List containers", true)
-		quiet    = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
-		size     = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes")
-		all      = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
-		noTrunc  = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
-		nLatest  = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running")
-		since    = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running")
-		before   = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name")
-		last     = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running")
-		flFilter = opts.NewListOpts(nil)
-	)
-	cmd.Require(flag.Exact, 0)
-
-	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
-
-	utils.ParseFlags(cmd, args, true)
-	if *last == -1 && *nLatest {
-		*last = 1
-	}
-
-	if *all {
-		v.Set("all", "1")
-	}
-
-	if *last != -1 {
-		v.Set("limit", strconv.Itoa(*last))
-	}
-
-	if *since != "" {
-		v.Set("since", *since)
-	}
-
-	if *before != "" {
-		v.Set("before", *before)
-	}
-
-	if *size {
-		v.Set("size", "1")
-	}
-
-	// Consolidate all filter flags, and sanity check them.
-	// They'll get processed in the daemon/server.
-	for _, f := range flFilter.GetAll() {
-		if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil {
-			return err
-		}
-	}
-
-	if len(psFilterArgs) > 0 {
-		filterJson, err := filters.ToParam(psFilterArgs)
-		if err != nil {
-			return err
-		}
-
-		v.Set("filters", filterJson)
-	}
-
-	body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, nil))
-	if err != nil {
-		return err
-	}
-
-	outs := engine.NewTable("Created", 0)
-	if _, err := outs.ReadListFrom(body); err != nil {
-		return err
-	}
-
-	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
-	if !*quiet {
-		fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
-
-		if *size {
-			fmt.Fprintln(w, "\tSIZE")
-		} else {
-			fmt.Fprint(w, "\n")
-		}
-	}
-
-	stripNamePrefix := func(ss []string) []string {
-		for i, s := range ss {
-			ss[i] = s[1:]
-		}
-
-		return ss
-	}
-
-	for _, out := range outs.Data {
-		outID := out.Get("Id")
-
-		if !*noTrunc {
-			outID = common.TruncateID(outID)
-		}
-
-		if *quiet {
-			fmt.Fprintln(w, outID)
-
-			continue
-		}
-
-		var (
-			outNames   = stripNamePrefix(out.GetList("Names"))
-			outCommand = strconv.Quote(out.Get("Command"))
-			ports      = engine.NewTable("", 0)
-		)
-
-		if !*noTrunc {
-			outCommand = utils.Trunc(outCommand, 20)
-
-			// only display the default name for the container with notrunc is passed
-			for _, name := range outNames {
-				if len(strings.Split(name, "/")) == 1 {
-					outNames = []string{name}
-
-					break
-				}
-			}
-		}
-
-		ports.ReadListFrom([]byte(out.Get("Ports")))
-
-		image := out.Get("Image")
-		if image == "" {
-			image = "<no image>"
-		}
-
-		fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, image, outCommand,
-			units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))),
-			out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ","))
-
-		if *size {
-			if out.GetInt("SizeRootFs") > 0 {
-				fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(float64(out.GetInt64("SizeRw"))), units.HumanSize(float64(out.GetInt64("SizeRootFs"))))
-			} else {
-				fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("SizeRw"))))
-			}
-
-			continue
-		}
-
-		fmt.Fprint(w, "\n")
-	}
-
-	if !*quiet {
-		w.Flush()
-	}
-
-	return nil
-}
-
-func (cli *DockerCli) CmdCommit(args ...string) error {
-	cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes", true)
-	flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
-	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
-	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
-	flChanges := opts.NewListOpts(nil)
-	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
-	// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
-	flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
-	cmd.Require(flag.Max, 2)
-	cmd.Require(flag.Min, 1)
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		name            = cmd.Arg(0)
-		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
-	)
-
-	//Check if the given image name can be resolved
-	if repository != "" {
-		if err := registry.ValidateRepositoryName(repository); err != nil {
-			return err
-		}
-	}
-
-	v := url.Values{}
-	v.Set("container", name)
-	v.Set("repo", repository)
-	v.Set("tag", tag)
-	v.Set("comment", *flComment)
-	v.Set("author", *flAuthor)
-	for _, change := range flChanges.GetAll() {
-		v.Add("changes", change)
-	}
-
-	if *flPause != true {
-		v.Set("pause", "0")
-	}
-
-	var (
-		config *runconfig.Config
-		env    engine.Env
-	)
-	if *flConfig != "" {
-		config = &runconfig.Config{}
-		if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
-			return err
-		}
-	}
-	stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, nil)
-	if err != nil {
-		return err
-	}
-	if err := env.Decode(stream); err != nil {
-		return err
-	}
-
-	fmt.Fprintf(cli.out, "%s\n", env.Get("Id"))
-	return nil
-}
-
-func (cli *DockerCli) CmdEvents(args ...string) error {
-	cmd := cli.Subcmd("events", "", "Get real time events from the server", true)
-	since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp")
-	until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
-	flFilter := opts.NewListOpts(nil)
-	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
-	cmd.Require(flag.Exact, 0)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		v               = url.Values{}
-		loc             = time.FixedZone(time.Now().Zone())
-		eventFilterArgs = filters.Args{}
-	)
-
-	// Consolidate all filter flags, and sanity check them early.
-	// They'll get process in the daemon/server.
-	for _, f := range flFilter.GetAll() {
-		var err error
-		eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs)
-		if err != nil {
-			return err
-		}
-	}
-	var setTime = func(key, value string) {
-		format := timeutils.RFC3339NanoFixed
-		if len(value) < len(format) {
-			format = format[:len(value)]
-		}
-		if t, err := time.ParseInLocation(format, value, loc); err == nil {
-			v.Set(key, strconv.FormatInt(t.Unix(), 10))
-		} else {
-			v.Set(key, value)
-		}
-	}
-	if *since != "" {
-		setTime("since", *since)
-	}
-	if *until != "" {
-		setTime("until", *until)
-	}
-	if len(eventFilterArgs) > 0 {
-		filterJson, err := filters.ToParam(eventFilterArgs)
-		if err != nil {
-			return err
-		}
-		v.Set("filters", filterJson)
-	}
-	if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdExport(args ...string) error {
-	cmd := cli.Subcmd("export", "CONTAINER", "Export a filesystem as a tar archive (streamed to STDOUT by default)", true)
-	outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		output io.Writer = cli.out
-		err    error
-	)
-	if *outfile != "" {
-		output, err = os.Create(*outfile)
-		if err != nil {
-			return err
-		}
-	} else if cli.isTerminalOut {
-		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
-	}
-
-	if len(cmd.Args()) == 1 {
-		image := cmd.Arg(0)
-		if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil {
-			return err
-		}
-	} else {
-		v := url.Values{}
-		for _, arg := range cmd.Args() {
-			v.Add("names", arg)
-		}
-		if err := cli.stream("GET", "/containers/get?"+v.Encode(), nil, output, nil); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (cli *DockerCli) CmdDiff(args ...string) error {
-	cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem", true)
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, nil))
-
-	if err != nil {
-		return err
-	}
-
-	outs := engine.NewTable("", 0)
-	if _, err := outs.ReadListFrom(body); err != nil {
-		return err
-	}
-	for _, change := range outs.Data {
-		var kind string
-		switch change.GetInt("Kind") {
-		case archive.ChangeModify:
-			kind = "C"
-		case archive.ChangeAdd:
-			kind = "A"
-		case archive.ChangeDelete:
-			kind = "D"
-		}
-		fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path"))
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdLogs(args ...string) error {
-	var (
-		cmd    = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true)
-		follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
-		times  = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
-		tail   = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
-	)
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	name := cmd.Arg(0)
-
-	stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, nil)
-	if err != nil {
-		return err
-	}
-
-	env := engine.Env{}
-	if err := env.Decode(stream); err != nil {
-		return err
-	}
-
-	if env.GetSubEnv("HostConfig").GetSubEnv("LogConfig").Get("Type") != "json-file" {
-		return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver")
-	}
-
-	v := url.Values{}
-	v.Set("stdout", "1")
-	v.Set("stderr", "1")
-
-	if *times {
-		v.Set("timestamps", "1")
-	}
-
-	if *follow {
-		v.Set("follow", "1")
-	}
-	v.Set("tail", *tail)
-
-	return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil)
-}
-
-func (cli *DockerCli) CmdAttach(args ...string) error {
-	var (
-		cmd     = cli.Subcmd("attach", "CONTAINER", "Attach to a running container", true)
-		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
-		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process")
-	)
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-	name := cmd.Arg(0)
-
-	stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, nil)
-	if err != nil {
-		return err
-	}
-
-	env := engine.Env{}
-	if err := env.Decode(stream); err != nil {
-		return err
-	}
-
-	if !env.GetSubEnv("State").GetBool("Running") {
-		return fmt.Errorf("You cannot attach to a stopped container, start it first")
-	}
-
-	var (
-		config = env.GetSubEnv("Config")
-		tty    = config.GetBool("Tty")
-	)
-
-	if err := cli.CheckTtyInput(!*noStdin, tty); err != nil {
-		return err
-	}
-
-	if tty && cli.isTerminalOut {
-		if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
-			log.Debugf("Error monitoring TTY size: %s", err)
-		}
-	}
-
-	var in io.ReadCloser
-
-	v := url.Values{}
-	v.Set("stream", "1")
-	if !*noStdin && config.GetBool("OpenStdin") {
-		v.Set("stdin", "1")
-		in = cli.in
-	}
-
-	v.Set("stdout", "1")
-	v.Set("stderr", "1")
-
-	if *proxy && !tty {
-		sigc := cli.forwardAllSignals(cmd.Arg(0))
-		defer signal.StopCatch(sigc)
-	}
-
-	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil {
-		return err
-	}
-
-	_, status, err := getExitCode(cli, cmd.Arg(0))
-	if err != nil {
-		return err
-	}
-	if status != 0 {
-		return &utils.StatusError{StatusCode: status}
-	}
-
-	return nil
-}
-
-func (cli *DockerCli) CmdSearch(args ...string) error {
-	cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images", true)
-	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
-	trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
-	automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
-	stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars")
-	cmd.Require(flag.Exact, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	name := cmd.Arg(0)
-	v := url.Values{}
-	v.Set("term", name)
-
-	// Resolve the Repository name from fqn to hostname + name
-	taglessRemote, _ := parsers.ParseRepositoryTag(name)
-	repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
-	if err != nil {
-		return err
-	}
-
-	cli.LoadConfigFile()
-
-	body, statusCode, errReq := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, repoInfo.Index, "search")
-	rawBody, _, err := readBody(body, statusCode, errReq)
-	if err != nil {
-		return err
-	}
-
-	outs := engine.NewTable("star_count", 0)
-	if _, err := outs.ReadListFrom(rawBody); err != nil {
-		return err
-	}
-	w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
-	fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n")
-	for _, out := range outs.Data {
-		if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) {
-			continue
-		}
-		desc := strings.Replace(out.Get("description"), "\n", " ", -1)
-		desc = strings.Replace(desc, "\r", " ", -1)
-		if !*noTrunc && len(desc) > 45 {
-			desc = utils.Trunc(desc, 42) + "..."
-		}
-		fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count"))
-		if out.GetBool("is_official") {
-			fmt.Fprint(w, "[OK]")
-
-		}
-		fmt.Fprint(w, "\t")
-		if out.GetBool("is_automated") || out.GetBool("is_trusted") {
-			fmt.Fprint(w, "[OK]")
-		}
-		fmt.Fprint(w, "\n")
-	}
-	w.Flush()
-	return nil
-}
-
-// Ports type - Used to parse multiple -p flags
-type ports []int
-
-func (cli *DockerCli) CmdTag(args ...string) error {
-	cmd := cli.Subcmd("tag", "IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository", true)
-	force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
-	cmd.Require(flag.Exact, 2)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
-		v               = url.Values{}
-	)
-
-	//Check if the given image name can be resolved
-	if err := registry.ValidateRepositoryName(repository); err != nil {
-		return err
-	}
-	v.Set("repo", repository)
-	v.Set("tag", tag)
-
-	if *force {
-		v.Set("force", "1")
-	}
-
-	if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, nil)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (cli *DockerCli) pullImage(image string) error {
-	return cli.pullImageCustomOut(image, cli.out)
-}
-
-func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
-	v := url.Values{}
-	repos, tag := parsers.ParseRepositoryTag(image)
-	// pull only the image tagged 'latest' if no tag was specified
-	if tag == "" {
-		tag = graph.DEFAULTTAG
-	}
-	v.Set("fromImage", repos)
-	v.Set("tag", tag)
-
-	// Resolve the Repository name from fqn to RepositoryInfo
-	repoInfo, err := registry.ParseRepositoryInfo(repos)
-	if err != nil {
-		return err
-	}
-
-	// Load the auth config file, to be able to pull the image
-	cli.LoadConfigFile()
-
-	// Resolve the Auth config relevant for this server
-	authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index)
-	buf, err := json.Marshal(authConfig)
-	if err != nil {
-		return err
-	}
-
-	registryAuthHeader := []string{
-		base64.URLEncoding.EncodeToString(buf),
-	}
-	if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
-		return err
-	}
-	return nil
-}
-
-type cidFile struct {
-	path    string
-	file    *os.File
-	written bool
-}
-
-func newCIDFile(path string) (*cidFile, error) {
-	if _, err := os.Stat(path); err == nil {
-		return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path)
-	}
-
-	f, err := os.Create(path)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to create the container ID file: %s", err)
-	}
-
-	return &cidFile{path: path, file: f}, nil
-}
-
-func (cid *cidFile) Close() error {
-	cid.file.Close()
-
-	if !cid.written {
-		if err := os.Remove(cid.path); err != nil {
-			return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err)
-		}
-	}
-
-	return nil
-}
-
-func (cid *cidFile) Write(id string) error {
-	if _, err := cid.file.Write([]byte(id)); err != nil {
-		return fmt.Errorf("Failed to write the container ID to the file: %s", err)
-	}
-	cid.written = true
-	return nil
-}
-
-func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) {
-	containerValues := url.Values{}
-	if name != "" {
-		containerValues.Set("name", name)
-	}
-
-	mergedConfig := runconfig.MergeConfigs(config, hostConfig)
-
-	var containerIDFile *cidFile
-	if cidfile != "" {
-		var err error
-		if containerIDFile, err = newCIDFile(cidfile); err != nil {
-			return nil, err
-		}
-		defer containerIDFile.Close()
-	}
-
-	//create the container
-	stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil)
-	//if image not found try to pull it
-	if statusCode == 404 {
-		repo, tag := parsers.ParseRepositoryTag(config.Image)
-		if tag == "" {
-			tag = graph.DEFAULTTAG
-		}
-		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", utils.ImageReference(repo, tag))
-
-		// we don't want to write to stdout anything apart from container.ID
-		if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
-			return nil, err
-		}
-		// Retry
-		if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil); err != nil {
-			return nil, err
-		}
-	} else if err != nil {
-		return nil, err
-	}
-
-	var response types.ContainerCreateResponse
-	if err := json.NewDecoder(stream).Decode(&response); err != nil {
-		return nil, err
-	}
-	for _, warning := range response.Warnings {
-		fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
-	}
-	if containerIDFile != nil {
-		if err = containerIDFile.Write(response.ID); err != nil {
-			return nil, err
-		}
-	}
-	return &response, nil
-}
-
-func (cli *DockerCli) CmdCreate(args ...string) error {
-	cmd := cli.Subcmd("create", "IMAGE [COMMAND] [ARG...]", "Create a new container", true)
-
-	// These are flags not stored in Config/HostConfig
-	var (
-		flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
-	)
-
-	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
-	if err != nil {
-		utils.ReportError(cmd, err.Error(), true)
-	}
-	if config.Image == "" {
-		cmd.Usage()
-		return nil
-	}
-	response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
-	if err != nil {
-		return err
-	}
-	fmt.Fprintf(cli.out, "%s\n", response.ID)
-	return nil
-}
-
-func (cli *DockerCli) CmdRun(args ...string) error {
-	// FIXME: just use runconfig.Parse already
-	cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container", true)
-
-	// These are flags not stored in Config/HostConfig
-	var (
-		flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits")
-		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID")
-		flSigProxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process")
-		flName       = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
-		flAttach     *opts.ListOpts
-
-		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
-		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
-		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
-	)
-
-	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
-	// just in case the Parse does not exit
-	if err != nil {
-		utils.ReportError(cmd, err.Error(), true)
-	}
-
-	if len(hostConfig.Dns) > 0 {
-		// check the DNS settings passed via --dns against
-		// localhost regexp to warn if they are trying to
-		// set a DNS to a localhost address
-		for _, dnsIP := range hostConfig.Dns {
-			if resolvconf.IsLocalhost(dnsIP) {
-				fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
-				break
-			}
-		}
-	}
-	if config.Image == "" {
-		cmd.Usage()
-		return nil
-	}
-
-	if !*flDetach {
-		if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
-			return err
-		}
-	} else {
-		if fl := cmd.Lookup("-attach"); fl != nil {
-			flAttach = fl.Value.(*opts.ListOpts)
-			if flAttach.Len() != 0 {
-				return ErrConflictAttachDetach
-			}
-		}
-		if *flAutoRemove {
-			return ErrConflictDetachAutoRemove
-		}
-
-		config.AttachStdin = false
-		config.AttachStdout = false
-		config.AttachStderr = false
-		config.StdinOnce = false
-	}
-
-	// Disable flSigProxy when in TTY mode
-	sigProxy := *flSigProxy
-	if config.Tty {
-		sigProxy = false
-	}
-
-	createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
-	if err != nil {
-		return err
-	}
-	if sigProxy {
-		sigc := cli.forwardAllSignals(createResponse.ID)
-		defer signal.StopCatch(sigc)
-	}
-	var (
-		waitDisplayId chan struct{}
-		errCh         chan error
-	)
-	if !config.AttachStdout && !config.AttachStderr {
-		// Make this asynchronous to allow the client to write to stdin before having to read the ID
-		waitDisplayId = make(chan struct{})
-		go func() {
-			defer close(waitDisplayId)
-			fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
-		}()
-	}
-	if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
-		return ErrConflictRestartPolicyAndAutoRemove
-	}
-	// We need to instantiate the chan because the select needs it. It can
-	// be closed but can't be uninitialized.
-	hijacked := make(chan io.Closer)
-	// Block the return until the chan gets closed
-	defer func() {
-		log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
-		if _, ok := <-hijacked; ok {
-			log.Errorf("Hijack did not finish (chan still open)")
-		}
-	}()
-	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
-		var (
-			out, stderr io.Writer
-			in          io.ReadCloser
-			v           = url.Values{}
-		)
-		v.Set("stream", "1")
-		if config.AttachStdin {
-			v.Set("stdin", "1")
-			in = cli.in
-		}
-		if config.AttachStdout {
-			v.Set("stdout", "1")
-			out = cli.out
-		}
-		if config.AttachStderr {
-			v.Set("stderr", "1")
-			if config.Tty {
-				stderr = cli.out
-			} else {
-				stderr = cli.err
-			}
-		}
-		errCh = promise.Go(func() error {
-			return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
-		})
-	} else {
-		close(hijacked)
-	}
-	// Acknowledge the hijack before starting
-	select {
-	case closer := <-hijacked:
-		// Make sure that the hijack gets closed when returning (results
-		// in closing the hijack chan and freeing server's goroutines)
-		if closer != nil {
-			defer closer.Close()
-		}
-	case err := <-errCh:
-		if err != nil {
-			log.Debugf("Error hijack: %s", err)
-			return err
-		}
-	}
-
-	//start the container
-	if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil {
-		return err
-	}
-
-	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
-		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
-			log.Errorf("Error monitoring TTY size: %s", err)
-		}
-	}
-
-	if errCh != nil {
-		if err := <-errCh; err != nil {
-			log.Debugf("Error hijack: %s", err)
-			return err
-		}
-	}
-
-	// Detached mode: wait for the id to be displayed and return.
-	if !config.AttachStdout && !config.AttachStderr {
-		// Detached mode
-		<-waitDisplayId
-		return nil
-	}
-
-	var status int
-
-	// Attached mode
-	if *flAutoRemove {
-		// Autoremove: wait for the container to finish, retrieve
-		// the exit code and remove the container
-		if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil {
-			return err
-		}
-		if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
-			return err
-		}
-		if _, _, err := readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil {
-			return err
-		}
-	} else {
-		// No Autoremove: Simply retrieve the exit code
-		if !config.Tty {
-			// In non-TTY mode, we can't detach, so we must wait for container exit
-			if status, err = waitForExit(cli, createResponse.ID); err != nil {
-				return err
-			}
-		} else {
-			// In TTY mode, there is a race: if the process dies too slowly, the state could
-			// be updated after the getExitCode call and result in the wrong exit code being reported
-			if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
-				return err
-			}
-		}
-	}
-	if status != 0 {
-		return &utils.StatusError{StatusCode: status}
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdCp(args ...string) error {
-	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true)
-	cmd.Require(flag.Exact, 2)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var copyData engine.Env
-	info := strings.Split(cmd.Arg(0), ":")
-
-	if len(info) != 2 {
-		return fmt.Errorf("Error: Path not specified")
-	}
-
-	copyData.Set("Resource", info[1])
-	copyData.Set("HostPath", cmd.Arg(1))
-
-	stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, nil)
-	if stream != nil {
-		defer stream.Close()
-	}
-	if statusCode == 404 {
-		return fmt.Errorf("No such container: %v", info[0])
-	}
-	if err != nil {
-		return err
-	}
-
-	if statusCode == 200 {
-		dest := copyData.Get("HostPath")
-
-		if dest == "-" {
-			_, err = io.Copy(cli.out, stream)
-		} else {
-			err = archive.Untar(stream, dest, &archive.TarOptions{NoLchown: true})
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdSave(args ...string) error {
-	cmd := cli.Subcmd("save", "IMAGE [IMAGE...]", "Save an image(s) to a tar archive (streamed to STDOUT by default)", true)
-	outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT")
-	cmd.Require(flag.Min, 1)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		output io.Writer = cli.out
-		err    error
-	)
-	if *outfile != "" {
-		output, err = os.Create(*outfile)
-		if err != nil {
-			return err
-		}
-	} else if cli.isTerminalOut {
-		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
-	}
-
-	if len(cmd.Args()) == 1 {
-		image := cmd.Arg(0)
-		if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
-			return err
-		}
-	} else {
-		v := url.Values{}
-		for _, arg := range cmd.Args() {
-			v.Add("names", arg)
-		}
-		if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdLoad(args ...string) error {
-	cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN", true)
-	infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
-	cmd.Require(flag.Exact, 0)
-
-	utils.ParseFlags(cmd, args, true)
-
-	var (
-		input io.Reader = cli.in
-		err   error
-	)
-	if *infile != "" {
-		input, err = os.Open(*infile)
-		if err != nil {
-			return err
-		}
-	}
-	if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (cli *DockerCli) CmdExec(args ...string) error {
-	cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in a running container", true)
-
-	execConfig, err := runconfig.ParseExec(cmd, args)
-	// just in case the ParseExec does not exit
-	if execConfig.Container == "" || err != nil {
-		return &utils.StatusError{StatusCode: 1}
-	}
-
-	stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, nil)
-	if err != nil {
-		return err
-	}
-
-	var execResult engine.Env
-	if err := execResult.Decode(stream); err != nil {
-		return err
-	}
-
-	execID := execResult.Get("Id")
-
-	if execID == "" {
-		fmt.Fprintf(cli.out, "exec ID empty")
-		return nil
-	}
-
-	if !execConfig.Detach {
-		if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil {
-			return err
-		}
-	} else {
-		if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, nil)); err != nil {
-			return err
-		}
-		// For now don't print this - wait for when we support exec wait()
-		// fmt.Fprintf(cli.out, "%s\n", execID)
-		return nil
-	}
-
-	// Interactive exec requested.
-	var (
-		out, stderr io.Writer
-		in          io.ReadCloser
-		hijacked    = make(chan io.Closer)
-		errCh       chan error
-	)
-
-	// Block the return until the chan gets closed
-	defer func() {
-		log.Debugf("End of CmdExec(), Waiting for hijack to finish.")
-		if _, ok := <-hijacked; ok {
-			log.Errorf("Hijack did not finish (chan still open)")
-		}
-	}()
-
-	if execConfig.AttachStdin {
-		in = cli.in
-	}
-	if execConfig.AttachStdout {
-		out = cli.out
-	}
-	if execConfig.AttachStderr {
-		if execConfig.Tty {
-			stderr = cli.out
-		} else {
-			stderr = cli.err
-		}
-	}
-	errCh = promise.Go(func() error {
-		return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig)
-	})
-
-	// Acknowledge the hijack before starting
-	select {
-	case closer := <-hijacked:
-		// Make sure that hijack gets closed when returning. (result
-		// in closing hijack chan and freeing server's goroutines.
-		if closer != nil {
-			defer closer.Close()
-		}
-	case err := <-errCh:
-		if err != nil {
-			log.Debugf("Error hijack: %s", err)
-			return err
-		}
-	}
-
-	if execConfig.Tty && cli.isTerminalIn {
-		if err := cli.monitorTtySize(execID, true); err != nil {
-			log.Errorf("Error monitoring TTY size: %s", err)
-		}
-	}
-
-	if err := <-errCh; err != nil {
-		log.Debugf("Error hijack: %s", err)
-		return err
-	}
-
-	var status int
-	if _, status, err = getExecExitCode(cli, execID); err != nil {
-		return err
-	}
-
-	if status != 0 {
-		return &utils.StatusError{StatusCode: status}
-	}
-
-	return nil
-}
-
-type containerStats struct {
-	Name             string
-	CpuPercentage    float64
-	Memory           float64
-	MemoryLimit      float64
-	MemoryPercentage float64
-	NetworkRx        float64
-	NetworkTx        float64
-	mu               sync.RWMutex
-	err              error
-}
-
-func (s *containerStats) Collect(cli *DockerCli) {
-	stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats", nil, nil)
-	if err != nil {
-		s.err = err
-		return
-	}
-	defer stream.Close()
-	var (
-		previousCpu    uint64
-		previousSystem uint64
-		start          = true
-		dec            = json.NewDecoder(stream)
-		u              = make(chan error, 1)
-	)
-	go func() {
-		for {
-			var v *types.Stats
-			if err := dec.Decode(&v); err != nil {
-				u <- err
-				return
-			}
-			var (
-				memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
-				cpuPercent = 0.0
-			)
-			if !start {
-				cpuPercent = calculateCpuPercent(previousCpu, previousSystem, v)
-			}
-			start = false
-			s.mu.Lock()
-			s.CpuPercentage = cpuPercent
-			s.Memory = float64(v.MemoryStats.Usage)
-			s.MemoryLimit = float64(v.MemoryStats.Limit)
-			s.MemoryPercentage = memPercent
-			s.NetworkRx = float64(v.Network.RxBytes)
-			s.NetworkTx = float64(v.Network.TxBytes)
-			s.mu.Unlock()
-			previousCpu = v.CpuStats.CpuUsage.TotalUsage
-			previousSystem = v.CpuStats.SystemUsage
-			u <- nil
-		}
-	}()
-	for {
-		select {
-		case <-time.After(2 * time.Second):
-			// zero out the values if we have not received an update within
-			// the specified duration.
-			s.mu.Lock()
-			s.CpuPercentage = 0
-			s.Memory = 0
-			s.MemoryPercentage = 0
-			s.mu.Unlock()
-		case err := <-u:
-			if err != nil {
-				s.mu.Lock()
-				s.err = err
-				s.mu.Unlock()
-				return
-			}
-		}
-	}
-}
-
-func (s *containerStats) Display(w io.Writer) error {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	if s.err != nil {
-		return s.err
-	}
-	fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n",
-		s.Name,
-		s.CpuPercentage,
-		units.BytesSize(s.Memory), units.BytesSize(s.MemoryLimit),
-		s.MemoryPercentage,
-		units.BytesSize(s.NetworkRx), units.BytesSize(s.NetworkTx))
-	return nil
-}
-
-func (cli *DockerCli) CmdStats(args ...string) error {
-	cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true)
-	cmd.Require(flag.Min, 1)
-	utils.ParseFlags(cmd, args, true)
-
-	names := cmd.Args()
-	sort.Strings(names)
-	var (
-		cStats []*containerStats
-		w      = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
-	)
-	printHeader := func() {
-		fmt.Fprint(cli.out, "\033[2J")
-		fmt.Fprint(cli.out, "\033[H")
-		fmt.Fprintln(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O")
-	}
-	for _, n := range names {
-		s := &containerStats{Name: n}
-		cStats = append(cStats, s)
-		go s.Collect(cli)
-	}
-	// do a quick pause so that any failed connections for containers that do not exist are able to be
-	// evicted before we display the initial or default values.
-	time.Sleep(500 * time.Millisecond)
-	var errs []string
-	for _, c := range cStats {
-		c.mu.Lock()
-		if c.err != nil {
-			errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
-		}
-		c.mu.Unlock()
-	}
-	if len(errs) > 0 {
-		return fmt.Errorf("%s", strings.Join(errs, ", "))
-	}
-	for _ = range time.Tick(500 * time.Millisecond) {
-		printHeader()
-		toRemove := []int{}
-		for i, s := range cStats {
-			if err := s.Display(w); err != nil {
-				toRemove = append(toRemove, i)
-			}
-		}
-		for j := len(toRemove) - 1; j >= 0; j-- {
-			i := toRemove[j]
-			cStats = append(cStats[:i], cStats[i+1:]...)
-		}
-		if len(cStats) == 0 {
-			return nil
-		}
-		w.Flush()
-	}
-	return nil
-}
-
-func calculateCpuPercent(previousCpu, previousSystem uint64, v *types.Stats) float64 {
-	var (
-		cpuPercent = 0.0
-		// calculate the change for the cpu usage of the container in between readings
-		cpuDelta = float64(v.CpuStats.CpuUsage.TotalUsage - previousCpu)
-		// calculate the change for the entire system between readings
-		systemDelta = float64(v.CpuStats.SystemUsage - previousSystem)
-	)
-
-	if systemDelta > 0.0 && cpuDelta > 0.0 {
-		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0
-	}
-	return cpuPercent
-}
diff --git a/api/client/commit.go b/api/client/commit.go
new file mode 100644
index 0000000..dd1d24f
--- /dev/null
+++ b/api/client/commit.go
@@ -0,0 +1,80 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+)
+
+// CmdCommit creates a new image from a container's changes.
+//
+// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
+func (cli *DockerCli) CmdCommit(args ...string) error {
+	cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes", true)
+	flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
+	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
+	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
+	flChanges := opts.NewListOpts(nil)
+	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
+	// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
+	flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
+	cmd.Require(flag.Max, 2)
+	cmd.Require(flag.Min, 1)
+	cmd.ParseFlags(args, true)
+
+	var (
+		name            = cmd.Arg(0)
+		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
+	)
+
+	//Check if the given image name can be resolved
+	if repository != "" {
+		if err := registry.ValidateRepositoryName(repository); err != nil {
+			return err
+		}
+	}
+
+	v := url.Values{}
+	v.Set("container", name)
+	v.Set("repo", repository)
+	v.Set("tag", tag)
+	v.Set("comment", *flComment)
+	v.Set("author", *flAuthor)
+	for _, change := range flChanges.GetAll() {
+		v.Add("changes", change)
+	}
+
+	if *flPause != true {
+		v.Set("pause", "0")
+	}
+
+	var (
+		config   *runconfig.Config
+		response types.ContainerCommitResponse
+	)
+
+	if *flConfig != "" {
+		config = &runconfig.Config{}
+		if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
+			return err
+		}
+	}
+	stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, nil)
+	if err != nil {
+		return err
+	}
+
+	if err := json.NewDecoder(stream).Decode(&response); err != nil {
+		return err
+	}
+
+	fmt.Fprintln(cli.out, response.ID)
+	return nil
+}
diff --git a/api/client/cp.go b/api/client/cp.go
new file mode 100644
index 0000000..d195601
--- /dev/null
+++ b/api/client/cp.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdCp copies files/folders from a path on the container to a directory on the host running the command.
+//
+// If HOSTDIR is '-', the data is written as a tar file to STDOUT.
+//
+// Usage: docker cp CONTAINER:PATH HOSTDIR
+func (cli *DockerCli) CmdCp(args ...string) error {
+	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data as a tar file to STDOUT.", true)
+	cmd.Require(flag.Exact, 2)
+
+	cmd.ParseFlags(args, true)
+
+	// deal with path name with `:`
+	info := strings.SplitN(cmd.Arg(0), ":", 2)
+
+	if len(info) != 2 {
+		return fmt.Errorf("Error: Path not specified")
+	}
+
+	cfg := &types.CopyConfig{
+		Resource: info[1],
+	}
+	stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", cfg, nil)
+	if stream != nil {
+		defer stream.Close()
+	}
+	if statusCode == 404 {
+		return fmt.Errorf("No such container: %v", info[0])
+	}
+	if err != nil {
+		return err
+	}
+
+	hostPath := cmd.Arg(1)
+	if statusCode == 200 {
+		if hostPath == "-" {
+			_, err = io.Copy(cli.out, stream)
+		} else {
+			err = archive.Untar(stream, hostPath, &archive.TarOptions{NoLchown: true})
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/api/client/create.go b/api/client/create.go
new file mode 100644
index 0000000..a59c09c
--- /dev/null
+++ b/api/client/create.go
@@ -0,0 +1,159 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/graph/tags"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+func (cli *DockerCli) pullImage(image string) error {
+	return cli.pullImageCustomOut(image, cli.out)
+}
+
+func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
+	v := url.Values{}
+	repos, tag := parsers.ParseRepositoryTag(image)
+	// pull only the image tagged 'latest' if no tag was specified
+	if tag == "" {
+		tag = tags.DEFAULTTAG
+	}
+	v.Set("fromImage", repos)
+	v.Set("tag", tag)
+
+	// Resolve the Repository name from fqn to RepositoryInfo
+	repoInfo, err := registry.ParseRepositoryInfo(repos)
+	if err != nil {
+		return err
+	}
+
+	// Resolve the Auth config relevant for this server
+	authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index)
+	buf, err := json.Marshal(authConfig)
+	if err != nil {
+		return err
+	}
+
+	registryAuthHeader := []string{
+		base64.URLEncoding.EncodeToString(buf),
+	}
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         out,
+		headers:     map[string][]string{"X-Registry-Auth": registryAuthHeader},
+	}
+	if err := cli.stream("POST", "/images/create?"+v.Encode(), sopts); err != nil {
+		return err
+	}
+	return nil
+}
+
+type cidFile struct {
+	path    string
+	file    *os.File
+	written bool
+}
+
+func newCIDFile(path string) (*cidFile, error) {
+	if _, err := os.Stat(path); err == nil {
+		return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path)
+	}
+
+	f, err := os.Create(path)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to create the container ID file: %s", err)
+	}
+
+	return &cidFile{path: path, file: f}, nil
+}
+
+func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) {
+	containerValues := url.Values{}
+	if name != "" {
+		containerValues.Set("name", name)
+	}
+
+	mergedConfig := runconfig.MergeConfigs(config, hostConfig)
+
+	var containerIDFile *cidFile
+	if cidfile != "" {
+		var err error
+		if containerIDFile, err = newCIDFile(cidfile); err != nil {
+			return nil, err
+		}
+		defer containerIDFile.Close()
+	}
+
+	//create the container
+	stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil)
+	//if image not found try to pull it
+	if statusCode == 404 && strings.Contains(err.Error(), config.Image) {
+		repo, tag := parsers.ParseRepositoryTag(config.Image)
+		if tag == "" {
+			tag = tags.DEFAULTTAG
+		}
+		fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", utils.ImageReference(repo, tag))
+
+		// we don't want to write to stdout anything apart from container.ID
+		if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
+			return nil, err
+		}
+		// Retry
+		if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil); err != nil {
+			return nil, err
+		}
+	} else if err != nil {
+		return nil, err
+	}
+
+	var response types.ContainerCreateResponse
+	if err := json.NewDecoder(stream).Decode(&response); err != nil {
+		return nil, err
+	}
+	for _, warning := range response.Warnings {
+		fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
+	}
+	if containerIDFile != nil {
+		if err = containerIDFile.Write(response.ID); err != nil {
+			return nil, err
+		}
+	}
+	return &response, nil
+}
+
+// CmdCreate creates a new container from a given image.
+//
+// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...]
+func (cli *DockerCli) CmdCreate(args ...string) error {
+	cmd := cli.Subcmd("create", "IMAGE [COMMAND] [ARG...]", "Create a new container", true)
+
+	// These are flags not stored in Config/HostConfig
+	var (
+		flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
+	)
+
+	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
+	if err != nil {
+		cmd.ReportError(err.Error(), true)
+	}
+	if config.Image == "" {
+		cmd.Usage()
+		return nil
+	}
+	response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(cli.out, "%s\n", response.ID)
+	return nil
+}
diff --git a/api/client/diff.go b/api/client/diff.go
new file mode 100644
index 0000000..6000c6b
--- /dev/null
+++ b/api/client/diff.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdDiff shows changes on a container's filesystem.
+//
+// Each changed file is printed on a separate line, prefixed with a single
+// character that indicates the status of the file: C (modified), A (added),
+// or D (deleted).
+//
+// Usage: docker diff CONTAINER
+func (cli *DockerCli) CmdDiff(args ...string) error {
+	cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem", true)
+	cmd.Require(flag.Exact, 1)
+	cmd.ParseFlags(args, true)
+
+	if cmd.Arg(0) == "" {
+		return fmt.Errorf("Container name cannot be empty")
+	}
+
+	rdr, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	changes := []types.ContainerChange{}
+	if err := json.NewDecoder(rdr).Decode(&changes); err != nil {
+		return err
+	}
+
+	for _, change := range changes {
+		var kind string
+		switch change.Kind {
+		case archive.ChangeModify:
+			kind = "C"
+		case archive.ChangeAdd:
+			kind = "A"
+		case archive.ChangeDelete:
+			kind = "D"
+		}
+		fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path)
+	}
+
+	return nil
+}
diff --git a/api/client/events.go b/api/client/events.go
new file mode 100644
index 0000000..75144b0
--- /dev/null
+++ b/api/client/events.go
@@ -0,0 +1,60 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers/filters"
+	"github.com/docker/docker/pkg/timeutils"
+)
+
+// CmdEvents prints a live stream of real time events from the server.
+//
+// Usage: docker events [OPTIONS]
+func (cli *DockerCli) CmdEvents(args ...string) error {
+	cmd := cli.Subcmd("events", "", "Get real time events from the server", true)
+	since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp")
+	until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
+	cmd.Require(flag.Exact, 0)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		v               = url.Values{}
+		eventFilterArgs = filters.Args{}
+	)
+
+	// Consolidate all filter flags, and sanity check them early.
+	// They'll get process in the daemon/server.
+	for _, f := range flFilter.GetAll() {
+		var err error
+		eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs)
+		if err != nil {
+			return err
+		}
+	}
+	if *since != "" {
+		v.Set("since", timeutils.GetTimestamp(*since))
+	}
+	if *until != "" {
+		v.Set("until", timeutils.GetTimestamp(*until))
+	}
+	if len(eventFilterArgs) > 0 {
+		filterJSON, err := filters.ToParam(eventFilterArgs)
+		if err != nil {
+			return err
+		}
+		v.Set("filters", filterJSON)
+	}
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         cli.out,
+	}
+	if err := cli.stream("GET", "/events?"+v.Encode(), sopts); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/api/client/exec.go b/api/client/exec.go
new file mode 100644
index 0000000..f247ec5
--- /dev/null
+++ b/api/client/exec.go
@@ -0,0 +1,131 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/runconfig"
+)
+
+// CmdExec runs a command in a running container.
+//
+// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
+func (cli *DockerCli) CmdExec(args ...string) error {
+	cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in a running container", true)
+
+	execConfig, err := runconfig.ParseExec(cmd, args)
+	// just in case the ParseExec does not exit
+	if execConfig.Container == "" || err != nil {
+		return StatusError{StatusCode: 1}
+	}
+
+	stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, nil)
+	if err != nil {
+		return err
+	}
+
+	var response types.ContainerExecCreateResponse
+	if err := json.NewDecoder(stream).Decode(&response); err != nil {
+		return err
+	}
+
+	execID := response.ID
+
+	if execID == "" {
+		fmt.Fprintf(cli.out, "exec ID empty")
+		return nil
+	}
+
+	//Temp struct for execStart so that we don't need to transfer all the execConfig
+	execStartCheck := &types.ExecStartCheck{
+		Detach: execConfig.Detach,
+		Tty:    execConfig.Tty,
+	}
+
+	if !execConfig.Detach {
+		if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil {
+			return err
+		}
+	} else {
+		if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execStartCheck, nil)); err != nil {
+			return err
+		}
+		// For now don't print this - wait for when we support exec wait()
+		// fmt.Fprintf(cli.out, "%s\n", execID)
+		return nil
+	}
+
+	// Interactive exec requested.
+	var (
+		out, stderr io.Writer
+		in          io.ReadCloser
+		hijacked    = make(chan io.Closer)
+		errCh       chan error
+	)
+
+	// Block the return until the chan gets closed
+	defer func() {
+		logrus.Debugf("End of CmdExec(), Waiting for hijack to finish.")
+		if _, ok := <-hijacked; ok {
+			fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
+		}
+	}()
+
+	if execConfig.AttachStdin {
+		in = cli.in
+	}
+	if execConfig.AttachStdout {
+		out = cli.out
+	}
+	if execConfig.AttachStderr {
+		if execConfig.Tty {
+			stderr = cli.out
+		} else {
+			stderr = cli.err
+		}
+	}
+	errCh = promise.Go(func() error {
+		return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig)
+	})
+
+	// Acknowledge the hijack before starting
+	select {
+	case closer := <-hijacked:
+		// Make sure that hijack gets closed when returning. (result
+		// in closing hijack chan and freeing server's goroutines.
+		if closer != nil {
+			defer closer.Close()
+		}
+	case err := <-errCh:
+		if err != nil {
+			logrus.Debugf("Error hijack: %s", err)
+			return err
+		}
+	}
+
+	if execConfig.Tty && cli.isTerminalIn {
+		if err := cli.monitorTtySize(execID, true); err != nil {
+			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
+		}
+	}
+
+	if err := <-errCh; err != nil {
+		logrus.Debugf("Error hijack: %s", err)
+		return err
+	}
+
+	var status int
+	if _, status, err = getExecExitCode(cli, execID); err != nil {
+		return err
+	}
+
+	if status != 0 {
+		return StatusError{StatusCode: status}
+	}
+
+	return nil
+}
diff --git a/api/client/export.go b/api/client/export.go
new file mode 100644
index 0000000..42b0834
--- /dev/null
+++ b/api/client/export.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+	"errors"
+	"io"
+	"os"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdExport exports a filesystem as a tar archive.
+//
+// The tar archive is streamed to STDOUT by default or written to a file.
+//
+// Usage: docker export [OPTIONS] CONTAINER
+func (cli *DockerCli) CmdExport(args ...string) error {
+	cmd := cli.Subcmd("export", "CONTAINER", "Export a filesystem as a tar archive (streamed to STDOUT by default)", true)
+	outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		output io.Writer = cli.out
+		err    error
+	)
+	if *outfile != "" {
+		output, err = os.Create(*outfile)
+		if err != nil {
+			return err
+		}
+	} else if cli.isTerminalOut {
+		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
+	}
+
+	image := cmd.Arg(0)
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         output,
+	}
+	if err := cli.stream("GET", "/containers/"+image+"/export", sopts); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/api/client/help.go b/api/client/help.go
new file mode 100644
index 0000000..8e1dc85
--- /dev/null
+++ b/api/client/help.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdHelp displays information on a Docker command.
+//
+// If more than one command is specified, information is only shown for the first command.
+//
+// Usage: docker help COMMAND or docker COMMAND --help
+func (cli *DockerCli) CmdHelp(args ...string) error {
+	if len(args) > 1 {
+		method, exists := cli.getMethod(args[:2]...)
+		if exists {
+			method("--help")
+			return nil
+		}
+	}
+	if len(args) > 0 {
+		method, exists := cli.getMethod(args[0])
+		if !exists {
+			return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0])
+		}
+		method("--help")
+		return nil
+	}
+
+	flag.Usage()
+
+	return nil
+}
diff --git a/api/client/hijack.go b/api/client/hijack.go
index 4f89c3a..5f4794a 100644
--- a/api/client/hijack.go
+++ b/api/client/hijack.go
@@ -13,7 +13,7 @@
 	"strings"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/pkg/promise"
@@ -142,6 +142,13 @@
 	if err != nil {
 		return err
 	}
+
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
+	// then the user can't change OUR headers
+	for k, v := range cli.configFile.HttpHeaders {
+		req.Header.Set(k, v)
+	}
+
 	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
 	req.Header.Set("Content-Type", "text/plain")
 	req.Header.Set("Connection", "Upgrade")
@@ -211,7 +218,7 @@
 			} else {
 				_, err = stdcopy.StdCopy(stdout, stderr, br)
 			}
-			log.Debugf("[hijack] End of stdout")
+			logrus.Debugf("[hijack] End of stdout")
 			return err
 		})
 	}
@@ -219,14 +226,14 @@
 	sendStdin := promise.Go(func() error {
 		if in != nil {
 			io.Copy(rwc, in)
-			log.Debugf("[hijack] End of stdin")
+			logrus.Debugf("[hijack] End of stdin")
 		}
 
 		if conn, ok := rwc.(interface {
 			CloseWrite() error
 		}); ok {
 			if err := conn.CloseWrite(); err != nil {
-				log.Debugf("Couldn't send EOF: %s", err)
+				logrus.Debugf("Couldn't send EOF: %s", err)
 			}
 		}
 		// Discard errors due to pipe interruption
@@ -235,14 +242,14 @@
 
 	if stdout != nil || stderr != nil {
 		if err := <-receiveStdout; err != nil {
-			log.Debugf("Error receiveStdout: %s", err)
+			logrus.Debugf("Error receiveStdout: %s", err)
 			return err
 		}
 	}
 
 	if !cli.isTerminalIn {
 		if err := <-sendStdin; err != nil {
-			log.Debugf("Error sendStdin: %s", err)
+			logrus.Debugf("Error sendStdin: %s", err)
 			return err
 		}
 	}
diff --git a/api/client/history.go b/api/client/history.go
new file mode 100644
index 0000000..31b8535
--- /dev/null
+++ b/api/client/history.go
@@ -0,0 +1,73 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"text/tabwriter"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/docker/pkg/units"
+)
+
+// CmdHistory shows the history of an image.
+//
+// Usage: docker history [OPTIONS] IMAGE
+func (cli *DockerCli) CmdHistory(args ...string) error {
+	cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true)
+	human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format")
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
+	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
+	cmd.Require(flag.Exact, 1)
+	cmd.ParseFlags(args, true)
+
+	rdr, _, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	history := []types.ImageHistory{}
+	if err := json.NewDecoder(rdr).Decode(&history); err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	if !*quiet {
+		fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
+	}
+
+	for _, entry := range history {
+		if *noTrunc {
+			fmt.Fprintf(w, entry.ID)
+		} else {
+			fmt.Fprintf(w, stringid.TruncateID(entry.ID))
+		}
+		if !*quiet {
+			if *human {
+				fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))))
+			} else {
+				fmt.Fprintf(w, "\t%s\t", time.Unix(entry.Created, 0).Format(time.RFC3339))
+			}
+
+			if *noTrunc {
+				fmt.Fprintf(w, "%s\t", entry.CreatedBy)
+			} else {
+				fmt.Fprintf(w, "%s\t", stringutils.Truncate(entry.CreatedBy, 45))
+			}
+
+			if *human {
+				fmt.Fprintf(w, "%s\t", units.HumanSize(float64(entry.Size)))
+			} else {
+				fmt.Fprintf(w, "%d\t", entry.Size)
+			}
+
+			fmt.Fprintf(w, "%s", entry.Comment)
+		}
+		fmt.Fprintf(w, "\n")
+	}
+	w.Flush()
+	return nil
+}
diff --git a/api/client/images.go b/api/client/images.go
new file mode 100644
index 0000000..e39c473
--- /dev/null
+++ b/api/client/images.go
@@ -0,0 +1,126 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"text/tabwriter"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/filters"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/units"
+	"github.com/docker/docker/utils"
+)
+
+// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified.
+//
+// Usage: docker images [OPTIONS] [REPOSITORY]
+func (cli *DockerCli) CmdImages(args ...string) error {
+	cmd := cli.Subcmd("images", "[REPOSITORY]", "List images", true)
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
+	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
+	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
+	showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")
+
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
+	cmd.Require(flag.Max, 1)
+	cmd.ParseFlags(args, true)
+
+	// Consolidate all filter flags, and sanity check them early.
+	// They'll get process in the daemon/server.
+	imageFilterArgs := filters.Args{}
+	for _, f := range flFilter.GetAll() {
+		var err error
+		imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
+		if err != nil {
+			return err
+		}
+	}
+
+	matchName := cmd.Arg(0)
+	v := url.Values{}
+	if len(imageFilterArgs) > 0 {
+		filterJSON, err := filters.ToParam(imageFilterArgs)
+		if err != nil {
+			return err
+		}
+		v.Set("filters", filterJSON)
+	}
+
+	if cmd.NArg() == 1 {
+		// FIXME rename this parameter, to not be confused with the filters flag
+		v.Set("filter", matchName)
+	}
+	if *all {
+		v.Set("all", "1")
+	}
+
+	rdr, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil)
+	if err != nil {
+		return err
+	}
+
+	images := []types.Image{}
+	if err := json.NewDecoder(rdr).Decode(&images); err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	if !*quiet {
+		if *showDigests {
+			fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
+		} else {
+			fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
+		}
+	}
+
+	for _, image := range images {
+		ID := image.ID
+		if !*noTrunc {
+			ID = stringid.TruncateID(ID)
+		}
+
+		repoTags := image.RepoTags
+		repoDigests := image.RepoDigests
+
+		if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
+			// dangling image - clear out either repoTags or repoDigsts so we only show it once below
+			repoDigests = []string{}
+		}
+
+		// combine the tags and digests lists
+		tagsAndDigests := append(repoTags, repoDigests...)
+		for _, repoAndRef := range tagsAndDigests {
+			repo, ref := parsers.ParseRepositoryTag(repoAndRef)
+			// default tag and digest to none - if there's a value, it'll be set below
+			tag := "<none>"
+			digest := "<none>"
+			if utils.DigestReference(ref) {
+				digest = ref
+			} else {
+				tag = ref
+			}
+
+			if !*quiet {
+				if *showDigests {
+					fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
+				} else {
+					fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
+				}
+			} else {
+				fmt.Fprintln(w, ID)
+			}
+		}
+	}
+
+	if !*quiet {
+		w.Flush()
+	}
+	return nil
+}
diff --git a/api/client/import.go b/api/client/import.go
new file mode 100644
index 0000000..48c5689
--- /dev/null
+++ b/api/client/import.go
@@ -0,0 +1,64 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+)
+
+// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image.
+//
+// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file. If the URL is '-', then the tar file is read from STDIN.
+//
+// Usage: docker import [OPTIONS] URL [REPOSITORY[:TAG]]
+func (cli *DockerCli) CmdImport(args ...string) error {
+	cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then\noptionally tag it.", true)
+	flChanges := opts.NewListOpts(nil)
+	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		v          = url.Values{}
+		src        = cmd.Arg(0)
+		repository = cmd.Arg(1)
+	)
+
+	v.Set("fromSrc", src)
+	v.Set("repo", repository)
+	for _, change := range flChanges.GetAll() {
+		v.Add("changes", change)
+	}
+	if cmd.NArg() == 3 {
+		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
+		v.Set("tag", cmd.Arg(2))
+	}
+
+	if repository != "" {
+		//Check if the given image name can be resolved
+		repo, _ := parsers.ParseRepositoryTag(repository)
+		if err := registry.ValidateRepositoryName(repo); err != nil {
+			return err
+		}
+	}
+
+	var in io.Reader
+
+	if src == "-" {
+		in = cli.in
+	}
+
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          in,
+		out:         cli.out,
+	}
+
+	return cli.stream("POST", "/images/create?"+v.Encode(), sopts)
+}
diff --git a/api/client/info.go b/api/client/info.go
new file mode 100644
index 0000000..9984f23
--- /dev/null
+++ b/api/client/info.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/units"
+)
+
+// CmdInfo displays system-wide information.
+//
+// Usage: docker info
+func (cli *DockerCli) CmdInfo(args ...string) error {
+	cmd := cli.Subcmd("info", "", "Display system-wide information", true)
+	cmd.Require(flag.Exact, 0)
+	cmd.ParseFlags(args, false)
+
+	rdr, _, err := cli.call("GET", "/info", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	info := &types.Info{}
+	if err := json.NewDecoder(rdr).Decode(info); err != nil {
+		return fmt.Errorf("Error reading remote info: %v", err)
+	}
+
+	fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers)
+	fmt.Fprintf(cli.out, "Images: %d\n", info.Images)
+	fmt.Fprintf(cli.out, "Storage Driver: %s\n", info.Driver)
+	if info.DriverStatus != nil {
+		for _, pair := range info.DriverStatus {
+			fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
+		}
+	}
+	fmt.Fprintf(cli.out, "Execution Driver: %s\n", info.ExecutionDriver)
+	fmt.Fprintf(cli.out, "Logging Driver: %s\n", info.LoggingDriver)
+	fmt.Fprintf(cli.out, "Kernel Version: %s\n", info.KernelVersion)
+	fmt.Fprintf(cli.out, "Operating System: %s\n", info.OperatingSystem)
+	fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU)
+	fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal)))
+	fmt.Fprintf(cli.out, "Name: %s\n", info.Name)
+	fmt.Fprintf(cli.out, "ID: %s\n", info.ID)
+
+	if info.Debug {
+		fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug)
+		fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd)
+		fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines)
+		fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime)
+		fmt.Fprintf(cli.out, "EventsListeners: %d\n", info.NEventsListener)
+		fmt.Fprintf(cli.out, "Init SHA1: %s\n", info.InitSha1)
+		fmt.Fprintf(cli.out, "Init Path: %s\n", info.InitPath)
+		fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir)
+	}
+
+	if info.HttpProxy != "" {
+		fmt.Fprintf(cli.out, "Http Proxy: %s\n", info.HttpProxy)
+	}
+	if info.HttpsProxy != "" {
+		fmt.Fprintf(cli.out, "Https Proxy: %s\n", info.HttpsProxy)
+	}
+	if info.NoProxy != "" {
+		fmt.Fprintf(cli.out, "No Proxy: %s\n", info.NoProxy)
+	}
+
+	if info.IndexServerAddress != "" {
+		u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username
+		if len(u) > 0 {
+			fmt.Fprintf(cli.out, "Username: %v\n", u)
+			fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress)
+		}
+	}
+	if !info.MemoryLimit {
+		fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
+	}
+	if !info.SwapLimit {
+		fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
+	}
+	if !info.IPv4Forwarding {
+		fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
+	}
+	if info.Labels != nil {
+		fmt.Fprintln(cli.out, "Labels:")
+		for _, attribute := range info.Labels {
+			fmt.Fprintf(cli.out, " %s\n", attribute)
+		}
+	}
+
+	if info.ExperimentalBuild {
+		fmt.Fprintf(cli.out, "Experimental: true\n")
+	}
+
+	return nil
+}
diff --git a/api/client/inspect.go b/api/client/inspect.go
new file mode 100644
index 0000000..eb8565b
--- /dev/null
+++ b/api/client/inspect.go
@@ -0,0 +1,121 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"text/template"
+
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdInspect displays low-level information on one or more containers or images.
+//
+// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
+func (cli *DockerCli) CmdInspect(args ...string) error {
+	cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true)
+	tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var tmpl *template.Template
+	if *tmplStr != "" {
+		var err error
+		if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil {
+			return StatusError{StatusCode: 64,
+				Status: "Template parsing error: " + err.Error()}
+		}
+	}
+
+	indented := new(bytes.Buffer)
+	indented.WriteString("[\n")
+	status := 0
+	isImage := false
+
+	for _, name := range cmd.Args() {
+		obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, nil))
+		if err != nil {
+			obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil))
+			isImage = true
+			if err != nil {
+				if strings.Contains(err.Error(), "No such") {
+					fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
+				} else {
+					fmt.Fprintf(cli.err, "%s", err)
+				}
+				status = 1
+				continue
+			}
+		}
+
+		if tmpl == nil {
+			if err = json.Indent(indented, obj, "", "    "); err != nil {
+				fmt.Fprintf(cli.err, "%s\n", err)
+				status = 1
+				continue
+			}
+		} else {
+			rdr := bytes.NewReader(obj)
+			dec := json.NewDecoder(rdr)
+
+			if isImage {
+				inspPtr := types.ImageInspect{}
+				if err := dec.Decode(&inspPtr); err != nil {
+					fmt.Fprintf(cli.err, "%s\n", err)
+					status = 1
+					continue
+				}
+				if err := tmpl.Execute(cli.out, inspPtr); err != nil {
+					rdr.Seek(0, 0)
+					var raw interface{}
+					if err := dec.Decode(&raw); err != nil {
+						return err
+					}
+					if err = tmpl.Execute(cli.out, raw); err != nil {
+						return err
+					}
+				}
+			} else {
+				inspPtr := types.ContainerJSON{}
+				if err := dec.Decode(&inspPtr); err != nil {
+					fmt.Fprintf(cli.err, "%s\n", err)
+					status = 1
+					continue
+				}
+				if err := tmpl.Execute(cli.out, inspPtr); err != nil {
+					rdr.Seek(0, 0)
+					var raw interface{}
+					if err := dec.Decode(&raw); err != nil {
+						return err
+					}
+					if err = tmpl.Execute(cli.out, raw); err != nil {
+						return err
+					}
+				}
+			}
+			cli.out.Write([]byte{'\n'})
+		}
+		indented.WriteString(",")
+	}
+
+	if indented.Len() > 1 {
+		// Remove trailing ','
+		indented.Truncate(indented.Len() - 1)
+	}
+	indented.WriteString("]\n")
+
+	if tmpl == nil {
+		if _, err := io.Copy(cli.out, indented); err != nil {
+			return err
+		}
+	}
+
+	if status != 0 {
+		return StatusError{StatusCode: status}
+	}
+	return nil
+}
diff --git a/api/client/kill.go b/api/client/kill.go
new file mode 100644
index 0000000..becff3b
--- /dev/null
+++ b/api/client/kill.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdKill kills one or more running container using SIGKILL or a specified signal.
+//
+// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdKill(args ...string) error {
+	cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal", true)
+	signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to kill containers: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/client/load.go b/api/client/load.go
new file mode 100644
index 0000000..8dd8bb5
--- /dev/null
+++ b/api/client/load.go
@@ -0,0 +1,41 @@
+package client
+
+import (
+	"io"
+	"os"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdLoad loads an image from a tar archive.
+//
+// The tar archive is read from STDIN by default, or from a tar archive file.
+//
+// Usage: docker load [OPTIONS]
+func (cli *DockerCli) CmdLoad(args ...string) error {
+	cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN", true)
+	infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
+	cmd.Require(flag.Exact, 0)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		input io.Reader = cli.in
+		err   error
+	)
+	if *infile != "" {
+		input, err = os.Open(*infile)
+		if err != nil {
+			return err
+		}
+	}
+	sopts := &streamOpts{
+		rawTerminal: true,
+		in:          input,
+		out:         cli.out,
+	}
+	if err := cli.stream("POST", "/images/load", sopts); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/api/client/login.go b/api/client/login.go
new file mode 100644
index 0000000..d7da1de
--- /dev/null
+++ b/api/client/login.go
@@ -0,0 +1,144 @@
+package client
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/cliconfig"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/registry"
+)
+
+// CmdLogin logs in or registers a user to a Docker registry service.
+//
+// If no server is specified, the user will be logged into or registered to the registry's index server.
+//
+// Usage: docker login SERVER
+func (cli *DockerCli) CmdLogin(args ...string) error {
+	cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
+	cmd.Require(flag.Max, 1)
+
+	var username, password, email string
+
+	cmd.StringVar(&username, []string{"u", "-username"}, "", "Username")
+	cmd.StringVar(&password, []string{"p", "-password"}, "", "Password")
+	cmd.StringVar(&email, []string{"e", "-email"}, "", "Email")
+
+	cmd.ParseFlags(args, true)
+
+	serverAddress := registry.IndexServerAddress()
+	if len(cmd.Args()) > 0 {
+		serverAddress = cmd.Arg(0)
+	}
+
+	promptDefault := func(prompt string, configDefault string) {
+		if configDefault == "" {
+			fmt.Fprintf(cli.out, "%s: ", prompt)
+		} else {
+			fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
+		}
+	}
+
+	readInput := func(in io.Reader, out io.Writer) string {
+		reader := bufio.NewReader(in)
+		line, _, err := reader.ReadLine()
+		if err != nil {
+			fmt.Fprintln(out, err.Error())
+			os.Exit(1)
+		}
+		return string(line)
+	}
+
+	authconfig, ok := cli.configFile.AuthConfigs[serverAddress]
+	if !ok {
+		authconfig = cliconfig.AuthConfig{}
+	}
+
+	if username == "" {
+		promptDefault("Username", authconfig.Username)
+		username = readInput(cli.in, cli.out)
+		username = strings.Trim(username, " ")
+		if username == "" {
+			username = authconfig.Username
+		}
+	}
+	// Assume that a different username means they may not want to use
+	// the password or email from the config file, so prompt them
+	if username != authconfig.Username {
+		if password == "" {
+			oldState, err := term.SaveState(cli.inFd)
+			if err != nil {
+				return err
+			}
+			fmt.Fprintf(cli.out, "Password: ")
+			term.DisableEcho(cli.inFd, oldState)
+
+			password = readInput(cli.in, cli.out)
+			fmt.Fprint(cli.out, "\n")
+
+			term.RestoreTerminal(cli.inFd, oldState)
+			if password == "" {
+				return fmt.Errorf("Error : Password Required")
+			}
+		}
+
+		if email == "" {
+			promptDefault("Email", authconfig.Email)
+			email = readInput(cli.in, cli.out)
+			if email == "" {
+				email = authconfig.Email
+			}
+		}
+	} else {
+		// However, if they don't override the username use the
+		// password or email from the cmd line if specified. IOW, allow
+		// then to change/override them.  And if not specified, just
+		// use what's in the config file
+		if password == "" {
+			password = authconfig.Password
+		}
+		if email == "" {
+			email = authconfig.Email
+		}
+	}
+	authconfig.Username = username
+	authconfig.Password = password
+	authconfig.Email = email
+	authconfig.ServerAddress = serverAddress
+	cli.configFile.AuthConfigs[serverAddress] = authconfig
+
+	stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.AuthConfigs[serverAddress], nil)
+	if statusCode == 401 {
+		delete(cli.configFile.AuthConfigs, serverAddress)
+		if err2 := cli.configFile.Save(); err2 != nil {
+			fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2)
+		}
+		return err
+	}
+	if err != nil {
+		return err
+	}
+
+	var response types.AuthResponse
+	if err := json.NewDecoder(stream).Decode(&response); err != nil {
+		// Upon error, remove entry
+		delete(cli.configFile.AuthConfigs, serverAddress)
+		return err
+	}
+
+	if err := cli.configFile.Save(); err != nil {
+		return fmt.Errorf("Error saving config file: %v", err)
+	}
+	fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename())
+
+	if response.Status != "" {
+		fmt.Fprintf(cli.out, "%s\n", response.Status)
+	}
+	return nil
+}
diff --git a/api/client/logout.go b/api/client/logout.go
new file mode 100644
index 0000000..74d0c27
--- /dev/null
+++ b/api/client/logout.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/registry"
+)
+
+// CmdLogout logs a user out from a Docker registry.
+//
+// If no server is specified, the user will be logged out from the registry's index server.
+//
+// Usage: docker logout [SERVER]
+func (cli *DockerCli) CmdLogout(args ...string) error {
+	cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
+	cmd.Require(flag.Max, 1)
+
+	cmd.ParseFlags(args, false)
+	serverAddress := registry.IndexServerAddress()
+	if len(cmd.Args()) > 0 {
+		serverAddress = cmd.Arg(0)
+	}
+
+	if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok {
+		fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress)
+	} else {
+		fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress)
+		delete(cli.configFile.AuthConfigs, serverAddress)
+
+		if err := cli.configFile.Save(); err != nil {
+			return fmt.Errorf("Failed to save docker config: %v", err)
+		}
+	}
+	return nil
+}
diff --git a/api/client/logs.go b/api/client/logs.go
new file mode 100644
index 0000000..00369db
--- /dev/null
+++ b/api/client/logs.go
@@ -0,0 +1,68 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/timeutils"
+)
+
+// CmdLogs fetches the logs of a given container.
+//
+// docker logs [OPTIONS] CONTAINER
+func (cli *DockerCli) CmdLogs(args ...string) error {
+	var (
+		cmd    = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true)
+		follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
+		since  = cmd.String([]string{"-since"}, "", "Show logs since timestamp")
+		times  = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
+		tail   = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
+	)
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+
+	name := cmd.Arg(0)
+
+	stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	var c types.ContainerJSON
+	if err := json.NewDecoder(stream).Decode(&c); err != nil {
+		return err
+	}
+
+	if logType := c.HostConfig.LogConfig.Type; logType != "json-file" {
+		return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver (got: %s)", logType)
+	}
+
+	v := url.Values{}
+	v.Set("stdout", "1")
+	v.Set("stderr", "1")
+
+	if *since != "" {
+		v.Set("since", timeutils.GetTimestamp(*since))
+	}
+
+	if *times {
+		v.Set("timestamps", "1")
+	}
+
+	if *follow {
+		v.Set("follow", "1")
+	}
+	v.Set("tail", *tail)
+
+	sopts := &streamOpts{
+		rawTerminal: c.Config.Tty,
+		out:         cli.out,
+		err:         cli.err,
+	}
+
+	return cli.stream("GET", "/containers/"+name+"/logs?"+v.Encode(), sopts)
+}
diff --git a/api/client/pause.go b/api/client/pause.go
new file mode 100644
index 0000000..2f8f3c8
--- /dev/null
+++ b/api/client/pause.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdPause pauses all processes within one or more containers.
+//
+// Usage: docker pause CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdPause(args ...string) error {
+	cmd := cli.Subcmd("pause", "CONTAINER [CONTAINER...]", "Pause all processes within a container", true)
+	cmd.Require(flag.Min, 1)
+	cmd.ParseFlags(args, false)
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to pause containers: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/client/port.go b/api/client/port.go
new file mode 100644
index 0000000..4c39314
--- /dev/null
+++ b/api/client/port.go
@@ -0,0 +1,64 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/nat"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdPort lists port mappings for a container.
+// If a private port is specified, it also shows the public-facing port that is NATed to the private port.
+//
+// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]]
+func (cli *DockerCli) CmdPort(args ...string) error {
+	cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true)
+	cmd.Require(flag.Min, 1)
+	cmd.ParseFlags(args, true)
+
+	stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	var c struct {
+		NetworkSettings struct {
+			Ports nat.PortMap
+		}
+	}
+
+	if err := json.NewDecoder(stream).Decode(&c); err != nil {
+		return err
+	}
+
+	if cmd.NArg() == 2 {
+		var (
+			port  = cmd.Arg(1)
+			proto = "tcp"
+			parts = strings.SplitN(port, "/", 2)
+		)
+
+		if len(parts) == 2 && len(parts[1]) != 0 {
+			port = parts[0]
+			proto = parts[1]
+		}
+		natPort := port + "/" + proto
+		if frontends, exists := c.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
+			for _, frontend := range frontends {
+				fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
+			}
+			return nil
+		}
+		return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0))
+	}
+
+	for from, frontends := range c.NetworkSettings.Ports {
+		for _, frontend := range frontends {
+			fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort)
+		}
+	}
+
+	return nil
+}
diff --git a/api/client/ps.go b/api/client/ps.go
new file mode 100644
index 0000000..6c40c68
--- /dev/null
+++ b/api/client/ps.go
@@ -0,0 +1,175 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"time"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers/filters"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/docker/pkg/units"
+)
+
+// CmdPs outputs a list of Docker containers.
+//
+// Usage: docker ps [OPTIONS]
+func (cli *DockerCli) CmdPs(args ...string) error {
+	var (
+		err error
+
+		psFilterArgs = filters.Args{}
+		v            = url.Values{}
+
+		cmd      = cli.Subcmd("ps", "", "List containers", true)
+		quiet    = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
+		size     = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes")
+		all      = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
+		noTrunc  = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
+		nLatest  = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running")
+		since    = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running")
+		before   = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name")
+		last     = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running")
+		flFilter = opts.NewListOpts(nil)
+	)
+	cmd.Require(flag.Exact, 0)
+
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
+
+	cmd.ParseFlags(args, true)
+	if *last == -1 && *nLatest {
+		*last = 1
+	}
+
+	if *all {
+		v.Set("all", "1")
+	}
+
+	if *last != -1 {
+		v.Set("limit", strconv.Itoa(*last))
+	}
+
+	if *since != "" {
+		v.Set("since", *since)
+	}
+
+	if *before != "" {
+		v.Set("before", *before)
+	}
+
+	if *size {
+		v.Set("size", "1")
+	}
+
+	// Consolidate all filter flags, and sanity check them.
+	// They'll get processed in the daemon/server.
+	for _, f := range flFilter.GetAll() {
+		if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil {
+			return err
+		}
+	}
+
+	if len(psFilterArgs) > 0 {
+		filterJSON, err := filters.ToParam(psFilterArgs)
+		if err != nil {
+			return err
+		}
+
+		v.Set("filters", filterJSON)
+	}
+
+	rdr, _, err := cli.call("GET", "/containers/json?"+v.Encode(), nil, nil)
+	if err != nil {
+		return err
+	}
+
+	containers := []types.Container{}
+	if err := json.NewDecoder(rdr).Decode(&containers); err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	if !*quiet {
+		fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
+
+		if *size {
+			fmt.Fprintln(w, "\tSIZE")
+		} else {
+			fmt.Fprint(w, "\n")
+		}
+	}
+
+	stripNamePrefix := func(ss []string) []string {
+		for i, s := range ss {
+			ss[i] = s[1:]
+		}
+
+		return ss
+	}
+
+	for _, container := range containers {
+		ID := container.ID
+
+		if !*noTrunc {
+			ID = stringid.TruncateID(ID)
+		}
+
+		if *quiet {
+			fmt.Fprintln(w, ID)
+
+			continue
+		}
+
+		var (
+			names   = stripNamePrefix(container.Names)
+			command = strconv.Quote(container.Command)
+		)
+
+		if !*noTrunc {
+			command = stringutils.Truncate(command, 20)
+
+			// only display the default name for the container with notrunc is passed
+			for _, name := range names {
+				if len(strings.Split(name, "/")) == 1 {
+					names = []string{name}
+					break
+				}
+			}
+		}
+
+		image := container.Image
+		if image == "" {
+			image = "<no image>"
+		}
+
+		fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", ID, image, command,
+			units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(container.Created), 0))),
+			container.Status, api.DisplayablePorts(container.Ports), strings.Join(names, ","))
+
+		if *size {
+			if container.SizeRootFs > 0 {
+				fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(float64(container.SizeRw)), units.HumanSize(float64(container.SizeRootFs)))
+			} else {
+				fmt.Fprintf(w, "%s\n", units.HumanSize(float64(container.SizeRw)))
+			}
+
+			continue
+		}
+
+		fmt.Fprint(w, "\n")
+	}
+
+	if !*quiet {
+		w.Flush()
+	}
+
+	return nil
+}
diff --git a/api/client/pull.go b/api/client/pull.go
new file mode 100644
index 0000000..4be30b4
--- /dev/null
+++ b/api/client/pull.go
@@ -0,0 +1,47 @@
+package client
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/docker/docker/graph/tags"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+// CmdPull pulls an image or a repository from the registry.
+//
+// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST]
+func (cli *DockerCli) CmdPull(args ...string) error {
+	cmd := cli.Subcmd("pull", "NAME[:TAG|@DIGEST]", "Pull an image or a repository from the registry", true)
+	allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		v         = url.Values{}
+		remote    = cmd.Arg(0)
+		newRemote = remote
+	)
+	taglessRemote, tag := parsers.ParseRepositoryTag(remote)
+	if tag == "" && !*allTags {
+		newRemote = utils.ImageReference(taglessRemote, tags.DEFAULTTAG)
+	}
+	if tag != "" && *allTags {
+		return fmt.Errorf("tag can't be used with --all-tags/-a")
+	}
+
+	v.Set("fromImage", newRemote)
+
+	// Resolve the Repository name from fqn to RepositoryInfo
+	repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
+	if err != nil {
+		return err
+	}
+
+	_, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull")
+	return err
+}
diff --git a/api/client/push.go b/api/client/push.go
new file mode 100644
index 0000000..dc4266c
--- /dev/null
+++ b/api/client/push.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"fmt"
+	"net/url"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+)
+
+// CmdPush pushes an image or repository to the registry.
+//
+// Usage: docker push NAME[:TAG]
+func (cli *DockerCli) CmdPush(args ...string) error {
+	cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry", true)
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+
+	name := cmd.Arg(0)
+
+	remote, tag := parsers.ParseRepositoryTag(name)
+
+	// Resolve the Repository name from fqn to RepositoryInfo
+	repoInfo, err := registry.ParseRepositoryInfo(remote)
+	if err != nil {
+		return err
+	}
+	// Resolve the Auth config relevant for this server
+	authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index)
+	// If we're not using a custom registry, we know the restrictions
+	// applied to repository names and can warn the user in advance.
+	// Custom repositories can have different rules, and we must also
+	// allow pushing by image ID.
+	if repoInfo.Official {
+		username := authConfig.Username
+		if username == "" {
+			username = "<user>"
+		}
+		return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository to <user>/<repo> (ex: %s/%s)", username, repoInfo.LocalName)
+	}
+
+	v := url.Values{}
+	v.Set("tag", tag)
+
+	_, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push")
+	return err
+}
diff --git a/api/client/rename.go b/api/client/rename.go
new file mode 100644
index 0000000..ebe1696
--- /dev/null
+++ b/api/client/rename.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdRename renames a container.
+//
+// Usage: docker rename OLD_NAME NEW_NAME
+func (cli *DockerCli) CmdRename(args ...string) error {
+	cmd := cli.Subcmd("rename", "OLD_NAME NEW_NAME", "Rename a container", true)
+	cmd.Require(flag.Exact, 2)
+	cmd.ParseFlags(args, true)
+
+	oldName := cmd.Arg(0)
+	newName := cmd.Arg(1)
+
+	if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", oldName, newName), nil, nil)); err != nil {
+		fmt.Fprintf(cli.err, "%s\n", err)
+		return fmt.Errorf("Error: failed to rename container named %s", oldName)
+	}
+	return nil
+}
diff --git a/api/client/restart.go b/api/client/restart.go
new file mode 100644
index 0000000..c769fb6
--- /dev/null
+++ b/api/client/restart.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdRestart restarts one or more running containers.
+//
+// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdRestart(args ...string) error {
+	cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container", true)
+	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	v := url.Values{}
+	v.Set("t", strconv.Itoa(*nSeconds))
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil))
+		if err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to restart containers: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/client/rm.go b/api/client/rm.go
new file mode 100644
index 0000000..e6f3aea
--- /dev/null
+++ b/api/client/rm.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdRm removes one or more containers.
+//
+// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdRm(args ...string) error {
+	cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers", true)
+	v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
+	link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link")
+	force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	val := url.Values{}
+	if *v {
+		val.Set("v", "1")
+	}
+	if *link {
+		val.Set("link", "1")
+	}
+
+	if *force {
+		val.Set("force", "1")
+	}
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		if name == "" {
+			return fmt.Errorf("Container name cannot be empty")
+		}
+		name = strings.Trim(name, "/")
+
+		_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil))
+		if err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to remove containers: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/client/rmi.go b/api/client/rmi.go
new file mode 100644
index 0000000..36f2036
--- /dev/null
+++ b/api/client/rmi.go
@@ -0,0 +1,59 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdRmi removes all images with the specified name(s).
+//
+// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...]
+func (cli *DockerCli) CmdRmi(args ...string) error {
+	var (
+		cmd     = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images", true)
+		force   = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image")
+		noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
+	)
+	cmd.Require(flag.Min, 1)
+	cmd.ParseFlags(args, true)
+
+	v := url.Values{}
+	if *force {
+		v.Set("force", "1")
+	}
+	if *noprune {
+		v.Set("noprune", "1")
+	}
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		rdr, _, err := cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil)
+		if err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			dels := []types.ImageDelete{}
+			if err := json.NewDecoder(rdr).Decode(&dels); err != nil {
+				fmt.Fprintf(cli.err, "%s\n", err)
+				errNames = append(errNames, name)
+				continue
+			}
+
+			for _, del := range dels {
+				if del.Deleted != "" {
+					fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted)
+				} else {
+					fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged)
+				}
+			}
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to remove images: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/client/run.go b/api/client/run.go
new file mode 100644
index 0000000..10cf924
--- /dev/null
+++ b/api/client/run.go
@@ -0,0 +1,246 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/signal"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/libnetwork/resolvconf/dns"
+)
+
+func (cid *cidFile) Close() error {
+	cid.file.Close()
+
+	if !cid.written {
+		if err := os.Remove(cid.path); err != nil {
+			return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err)
+		}
+	}
+
+	return nil
+}
+
+func (cid *cidFile) Write(id string) error {
+	if _, err := cid.file.Write([]byte(id)); err != nil {
+		return fmt.Errorf("Failed to write the container ID to the file: %s", err)
+	}
+	cid.written = true
+	return nil
+}
+
+// CmdRun runs a command in a new container.
+//
+// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
+func (cli *DockerCli) CmdRun(args ...string) error {
+	cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container", true)
+
+	// These are flags not stored in Config/HostConfig
+	var (
+		flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits")
+		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID")
+		flSigProxy   = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process")
+		flName       = cmd.String([]string{"-name"}, "", "Assign a name to the container")
+		flAttach     *opts.ListOpts
+
+		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
+		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
+		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
+	)
+
+	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
+	// just in case the Parse does not exit
+	if err != nil {
+		cmd.ReportError(err.Error(), true)
+	}
+
+	if len(hostConfig.Dns) > 0 {
+		// check the DNS settings passed via --dns against
+		// localhost regexp to warn if they are trying to
+		// set a DNS to a localhost address
+		for _, dnsIP := range hostConfig.Dns {
+			if dns.IsLocalhost(dnsIP) {
+				fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
+				break
+			}
+		}
+	}
+	if config.Image == "" {
+		cmd.Usage()
+		return nil
+	}
+
+	if !*flDetach {
+		if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
+			return err
+		}
+	} else {
+		if fl := cmd.Lookup("-attach"); fl != nil {
+			flAttach = fl.Value.(*opts.ListOpts)
+			if flAttach.Len() != 0 {
+				return ErrConflictAttachDetach
+			}
+		}
+		if *flAutoRemove {
+			return ErrConflictDetachAutoRemove
+		}
+
+		config.AttachStdin = false
+		config.AttachStdout = false
+		config.AttachStderr = false
+		config.StdinOnce = false
+	}
+
+	// Disable flSigProxy when in TTY mode
+	sigProxy := *flSigProxy
+	if config.Tty {
+		sigProxy = false
+	}
+
+	createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
+	if err != nil {
+		return err
+	}
+	if sigProxy {
+		sigc := cli.forwardAllSignals(createResponse.ID)
+		defer signal.StopCatch(sigc)
+	}
+	var (
+		waitDisplayID chan struct{}
+		errCh         chan error
+	)
+	if !config.AttachStdout && !config.AttachStderr {
+		// Make this asynchronous to allow the client to write to stdin before having to read the ID
+		waitDisplayID = make(chan struct{})
+		go func() {
+			defer close(waitDisplayID)
+			fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
+		}()
+	}
+	if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
+		return ErrConflictRestartPolicyAndAutoRemove
+	}
+	// We need to instantiate the chan because the select needs it. It can
+	// be closed but can't be uninitialized.
+	hijacked := make(chan io.Closer)
+	// Block the return until the chan gets closed
+	defer func() {
+		logrus.Debugf("End of CmdRun(), Waiting for hijack to finish.")
+		if _, ok := <-hijacked; ok {
+			fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
+		}
+	}()
+	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
+		var (
+			out, stderr io.Writer
+			in          io.ReadCloser
+			v           = url.Values{}
+		)
+		v.Set("stream", "1")
+		if config.AttachStdin {
+			v.Set("stdin", "1")
+			in = cli.in
+		}
+		if config.AttachStdout {
+			v.Set("stdout", "1")
+			out = cli.out
+		}
+		if config.AttachStderr {
+			v.Set("stderr", "1")
+			if config.Tty {
+				stderr = cli.out
+			} else {
+				stderr = cli.err
+			}
+		}
+		errCh = promise.Go(func() error {
+			return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
+		})
+	} else {
+		close(hijacked)
+	}
+	// Acknowledge the hijack before starting
+	select {
+	case closer := <-hijacked:
+		// Make sure that the hijack gets closed when returning (results
+		// in closing the hijack chan and freeing server's goroutines)
+		if closer != nil {
+			defer closer.Close()
+		}
+	case err := <-errCh:
+		if err != nil {
+			logrus.Debugf("Error hijack: %s", err)
+			return err
+		}
+	}
+
+	defer func() {
+		if *flAutoRemove {
+			if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil {
+				fmt.Fprintf(cli.err, "Error deleting container: %s\n", err)
+			}
+		}
+	}()
+
+	//start the container
+	if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil {
+		return err
+	}
+
+	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
+		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
+			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
+		}
+	}
+
+	if errCh != nil {
+		if err := <-errCh; err != nil {
+			logrus.Debugf("Error hijack: %s", err)
+			return err
+		}
+	}
+
+	// Detached mode: wait for the id to be displayed and return.
+	if !config.AttachStdout && !config.AttachStderr {
+		// Detached mode
+		<-waitDisplayID
+		return nil
+	}
+
+	var status int
+
+	// Attached mode
+	if *flAutoRemove {
+		// Autoremove: wait for the container to finish, retrieve
+		// the exit code and remove the container
+		if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil {
+			return err
+		}
+		if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
+			return err
+		}
+	} else {
+		// No Autoremove: Simply retrieve the exit code
+		if !config.Tty {
+			// In non-TTY mode, we can't detach, so we must wait for container exit
+			if status, err = waitForExit(cli, createResponse.ID); err != nil {
+				return err
+			}
+		} else {
+			// In TTY mode, there is a race: if the process dies too slowly, the state could
+			// be updated after the getExitCode call and result in the wrong exit code being reported
+			if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
+				return err
+			}
+		}
+	}
+	if status != 0 {
+		return StatusError{StatusCode: status}
+	}
+	return nil
+}
diff --git a/api/client/save.go b/api/client/save.go
new file mode 100644
index 0000000..a04cbcf
--- /dev/null
+++ b/api/client/save.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+	"errors"
+	"io"
+	"net/url"
+	"os"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdSave saves one or more images to a tar archive.
+//
+// The tar archive is written to STDOUT by default, or written to a file.
+//
+// Usage: docker save [OPTIONS] IMAGE [IMAGE...]
+func (cli *DockerCli) CmdSave(args ...string) error {
+	cmd := cli.Subcmd("save", "IMAGE [IMAGE...]", "Save an image(s) to a tar archive (streamed to STDOUT by default)", true)
+	outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		output io.Writer = cli.out
+		err    error
+	)
+	if *outfile != "" {
+		output, err = os.Create(*outfile)
+		if err != nil {
+			return err
+		}
+	} else if cli.isTerminalOut {
+		return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
+	}
+
+	sopts := &streamOpts{
+		rawTerminal: true,
+		out:         output,
+	}
+
+	if len(cmd.Args()) == 1 {
+		image := cmd.Arg(0)
+		if err := cli.stream("GET", "/images/"+image+"/get", sopts); err != nil {
+			return err
+		}
+	} else {
+		v := url.Values{}
+		for _, arg := range cmd.Args() {
+			v.Add("names", arg)
+		}
+		if err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/api/client/search.go b/api/client/search.go
new file mode 100644
index 0000000..e606d47
--- /dev/null
+++ b/api/client/search.go
@@ -0,0 +1,84 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"sort"
+	"strings"
+	"text/tabwriter"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/docker/registry"
+)
+
+// ByStars sorts search results in ascending order by number of stars.
+type ByStars []registry.SearchResult
+
+func (r ByStars) Len() int           { return len(r) }
+func (r ByStars) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
+func (r ByStars) Less(i, j int) bool { return r[i].StarCount < r[j].StarCount }
+
+// CmdSearch searches the Docker Hub for images.
+//
+// Usage: docker search [OPTIONS] TERM
+func (cli *DockerCli) CmdSearch(args ...string) error {
+	cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images", true)
+	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
+	trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
+	automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
+	stars := cmd.Uint([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars")
+	cmd.Require(flag.Exact, 1)
+
+	cmd.ParseFlags(args, true)
+
+	name := cmd.Arg(0)
+	v := url.Values{}
+	v.Set("term", name)
+
+	// Resolve the Repository name from fqn to hostname + name
+	taglessRemote, _ := parsers.ParseRepositoryTag(name)
+	repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
+	if err != nil {
+		return err
+	}
+
+	rdr, _, err := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, repoInfo.Index, "search")
+	if err != nil {
+		return err
+	}
+
+	results := ByStars{}
+	if err := json.NewDecoder(rdr).Decode(&results); err != nil {
+		return err
+	}
+
+	sort.Sort(sort.Reverse(results))
+
+	w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
+	fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n")
+	for _, res := range results {
+		if ((*automated || *trusted) && (!res.IsTrusted && !res.IsAutomated)) || (int(*stars) > res.StarCount) {
+			continue
+		}
+		desc := strings.Replace(res.Description, "\n", " ", -1)
+		desc = strings.Replace(desc, "\r", " ", -1)
+		if !*noTrunc && len(desc) > 45 {
+			desc = stringutils.Truncate(desc, 42) + "..."
+		}
+		fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount)
+		if res.IsOfficial {
+			fmt.Fprint(w, "[OK]")
+
+		}
+		fmt.Fprint(w, "\t")
+		if res.IsAutomated || res.IsTrusted {
+			fmt.Fprint(w, "[OK]")
+		}
+		fmt.Fprint(w, "\n")
+	}
+	w.Flush()
+	return nil
+}
diff --git a/api/client/start.go b/api/client/start.go
new file mode 100644
index 0000000..40f84d7
--- /dev/null
+++ b/api/client/start.go
@@ -0,0 +1,167 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/signal"
+)
+
+func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
+	sigc := make(chan os.Signal, 128)
+	signal.CatchAll(sigc)
+	go func() {
+		for s := range sigc {
+			if s == signal.SIGCHLD {
+				continue
+			}
+			var sig string
+			for sigStr, sigN := range signal.SignalMap {
+				if sigN == s {
+					sig = sigStr
+					break
+				}
+			}
+			if sig == "" {
+				fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s)
+			}
+			if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil {
+				logrus.Debugf("Error sending signal: %s", err)
+			}
+		}
+	}()
+	return sigc
+}
+
+// CmdStart starts one or more stopped containers.
+//
+// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdStart(args ...string) error {
+	var (
+		cErr chan error
+		tty  bool
+
+		cmd       = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Start one or more stopped containers", true)
+		attach    = cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals")
+		openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
+	)
+
+	cmd.Require(flag.Min, 1)
+	cmd.ParseFlags(args, true)
+
+	if *attach || *openStdin {
+		if cmd.NArg() > 1 {
+			return fmt.Errorf("You cannot start and attach multiple containers at once.")
+		}
+
+		stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil)
+		if err != nil {
+			return err
+		}
+
+		var c types.ContainerJSON
+		if err := json.NewDecoder(stream).Decode(&c); err != nil {
+			return err
+		}
+
+		tty = c.Config.Tty
+
+		if !tty {
+			sigc := cli.forwardAllSignals(cmd.Arg(0))
+			defer signal.StopCatch(sigc)
+		}
+
+		var in io.ReadCloser
+
+		v := url.Values{}
+		v.Set("stream", "1")
+
+		if *openStdin && c.Config.OpenStdin {
+			v.Set("stdin", "1")
+			in = cli.in
+		}
+
+		v.Set("stdout", "1")
+		v.Set("stderr", "1")
+
+		hijacked := make(chan io.Closer)
+		// Block the return until the chan gets closed
+		defer func() {
+			logrus.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
+			if _, ok := <-hijacked; ok {
+				fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
+			}
+			cli.in.Close()
+		}()
+		cErr = promise.Go(func() error {
+			return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil)
+		})
+
+		// Acknowledge the hijack before starting
+		select {
+		case closer := <-hijacked:
+			// Make sure that the hijack gets closed when returning (results
+			// in closing the hijack chan and freeing server's goroutines)
+			if closer != nil {
+				defer closer.Close()
+			}
+		case err := <-cErr:
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	var encounteredError error
+	var errNames []string
+	for _, name := range cmd.Args() {
+		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil))
+		if err != nil {
+			if !*attach && !*openStdin {
+				// attach and openStdin is false means it could be starting multiple containers
+				// when a container start failed, show the error message and start next
+				fmt.Fprintf(cli.err, "%s\n", err)
+				errNames = append(errNames, name)
+			} else {
+				encounteredError = err
+			}
+		} else {
+			if !*attach && !*openStdin {
+				fmt.Fprintf(cli.out, "%s\n", name)
+			}
+		}
+	}
+
+	if len(errNames) > 0 {
+		encounteredError = fmt.Errorf("Error: failed to start containers: %v", errNames)
+	}
+	if encounteredError != nil {
+		return encounteredError
+	}
+
+	if *openStdin || *attach {
+		if tty && cli.isTerminalOut {
+			if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
+				fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
+			}
+		}
+		if attchErr := <-cErr; attchErr != nil {
+			return attchErr
+		}
+		_, status, err := getExitCode(cli, cmd.Arg(0))
+		if err != nil {
+			return err
+		}
+		if status != 0 {
+			return StatusError{StatusCode: status}
+		}
+	}
+	return nil
+}
diff --git a/api/client/stats.go b/api/client/stats.go
new file mode 100644
index 0000000..ba56982
--- /dev/null
+++ b/api/client/stats.go
@@ -0,0 +1,202 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+	"sort"
+	"strings"
+	"sync"
+	"text/tabwriter"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/units"
+)
+
+type containerStats struct {
+	Name             string
+	CPUPercentage    float64
+	Memory           float64
+	MemoryLimit      float64
+	MemoryPercentage float64
+	NetworkRx        float64
+	NetworkTx        float64
+	mu               sync.RWMutex
+	err              error
+}
+
+func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
+	v := url.Values{}
+	if streamStats {
+		v.Set("stream", "1")
+	} else {
+		v.Set("stream", "0")
+	}
+	stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats?"+v.Encode(), nil, nil)
+	if err != nil {
+		s.mu.Lock()
+		s.err = err
+		s.mu.Unlock()
+		return
+	}
+	defer stream.Close()
+	var (
+		previousCPU    uint64
+		previousSystem uint64
+		start          = true
+		dec            = json.NewDecoder(stream)
+		u              = make(chan error, 1)
+	)
+	go func() {
+		for {
+			var v *types.Stats
+			if err := dec.Decode(&v); err != nil {
+				u <- err
+				return
+			}
+			var (
+				memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
+				cpuPercent = 0.0
+			)
+			if !start {
+				cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
+			}
+			start = false
+			s.mu.Lock()
+			s.CPUPercentage = cpuPercent
+			s.Memory = float64(v.MemoryStats.Usage)
+			s.MemoryLimit = float64(v.MemoryStats.Limit)
+			s.MemoryPercentage = memPercent
+			s.NetworkRx = float64(v.Network.RxBytes)
+			s.NetworkTx = float64(v.Network.TxBytes)
+			s.mu.Unlock()
+			previousCPU = v.CpuStats.CpuUsage.TotalUsage
+			previousSystem = v.CpuStats.SystemUsage
+			u <- nil
+			if !streamStats {
+				return
+			}
+		}
+	}()
+	for {
+		select {
+		case <-time.After(2 * time.Second):
+			// zero out the values if we have not received an update within
+			// the specified duration.
+			s.mu.Lock()
+			s.CPUPercentage = 0
+			s.Memory = 0
+			s.MemoryPercentage = 0
+			s.mu.Unlock()
+		case err := <-u:
+			if err != nil {
+				s.mu.Lock()
+				s.err = err
+				s.mu.Unlock()
+				return
+			}
+		}
+		if !streamStats {
+			return
+		}
+	}
+}
+
+func (s *containerStats) Display(w io.Writer) error {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	if s.err != nil {
+		return s.err
+	}
+	fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n",
+		s.Name,
+		s.CPUPercentage,
+		units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit),
+		s.MemoryPercentage,
+		units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx))
+	return nil
+}
+
+// CmdStats displays a live stream of resource usage statistics for one or more containers.
+//
+// This shows real-time information on CPU usage, memory usage, and network I/O.
+//
+// Usage: docker stats CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdStats(args ...string) error {
+	cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true)
+	noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result")
+	cmd.Require(flag.Min, 1)
+	cmd.ParseFlags(args, true)
+
+	names := cmd.Args()
+	sort.Strings(names)
+	var (
+		cStats []*containerStats
+		w      = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	)
+	printHeader := func() {
+		if !*noStream {
+			fmt.Fprint(cli.out, "\033[2J")
+			fmt.Fprint(cli.out, "\033[H")
+		}
+		io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O\n")
+	}
+	for _, n := range names {
+		s := &containerStats{Name: n}
+		cStats = append(cStats, s)
+		go s.Collect(cli, !*noStream)
+	}
+	// do a quick pause so that any failed connections for containers that do not exist are able to be
+	// evicted before we display the initial or default values.
+	time.Sleep(500 * time.Millisecond)
+	var errs []string
+	for _, c := range cStats {
+		c.mu.Lock()
+		if c.err != nil {
+			errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
+		}
+		c.mu.Unlock()
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("%s", strings.Join(errs, ", "))
+	}
+	for range time.Tick(500 * time.Millisecond) {
+		printHeader()
+		toRemove := []int{}
+		for i, s := range cStats {
+			if err := s.Display(w); err != nil && !*noStream {
+				toRemove = append(toRemove, i)
+			}
+		}
+		for j := len(toRemove) - 1; j >= 0; j-- {
+			i := toRemove[j]
+			cStats = append(cStats[:i], cStats[i+1:]...)
+		}
+		if len(cStats) == 0 {
+			return nil
+		}
+		w.Flush()
+		if *noStream {
+			break
+		}
+	}
+	return nil
+}
+
+func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.Stats) float64 {
+	var (
+		cpuPercent = 0.0
+		// calculate the change for the cpu usage of the container in between readings
+		cpuDelta = float64(v.CpuStats.CpuUsage.TotalUsage - previousCPU)
+		// calculate the change for the entire system between readings
+		systemDelta = float64(v.CpuStats.SystemUsage - previousSystem)
+	)
+
+	if systemDelta > 0.0 && cpuDelta > 0.0 {
+		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0
+	}
+	return cpuPercent
+}
diff --git a/api/client/stats_unit_test.go b/api/client/stats_unit_test.go
new file mode 100644
index 0000000..0831dbc
--- /dev/null
+++ b/api/client/stats_unit_test.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"bytes"
+	"sync"
+	"testing"
+)
+
+func TestDisplay(t *testing.T) {
+	c := &containerStats{
+		Name:             "app",
+		CPUPercentage:    30.0,
+		Memory:           100 * 1024 * 1024.0,
+		MemoryLimit:      2048 * 1024 * 1024.0,
+		MemoryPercentage: 100.0 / 2048.0 * 100.0,
+		NetworkRx:        100 * 1024 * 1024,
+		NetworkTx:        800 * 1024 * 1024,
+		mu:               sync.RWMutex{},
+	}
+	var b bytes.Buffer
+	if err := c.Display(&b); err != nil {
+		t.Fatalf("c.Display() gave error: %s", err)
+	}
+	got := b.String()
+	want := "app\t30.00%\t104.9 MB/2.147 GB\t4.88%\t104.9 MB/838.9 MB\n"
+	if got != want {
+		t.Fatalf("c.Display() = %q, want %q", got, want)
+	}
+}
diff --git a/api/client/stop.go b/api/client/stop.go
new file mode 100644
index 0000000..9551911
--- /dev/null
+++ b/api/client/stop.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdStop stops one or more running containers.
+//
+// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds).
+//
+// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdStop(args ...string) error {
+	cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true)
+	nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	v := url.Values{}
+	v.Set("t", strconv.Itoa(*nSeconds))
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil))
+		if err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to stop containers: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/client/tag.go b/api/client/tag.go
new file mode 100644
index 0000000..56541f8
--- /dev/null
+++ b/api/client/tag.go
@@ -0,0 +1,41 @@
+package client
+
+import (
+	"net/url"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+)
+
+// CmdTag tags an image into a repository.
+//
+// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+func (cli *DockerCli) CmdTag(args ...string) error {
+	cmd := cli.Subcmd("tag", "IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository", true)
+	force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
+	cmd.Require(flag.Exact, 2)
+
+	cmd.ParseFlags(args, true)
+
+	var (
+		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
+		v               = url.Values{}
+	)
+
+	//Check if the given image name can be resolved
+	if err := registry.ValidateRepositoryName(repository); err != nil {
+		return err
+	}
+	v.Set("repo", repository)
+	v.Set("tag", tag)
+
+	if *force {
+		v.Set("force", "1")
+	}
+
+	if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, nil)); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/api/client/top.go b/api/client/top.go
new file mode 100644
index 0000000..ee16fdb
--- /dev/null
+++ b/api/client/top.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/docker/docker/api/types"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdTop displays the running processes of a container.
+//
+// Usage: docker top CONTAINER
+func (cli *DockerCli) CmdTop(args ...string) error {
+	cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container", true)
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	val := url.Values{}
+	if cmd.NArg() > 1 {
+		val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
+	}
+
+	stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, nil)
+	if err != nil {
+		return err
+	}
+
+	procList := types.ContainerProcessList{}
+	if err := json.NewDecoder(stream).Decode(&procList); err != nil {
+		return err
+	}
+
+	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	fmt.Fprintln(w, strings.Join(procList.Titles, "\t"))
+
+	for _, proc := range procList.Processes {
+		fmt.Fprintln(w, strings.Join(proc, "\t"))
+	}
+	w.Flush()
+	return nil
+}
diff --git a/api/client/unpause.go b/api/client/unpause.go
new file mode 100644
index 0000000..dceeb23
--- /dev/null
+++ b/api/client/unpause.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdUnpause unpauses all processes within a container, for one or more containers.
+//
+// Usage: docker unpause CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdUnpause(args ...string) error {
+	cmd := cli.Subcmd("unpause", "CONTAINER [CONTAINER...]", "Unpause all processes within a container", true)
+	cmd.Require(flag.Min, 1)
+	cmd.ParseFlags(args, false)
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			fmt.Fprintf(cli.out, "%s\n", name)
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to unpause containers: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/client/utils.go b/api/client/utils.go
index e638a87..6fb9b25 100644
--- a/api/client/utils.go
+++ b/api/client/utils.go
@@ -17,21 +17,23 @@
 	"strings"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/registry"
-	"github.com/docker/docker/utils"
 )
 
 var (
-	ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+	errConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
 )
 
+// HTTPClient creates a new HTP client with the cli's client transport instance.
 func (cli *DockerCli) HTTPClient() *http.Client {
 	return &http.Client{Transport: cli.transport}
 }
@@ -39,18 +41,8 @@
 func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
 	params := bytes.NewBuffer(nil)
 	if data != nil {
-		if env, ok := data.(engine.Env); ok {
-			if err := env.Encode(params); err != nil {
-				return nil, err
-			}
-		} else {
-			buf, err := json.Marshal(data)
-			if err != nil {
-				return nil, err
-			}
-			if _, err := params.Write(buf); err != nil {
-				return nil, err
-			}
+		if err := json.NewEncoder(params).Encode(data); err != nil {
+			return nil, err
 		}
 	}
 	return params, nil
@@ -65,14 +57,23 @@
 	if err != nil {
 		return nil, "", -1, err
 	}
+
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
+	// then the user can't change OUR headers
+	for k, v := range cli.configFile.HttpHeaders {
+		req.Header.Set(k, v)
+	}
+
 	req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
 	req.URL.Host = cli.addr
 	req.URL.Scheme = cli.scheme
+
 	if headers != nil {
 		for k, v := range headers {
 			req.Header[k] = v
 		}
 	}
+
 	if expectedPayload && req.Header.Get("Content-Type") == "" {
 		req.Header.Set("Content-Type", "text/plain")
 	}
@@ -84,13 +85,12 @@
 	}
 	if err != nil {
 		if strings.Contains(err.Error(), "connection refused") {
-			return nil, "", statusCode, ErrConnectionRefused
+			return nil, "", statusCode, errConnectionRefused
 		}
 
 		if cli.tlsConfig == nil {
 			return nil, "", statusCode, fmt.Errorf("%v. Are you trying to connect to a TLS-enabled daemon without TLS?", err)
 		}
-
 		return nil, "", statusCode, fmt.Errorf("An error occurred trying to connect: %v", err)
 	}
 
@@ -109,7 +109,7 @@
 }
 
 func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) {
-	cmdAttempt := func(authConfig registry.AuthConfig) (io.ReadCloser, int, error) {
+	cmdAttempt := func(authConfig cliconfig.AuthConfig) (io.ReadCloser, int, error) {
 		buf, err := json.Marshal(authConfig)
 		if err != nil {
 			return nil, -1, err
@@ -140,14 +140,14 @@
 	}
 
 	// Resolve the Auth config relevant for this server
-	authConfig := cli.configFile.ResolveAuthConfig(index)
+	authConfig := registry.ResolveAuthConfig(cli.configFile, index)
 	body, statusCode, err := cmdAttempt(authConfig)
 	if statusCode == http.StatusUnauthorized {
 		fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName)
 		if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil {
 			return nil, -1, err
 		}
-		authConfig = cli.configFile.ResolveAuthConfig(index)
+		authConfig = registry.ResolveAuthConfig(cli.configFile, index)
 		return cmdAttempt(authConfig)
 	}
 	return body, statusCode, err
@@ -169,33 +169,38 @@
 	body, _, statusCode, err := cli.clientRequest(method, path, params, headers)
 	return body, statusCode, err
 }
-func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
-	return cli.streamHelper(method, path, true, in, out, nil, headers)
+
+type streamOpts struct {
+	rawTerminal bool
+	in          io.Reader
+	out         io.Writer
+	err         io.Writer
+	headers     map[string][]string
 }
 
-func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error {
-	body, contentType, _, err := cli.clientRequest(method, path, in, headers)
+func (cli *DockerCli) stream(method, path string, opts *streamOpts) error {
+	body, contentType, _, err := cli.clientRequest(method, path, opts.in, opts.headers)
 	if err != nil {
 		return err
 	}
-	return cli.streamBody(body, contentType, setRawTerminal, stdout, stderr)
+	return cli.streamBody(body, contentType, opts.rawTerminal, opts.out, opts.err)
 }
 
-func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, setRawTerminal bool, stdout, stderr io.Writer) error {
+func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, rawTerminal bool, stdout, stderr io.Writer) error {
 	defer body.Close()
 
 	if api.MatchesContentType(contentType, "application/json") {
-		return utils.DisplayJSONMessagesStream(body, stdout, cli.outFd, cli.isTerminalOut)
+		return jsonmessage.DisplayJSONMessagesStream(body, stdout, cli.outFd, cli.isTerminalOut)
 	}
 	if stdout != nil || stderr != nil {
 		// When TTY is ON, use regular copy
 		var err error
-		if setRawTerminal {
+		if rawTerminal {
 			_, err = io.Copy(stdout, body)
 		} else {
 			_, err = stdcopy.StdCopy(stdout, stderr, body)
 		}
-		log.Debugf("[stream] End of stdout")
+		logrus.Debugf("[stream] End of stdout")
 		return err
 	}
 	return nil
@@ -218,7 +223,7 @@
 	}
 
 	if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, nil)); err != nil {
-		log.Debugf("Error resize: %s", err)
+		logrus.Debugf("Error resize: %s", err)
 	}
 }
 
@@ -228,11 +233,12 @@
 		return -1, err
 	}
 
-	var out engine.Env
-	if err := out.Decode(stream); err != nil {
+	var res types.ContainerWaitResponse
+	if err := json.NewDecoder(stream).Decode(&res); err != nil {
 		return -1, err
 	}
-	return out.GetInt("StatusCode"), nil
+
+	return res.StatusCode, nil
 }
 
 // getExitCode perform an inspect on the container. It returns
@@ -241,19 +247,18 @@
 	stream, _, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil)
 	if err != nil {
 		// If we can't connect, then the daemon probably died.
-		if err != ErrConnectionRefused {
+		if err != errConnectionRefused {
 			return false, -1, err
 		}
 		return false, -1, nil
 	}
 
-	var result engine.Env
-	if err := result.Decode(stream); err != nil {
+	var c types.ContainerJSON
+	if err := json.NewDecoder(stream).Decode(&c); err != nil {
 		return false, -1, err
 	}
 
-	state := result.GetSubEnv("State")
-	return state.GetBool("Running"), state.GetInt("ExitCode"), nil
+	return c.State.Running, c.State.ExitCode, nil
 }
 
 // getExecExitCode perform an inspect on the exec command. It returns
@@ -262,18 +267,24 @@
 	stream, _, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil)
 	if err != nil {
 		// If we can't connect, then the daemon probably died.
-		if err != ErrConnectionRefused {
+		if err != errConnectionRefused {
 			return false, -1, err
 		}
 		return false, -1, nil
 	}
 
-	var result engine.Env
-	if err := result.Decode(stream); err != nil {
+	//TODO: Should we reconsider having a type in api/types?
+	//this is a response to exex/id/json not container
+	var c struct {
+		Running  bool
+		ExitCode int
+	}
+
+	if err := json.NewDecoder(stream).Decode(&c); err != nil {
 		return false, -1, err
 	}
 
-	return result.GetBool("Running"), result.GetInt("ExitCode"), nil
+	return c.Running, c.ExitCode, nil
 }
 
 func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
@@ -297,7 +308,7 @@
 		sigchan := make(chan os.Signal, 1)
 		gosignal.Notify(sigchan, signal.SIGWINCH)
 		go func() {
-			for _ = range sigchan {
+			for range sigchan {
 				cli.resizeTty(id, isExec)
 			}
 		}()
@@ -311,7 +322,7 @@
 	}
 	ws, err := term.GetWinsize(cli.outFd)
 	if err != nil {
-		log.Debugf("Error getting size: %s", err)
+		logrus.Debugf("Error getting size: %s", err)
 		if ws == nil {
 			return 0, 0
 		}
diff --git a/api/client/version.go b/api/client/version.go
new file mode 100644
index 0000000..4e06a6c
--- /dev/null
+++ b/api/client/version.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"runtime"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/autogen/dockerversion"
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdVersion shows Docker version information.
+//
+// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch.
+//
+// Usage: docker version
+func (cli *DockerCli) CmdVersion(args ...string) error {
+	cmd := cli.Subcmd("version", "", "Show the Docker version information.", true)
+	cmd.Require(flag.Exact, 0)
+
+	cmd.ParseFlags(args, false)
+
+	if dockerversion.VERSION != "" {
+		fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
+	}
+	fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION)
+	fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
+	if dockerversion.GITCOMMIT != "" {
+		fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
+	}
+	fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
+
+	stream, _, err := cli.call("GET", "/version", nil, nil)
+	if err != nil {
+		return err
+	}
+
+	var v types.Version
+	if err := json.NewDecoder(stream).Decode(&v); err != nil {
+		fmt.Fprintf(cli.err, "Error reading remote version: %s\n", err)
+		return err
+	}
+
+	fmt.Fprintf(cli.out, "Server version: %s\n", v.Version)
+	if v.ApiVersion != "" {
+		fmt.Fprintf(cli.out, "Server API version: %s\n", v.ApiVersion)
+	}
+	fmt.Fprintf(cli.out, "Go version (server): %s\n", v.GoVersion)
+	fmt.Fprintf(cli.out, "Git commit (server): %s\n", v.GitCommit)
+	fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", v.Os, v.Arch)
+
+	return nil
+}
diff --git a/api/client/wait.go b/api/client/wait.go
new file mode 100644
index 0000000..bfec19e
--- /dev/null
+++ b/api/client/wait.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"fmt"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdWait blocks until a container stops, then prints its exit code.
+//
+// If more than one container is specified, this will wait synchronously on each container.
+//
+// Usage: docker wait CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdWait(args ...string) error {
+	cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.", true)
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	var errNames []string
+	for _, name := range cmd.Args() {
+		status, err := waitForExit(cli, name)
+		if err != nil {
+			fmt.Fprintf(cli.err, "%s\n", err)
+			errNames = append(errNames, name)
+		} else {
+			fmt.Fprintf(cli.out, "%d\n", status)
+		}
+	}
+	if len(errNames) > 0 {
+		return fmt.Errorf("Error: failed to wait containers: %v", errNames)
+	}
+	return nil
+}
diff --git a/api/common.go b/api/common.go
index f6a0bc4..743eb67 100644
--- a/api/common.go
+++ b/api/common.go
@@ -3,34 +3,30 @@
 import (
 	"fmt"
 	"mime"
-	"os"
 	"path/filepath"
+	"sort"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/parsers"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/version"
 	"github.com/docker/libtrust"
 )
 
+// Common constants for daemon and client.
 const (
-	APIVERSION            version.Version = "1.18"
-	DEFAULTHTTPHOST                       = "127.0.0.1"
-	DEFAULTUNIXSOCKET                     = "/var/run/docker.sock"
-	DefaultDockerfileName string          = "Dockerfile"
+	APIVERSION            version.Version = "1.19"       // Current REST API version
+	DefaultDockerfileName string          = "Dockerfile" // Default filename with Docker commands, read by docker build
 )
 
-func ValidateHost(val string) (string, error) {
-	host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val)
-	if err != nil {
-		return val, err
-	}
-	return host, nil
-}
+type ByPrivatePort []types.Port
 
-// TODO remove, used on < 1.5 in getContainersJSON
-func DisplayablePorts(ports *engine.Table) string {
+func (r ByPrivatePort) Len() int           { return len(r) }
+func (r ByPrivatePort) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
+func (r ByPrivatePort) Less(i, j int) bool { return r[i].PrivatePort < r[j].PrivatePort }
+
+func DisplayablePorts(ports []types.Port) string {
 	var (
 		result          = []string{}
 		hostMappings    = []string{}
@@ -39,21 +35,20 @@
 	)
 	firstInGroupMap = make(map[string]int)
 	lastInGroupMap = make(map[string]int)
-	ports.SetKey("PrivatePort")
-	ports.Sort()
-	for _, port := range ports.Data {
+	sort.Sort(ByPrivatePort(ports))
+	for _, port := range ports {
 		var (
-			current      = port.GetInt("PrivatePort")
-			portKey      = port.Get("Type")
+			current      = port.PrivatePort
+			portKey      = port.Type
 			firstInGroup int
 			lastInGroup  int
 		)
-		if port.Get("IP") != "" {
-			if port.GetInt("PublicPort") != current {
-				hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
+		if port.IP != "" {
+			if port.PublicPort != current {
+				hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
 				continue
 			}
-			portKey = fmt.Sprintf("%s/%s", port.Get("IP"), port.Get("Type"))
+			portKey = fmt.Sprintf("%s/%s", port.IP, port.Type)
 		}
 		firstInGroup = firstInGroupMap[portKey]
 		lastInGroup = lastInGroupMap[portKey]
@@ -104,7 +99,7 @@
 func MatchesContentType(contentType, expectedType string) bool {
 	mimetype, _, err := mime.ParseMediaType(contentType)
 	if err != nil {
-		log.Errorf("Error parsing media type: %s error: %v", contentType, err)
+		logrus.Errorf("Error parsing media type: %s error: %v", contentType, err)
 	}
 	return err == nil && mimetype == expectedType
 }
@@ -112,7 +107,7 @@
 // LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
 // otherwise generates a new one
 func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
-	err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700)
+	err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700)
 	if err != nil {
 		return nil, err
 	}
diff --git a/api/server/form.go b/api/server/form.go
new file mode 100644
index 0000000..75584df
--- /dev/null
+++ b/api/server/form.go
@@ -0,0 +1,20 @@
+package server
+
+import (
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+func boolValue(r *http.Request, k string) bool {
+	s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
+	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
+}
+
+func int64ValueOrZero(r *http.Request, k string) int64 {
+	val, err := strconv.ParseInt(r.FormValue(k), 10, 64)
+	if err != nil {
+		return 0
+	}
+	return val
+}
diff --git a/api/server/form_test.go b/api/server/form_test.go
new file mode 100644
index 0000000..caa9f17
--- /dev/null
+++ b/api/server/form_test.go
@@ -0,0 +1,55 @@
+package server
+
+import (
+	"net/http"
+	"net/url"
+	"testing"
+)
+
+func TestBoolValue(t *testing.T) {
+	cases := map[string]bool{
+		"":      false,
+		"0":     false,
+		"no":    false,
+		"false": false,
+		"none":  false,
+		"1":     true,
+		"yes":   true,
+		"true":  true,
+		"one":   true,
+		"100":   true,
+	}
+
+	for c, e := range cases {
+		v := url.Values{}
+		v.Set("test", c)
+		r, _ := http.NewRequest("POST", "", nil)
+		r.Form = v
+
+		a := boolValue(r, "test")
+		if a != e {
+			t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
+		}
+	}
+}
+
+func TestInt64ValueOrZero(t *testing.T) {
+	cases := map[string]int64{
+		"":     0,
+		"asdf": 0,
+		"0":    0,
+		"1":    1,
+	}
+
+	for c, e := range cases {
+		v := url.Values{}
+		v.Set("test", c)
+		r, _ := http.NewRequest("POST", "", nil)
+		r.Form = v
+
+		a := int64ValueOrZero(r, "test")
+		if a != e {
+			t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
+		}
+	}
+}
diff --git a/api/server/profiler.go b/api/server/profiler.go
new file mode 100644
index 0000000..eebfe69
--- /dev/null
+++ b/api/server/profiler.go
@@ -0,0 +1,38 @@
+package server
+
+import (
+	"expvar"
+	"fmt"
+	"net/http"
+	"net/http/pprof"
+
+	"github.com/gorilla/mux"
+)
+
+func ProfilerSetup(mainRouter *mux.Router, path string) {
+	var r = mainRouter.PathPrefix(path).Subrouter()
+	r.HandleFunc("/vars", expVars)
+	r.HandleFunc("/pprof/", pprof.Index)
+	r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
+	r.HandleFunc("/pprof/profile", pprof.Profile)
+	r.HandleFunc("/pprof/symbol", pprof.Symbol)
+	r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP)
+	r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP)
+	r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
+	r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
+}
+
+// Replicated from expvar.go as not public.
+func expVars(w http.ResponseWriter, r *http.Request) {
+	first := true
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	fmt.Fprintf(w, "{\n")
+	expvar.Do(func(kv expvar.KeyValue) {
+		if !first {
+			fmt.Fprintf(w, ",\n")
+		}
+		first = false
+		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+	})
+	fmt.Fprintf(w, "\n}\n")
+}
diff --git a/api/server/server.go b/api/server/server.go
index cfbb7f2..1dd7b43 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -1,45 +1,122 @@
 package server
 
 import (
-	"bufio"
-	"bytes"
-
 	"encoding/base64"
 	"encoding/json"
-	"expvar"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
-	"net/http/pprof"
 	"os"
+	"runtime"
 	"strconv"
 	"strings"
-
-	"crypto/tls"
-	"crypto/x509"
+	"time"
 
 	"code.google.com/p/go.net/websocket"
-	"github.com/docker/libcontainer/user"
 	"github.com/gorilla/mux"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/daemon/networkdriver/bridge"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/listenbuffer"
+	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/builder"
+	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/graph"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/filters"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/signal"
+	"github.com/docker/docker/pkg/sockets"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/version"
-	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
+	"github.com/docker/libnetwork/portallocator"
 )
 
-var (
-	activationLock chan struct{} = make(chan struct{})
-)
+type ServerConfig struct {
+	Logging     bool
+	EnableCors  bool
+	CorsHeaders string
+	Version     string
+	SocketGroup string
+	Tls         bool
+	TlsVerify   bool
+	TlsCa       string
+	TlsCert     string
+	TlsKey      string
+}
+
+type Server struct {
+	daemon  *daemon.Daemon
+	cfg     *ServerConfig
+	router  *mux.Router
+	start   chan struct{}
+	servers []serverCloser
+}
+
+func New(cfg *ServerConfig) *Server {
+	srv := &Server{
+		cfg:   cfg,
+		start: make(chan struct{}),
+	}
+	r := createRouter(srv)
+	srv.router = r
+	return srv
+}
+
+func (s *Server) Close() {
+	for _, srv := range s.servers {
+		if err := srv.Close(); err != nil {
+			logrus.Error(err)
+		}
+	}
+}
+
+type serverCloser interface {
+	Serve() error
+	Close() error
+}
+
+// ServeApi loops through all of the protocols sent in to docker and spawns
+// off a go routine to setup a serving http.Server for each.
+func (s *Server) ServeApi(protoAddrs []string) error {
+	var chErrors = make(chan error, len(protoAddrs))
+
+	for _, protoAddr := range protoAddrs {
+		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
+		if len(protoAddrParts) != 2 {
+			return fmt.Errorf("bad format, expected PROTO://ADDR")
+		}
+		srv, err := s.newServer(protoAddrParts[0], protoAddrParts[1])
+		if err != nil {
+			return err
+		}
+		s.servers = append(s.servers, srv)
+
+		go func(proto, addr string) {
+			logrus.Infof("Listening for HTTP on %s (%s)", proto, addr)
+			if err := srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+				err = nil
+			}
+			chErrors <- err
+		}(protoAddrParts[0], protoAddrParts[1])
+	}
+
+	for i := 0; i < len(protoAddrs); i++ {
+		err := <-chErrors
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
 
 type HttpServer struct {
 	srv *http.Server
@@ -53,7 +130,7 @@
 	return s.l.Close()
 }
 
-type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error
+type HttpApiFunc func(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error
 
 func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
 	conn, _, err := w.(http.Hijacker).Hijack()
@@ -114,37 +191,32 @@
 }
 
 func httpError(w http.ResponseWriter, err error) {
+	if err == nil || w == nil {
+		logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling")
+		return
+	}
 	statusCode := http.StatusInternalServerError
 	// FIXME: this is brittle and should not be necessary.
 	// If we need to differentiate between different possible error types, we should
 	// create appropriate error types with clearly defined meaning.
 	errStr := strings.ToLower(err.Error())
-	if strings.Contains(errStr, "no such") {
-		statusCode = http.StatusNotFound
-	} else if strings.Contains(errStr, "bad parameter") {
-		statusCode = http.StatusBadRequest
-	} else if strings.Contains(errStr, "conflict") {
-		statusCode = http.StatusConflict
-	} else if strings.Contains(errStr, "impossible") {
-		statusCode = http.StatusNotAcceptable
-	} else if strings.Contains(errStr, "wrong login/password") {
-		statusCode = http.StatusUnauthorized
-	} else if strings.Contains(errStr, "hasn't been activated") {
-		statusCode = http.StatusForbidden
+	for keyword, status := range map[string]int{
+		"not found":             http.StatusNotFound,
+		"no such":               http.StatusNotFound,
+		"bad parameter":         http.StatusBadRequest,
+		"conflict":              http.StatusConflict,
+		"impossible":            http.StatusNotAcceptable,
+		"wrong login/password":  http.StatusUnauthorized,
+		"hasn't been activated": http.StatusForbidden,
+	} {
+		if strings.Contains(errStr, keyword) {
+			statusCode = status
+			break
+		}
 	}
 
-	if err != nil {
-		log.Errorf("HTTP Error: statusCode=%d %v", statusCode, err)
-		http.Error(w, err.Error(), statusCode)
-	}
-}
-
-// writeJSONEnv writes the engine.Env values to the http response stream as a
-// json encoded body.
-func writeJSONEnv(w http.ResponseWriter, code int, v engine.Env) error {
-	w.Header().Set("Content-Type", "application/json")
-	w.WriteHeader(code)
-	return v.Encode(w)
+	logrus.WithFields(logrus.Fields{"statusCode": statusCode, "err": err}).Error("HTTP Error")
+	http.Error(w, err.Error(), statusCode)
 }
 
 // writeJSON writes the value v to the http response stream as json with standard
@@ -155,333 +227,373 @@
 	return json.NewEncoder(w).Encode(v)
 }
 
-func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) {
-	w.Header().Set("Content-Type", "application/json")
-	if flush {
-		job.Stdout.Add(utils.NewWriteFlusher(w))
-	} else {
-		job.Stdout.Add(w)
-	}
-}
-
-func getBoolParam(value string) (bool, error) {
-	if value == "" {
-		return false, nil
-	}
-	ret, err := strconv.ParseBool(value)
-	if err != nil {
-		return false, fmt.Errorf("Bad parameter")
-	}
-	return ret, nil
-}
-
-func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	var (
-		authConfig, err = ioutil.ReadAll(r.Body)
-		job             = eng.Job("auth")
-		stdoutBuffer    = bytes.NewBuffer(nil)
-	)
+func (s *Server) postAuth(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	var config *cliconfig.AuthConfig
+	err := json.NewDecoder(r.Body).Decode(&config)
+	r.Body.Close()
 	if err != nil {
 		return err
 	}
-	job.Setenv("authConfig", string(authConfig))
-	job.Stdout.Add(stdoutBuffer)
-	if err = job.Run(); err != nil {
+	status, err := s.daemon.RegistryService.Auth(config)
+	if err != nil {
 		return err
 	}
-	if status := engine.Tail(stdoutBuffer, 1); status != "" {
-		var env engine.Env
-		env.Set("Status", status)
-		return writeJSONEnv(w, http.StatusOK, env)
+	return writeJSON(w, http.StatusOK, &types.AuthResponse{
+		Status: status,
+	})
+}
+
+func (s *Server) getVersion(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	v := &types.Version{
+		Version:    dockerversion.VERSION,
+		ApiVersion: api.APIVERSION,
+		GitCommit:  dockerversion.GITCOMMIT,
+		GoVersion:  runtime.Version(),
+		Os:         runtime.GOOS,
+		Arch:       runtime.GOARCH,
 	}
-	w.WriteHeader(http.StatusNoContent)
-	return nil
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		v.KernelVersion = kernelVersion.String()
+	}
+
+	return writeJSON(w, http.StatusOK, v)
 }
 
-func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	w.Header().Set("Content-Type", "application/json")
-	eng.ServeHTTP(w, r)
-	return nil
-}
-
-func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersKill(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	job := eng.Job("kill", vars["name"])
-	if sig := r.Form.Get("signal"); sig != "" {
-		job.Args = append(job.Args, sig)
+
+	var sig uint64
+	name := vars["name"]
+
+	// If we have a signal, look at it. Otherwise, do nothing
+	if sigStr := vars["signal"]; sigStr != "" {
+		// Check if we passed the signal as a number:
+		// The largest legal signal is 31, so let's parse on 5 bits
+		sig, err := strconv.ParseUint(sigStr, 10, 5)
+		if err != nil {
+			// The signal is not a number, treat it as a string (either like
+			// "KILL" or like "SIGKILL")
+			sig = uint64(signal.SignalMap[strings.TrimPrefix(sigStr, "SIG")])
+		}
+
+		if sig == 0 {
+			return fmt.Errorf("Invalid signal: %s", sigStr)
+		}
 	}
-	if err := job.Run(); err != nil {
+
+	if err := s.daemon.ContainerKill(name, sig); err != nil {
 		return err
 	}
+
 	w.WriteHeader(http.StatusNoContent)
 	return nil
 }
 
-func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersPause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	job := eng.Job("pause", vars["name"])
-	if err := job.Run(); err != nil {
+
+	if err := s.daemon.ContainerPause(vars["name"]); err != nil {
 		return err
 	}
+
 	w.WriteHeader(http.StatusNoContent)
+
 	return nil
 }
 
-func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersUnpause(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	job := eng.Job("unpause", vars["name"])
-	if err := job.Run(); err != nil {
+
+	if err := s.daemon.ContainerUnpause(vars["name"]); err != nil {
 		return err
 	}
+
 	w.WriteHeader(http.StatusNoContent)
+
 	return nil
 }
 
-func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersExport(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	job := eng.Job("export", vars["name"])
-	job.Stdout.Add(w)
-	if err := job.Run(); err != nil {
-		return err
-	}
-	return nil
+
+	return s.daemon.ContainerExport(vars["name"], w)
 }
 
-func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
 
-	var (
-		err  error
-		outs *engine.Table
-		job  = eng.Job("images")
-	)
+	imagesConfig := graph.ImagesConfig{
+		Filters: r.Form.Get("filters"),
+		// FIXME this parameter could just be a match filter
+		Filter: r.Form.Get("filter"),
+		All:    boolValue(r, "all"),
+	}
 
-	job.Setenv("filters", r.Form.Get("filters"))
-	// FIXME this parameter could just be a match filter
-	job.Setenv("filter", r.Form.Get("filter"))
-	job.Setenv("all", r.Form.Get("all"))
-
-	if version.GreaterThanOrEqualTo("1.7") {
-		streamJSON(job, w, false)
-	} else if outs, err = job.Stdout.AddListTable(); err != nil {
+	images, err := s.daemon.Repositories().Images(&imagesConfig)
+	if err != nil {
 		return err
 	}
 
-	if err := job.Run(); err != nil {
+	return writeJSON(w, http.StatusOK, images)
+}
+
+func (s *Server) getInfo(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	info, err := s.daemon.SystemInfo()
+	if err != nil {
 		return err
 	}
 
-	if version.LessThan("1.7") && outs != nil { // Convert to legacy format
-		outsLegacy := engine.NewTable("Created", 0)
-		for _, out := range outs.Data {
-			for _, repoTag := range out.GetList("RepoTags") {
-				repo, tag := parsers.ParseRepositoryTag(repoTag)
-				outLegacy := &engine.Env{}
-				outLegacy.Set("Repository", repo)
-				outLegacy.SetJson("Tag", tag)
-				outLegacy.Set("Id", out.Get("Id"))
-				outLegacy.SetInt64("Created", out.GetInt64("Created"))
-				outLegacy.SetInt64("Size", out.GetInt64("Size"))
-				outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize"))
-				outsLegacy.Add(outLegacy)
+	return writeJSON(w, http.StatusOK, info)
+}
+
+func (s *Server) getEvents(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	var since int64 = -1
+	if r.Form.Get("since") != "" {
+		s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64)
+		if err != nil {
+			return err
+		}
+		since = s
+	}
+
+	var until int64 = -1
+	if r.Form.Get("until") != "" {
+		u, err := strconv.ParseInt(r.Form.Get("until"), 10, 64)
+		if err != nil {
+			return err
+		}
+		until = u
+	}
+	timer := time.NewTimer(0)
+	timer.Stop()
+	if until > 0 {
+		dur := time.Unix(until, 0).Sub(time.Now())
+		timer = time.NewTimer(dur)
+	}
+
+	ef, err := filters.FromParam(r.Form.Get("filters"))
+	if err != nil {
+		return err
+	}
+
+	isFiltered := func(field string, filter []string) bool {
+		if len(filter) == 0 {
+			return false
+		}
+		for _, v := range filter {
+			if v == field {
+				return false
+			}
+			if strings.Contains(field, ":") {
+				image := strings.Split(field, ":")
+				if image[0] == v {
+					return false
+				}
 			}
 		}
-		w.Header().Set("Content-Type", "application/json")
-		if _, err := outsLegacy.WriteListTo(w); err != nil {
-			return err
-		}
+		return true
 	}
-	return nil
-}
 
-func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if version.GreaterThan("1.6") {
-		w.WriteHeader(http.StatusNotFound)
-		return fmt.Errorf("This is now implemented in the client.")
-	}
-	eng.ServeHTTP(w, r)
-	return nil
-}
-
-func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	d := s.daemon
+	es := d.EventsService
 	w.Header().Set("Content-Type", "application/json")
-	eng.ServeHTTP(w, r)
-	return nil
-}
+	enc := json.NewEncoder(ioutils.NewWriteFlusher(w))
 
-func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := parseForm(r); err != nil {
-		return err
-	}
-
-	var job = eng.Job("events")
-	streamJSON(job, w, true)
-	job.Setenv("since", r.Form.Get("since"))
-	job.Setenv("until", r.Form.Get("until"))
-	job.Setenv("filters", r.Form.Get("filters"))
-	return job.Run()
-}
-
-func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if vars == nil {
-		return fmt.Errorf("Missing parameter")
-	}
-
-	var job = eng.Job("history", vars["name"])
-	streamJSON(job, w, false)
-
-	if err := job.Run(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if vars == nil {
-		return fmt.Errorf("Missing parameter")
-	}
-	var job = eng.Job("container_changes", vars["name"])
-	streamJSON(job, w, false)
-
-	return job.Run()
-}
-
-func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if version.LessThan("1.4") {
-		return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.")
-	}
-	if vars == nil {
-		return fmt.Errorf("Missing parameter")
-	}
-	if err := parseForm(r); err != nil {
-		return err
-	}
-
-	job := eng.Job("top", vars["name"], r.Form.Get("ps_args"))
-	streamJSON(job, w, false)
-	return job.Run()
-}
-
-func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := parseForm(r); err != nil {
-		return err
-	}
-	var (
-		err  error
-		outs *engine.Table
-		job  = eng.Job("containers")
-	)
-
-	job.Setenv("all", r.Form.Get("all"))
-	job.Setenv("size", r.Form.Get("size"))
-	job.Setenv("since", r.Form.Get("since"))
-	job.Setenv("before", r.Form.Get("before"))
-	job.Setenv("limit", r.Form.Get("limit"))
-	job.Setenv("filters", r.Form.Get("filters"))
-
-	if version.GreaterThanOrEqualTo("1.5") {
-		streamJSON(job, w, false)
-	} else if outs, err = job.Stdout.AddTable(); err != nil {
-		return err
-	}
-	if err = job.Run(); err != nil {
-		return err
-	}
-	if version.LessThan("1.5") { // Convert to legacy format
-		for _, out := range outs.Data {
-			ports := engine.NewTable("", 0)
-			ports.ReadListFrom([]byte(out.Get("Ports")))
-			out.Set("Ports", api.DisplayablePorts(ports))
+	getContainerId := func(cn string) string {
+		c, err := d.Get(cn)
+		if err != nil {
+			return ""
 		}
-		w.Header().Set("Content-Type", "application/json")
-		if _, err = outs.WriteListTo(w); err != nil {
+		return c.ID
+	}
+
+	sendEvent := func(ev *jsonmessage.JSONMessage) error {
+		//incoming container filter can be name,id or partial id, convert and replace as a full container id
+		for i, cn := range ef["container"] {
+			ef["container"][i] = getContainerId(cn)
+		}
+
+		if isFiltered(ev.Status, ef["event"]) || isFiltered(ev.From, ef["image"]) ||
+			isFiltered(ev.ID, ef["container"]) {
+			return nil
+		}
+
+		return enc.Encode(ev)
+	}
+
+	current, l := es.Subscribe()
+	defer es.Evict(l)
+	for _, ev := range current {
+		if ev.Time < since {
+			continue
+		}
+		if err := sendEvent(ev); err != nil {
 			return err
 		}
 	}
-	return nil
+	for {
+		select {
+		case ev := <-l:
+			jev, ok := ev.(*jsonmessage.JSONMessage)
+			if !ok {
+				continue
+			}
+			if err := sendEvent(jev); err != nil {
+				return err
+			}
+		case <-timer.C:
+			return nil
+		}
+	}
 }
 
-func getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := parseForm(r); err != nil {
-		return err
-	}
+func (s *Server) getImagesHistory(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
+
 	name := vars["name"]
-	job := eng.Job("container_stats", name)
-	streamJSON(job, w, true)
-	return job.Run()
-}
-
-func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := parseForm(r); err != nil {
-		return err
-	}
-	if vars == nil {
-		return fmt.Errorf("Missing parameter")
-	}
-
-	var (
-		inspectJob = eng.Job("container_inspect", vars["name"])
-		logsJob    = eng.Job("logs", vars["name"])
-		c, err     = inspectJob.Stdout.AddEnv()
-	)
+	history, err := s.daemon.Repositories().History(name)
 	if err != nil {
 		return err
 	}
-	logsJob.Setenv("follow", r.Form.Get("follow"))
-	logsJob.Setenv("tail", r.Form.Get("tail"))
-	logsJob.Setenv("stdout", r.Form.Get("stdout"))
-	logsJob.Setenv("stderr", r.Form.Get("stderr"))
-	logsJob.Setenv("timestamps", r.Form.Get("timestamps"))
+
+	return writeJSON(w, http.StatusOK, history)
+}
+
+func (s *Server) getContainersChanges(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	changes, err := s.daemon.ContainerChanges(vars["name"])
+	if err != nil {
+		return err
+	}
+
+	return writeJSON(w, http.StatusOK, changes)
+}
+
+func (s *Server) getContainersTop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	if err := parseForm(r); err != nil {
+		return err
+	}
+
+	procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args"))
+	if err != nil {
+		return err
+	}
+
+	return writeJSON(w, http.StatusOK, procList)
+}
+
+func (s *Server) getContainersJSON(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+
+	config := &daemon.ContainersConfig{
+		All:     boolValue(r, "all"),
+		Size:    boolValue(r, "size"),
+		Since:   r.Form.Get("since"),
+		Before:  r.Form.Get("before"),
+		Filters: r.Form.Get("filters"),
+	}
+
+	if tmpLimit := r.Form.Get("limit"); tmpLimit != "" {
+		limit, err := strconv.Atoi(tmpLimit)
+		if err != nil {
+			return err
+		}
+		config.Limit = limit
+	}
+
+	containers, err := s.daemon.Containers(config)
+	if err != nil {
+		return err
+	}
+
+	return writeJSON(w, http.StatusOK, containers)
+}
+
+func (s *Server) getContainersStats(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	return s.daemon.ContainerStats(vars["name"], boolValue(r, "stream"), ioutils.NewWriteFlusher(w))
+}
+
+func (s *Server) getContainersLogs(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
 	// Validate args here, because we can't return not StatusOK after job.Run() call
-	stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr")
+	stdout, stderr := boolValue(r, "stdout"), boolValue(r, "stderr")
 	if !(stdout || stderr) {
 		return fmt.Errorf("Bad parameters: you must choose at least one stream")
 	}
-	if err = inspectJob.Run(); err != nil {
-		return err
+
+	var since time.Time
+	if r.Form.Get("since") != "" {
+		s, err := strconv.ParseInt(r.Form.Get("since"), 10, 64)
+		if err != nil {
+			return err
+		}
+		since = time.Unix(s, 0)
 	}
 
-	var outStream, errStream io.Writer
-	outStream = utils.NewWriteFlusher(w)
-
-	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
-		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
-		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
-	} else {
-		errStream = outStream
+	logsConfig := &daemon.ContainerLogsConfig{
+		Follow:     boolValue(r, "follow"),
+		Timestamps: boolValue(r, "timestamps"),
+		Since:      since,
+		Tail:       r.Form.Get("tail"),
+		UseStdout:  stdout,
+		UseStderr:  stderr,
+		OutStream:  ioutils.NewWriteFlusher(w),
 	}
 
-	logsJob.Stdout.Add(outStream)
-	logsJob.Stderr.Set(errStream)
-	if err := logsJob.Run(); err != nil {
-		fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
+	if err := s.daemon.ContainerLogs(vars["name"], logsConfig); err != nil {
+		fmt.Fprintf(w, "Error running logs job: %s\n", err)
 	}
+
 	return nil
 }
 
-func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postImagesTag(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
@@ -489,57 +601,65 @@
 		return fmt.Errorf("Missing parameter")
 	}
 
-	job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag"))
-	job.Setenv("force", r.Form.Get("force"))
-	if err := job.Run(); err != nil {
+	repo := r.Form.Get("repo")
+	tag := r.Form.Get("tag")
+	force := boolValue(r, "force")
+	name := vars["name"]
+	if err := s.daemon.Repositories().Tag(repo, tag, name, force); err != nil {
 		return err
 	}
+	s.daemon.EventsService.Log("tag", utils.ImageReference(repo, tag), "")
 	w.WriteHeader(http.StatusCreated)
 	return nil
 }
 
-func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postCommit(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	var (
-		config       engine.Env
-		env          engine.Env
-		job          = eng.Job("commit", r.Form.Get("container"))
-		stdoutBuffer = bytes.NewBuffer(nil)
-	)
 
 	if err := checkForJson(r); err != nil {
 		return err
 	}
 
-	if err := config.Decode(r.Body); err != nil {
-		log.Errorf("%s", err)
-	}
+	cont := r.Form.Get("container")
 
+	pause := boolValue(r, "pause")
 	if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
-		job.Setenv("pause", "1")
-	} else {
-		job.Setenv("pause", r.FormValue("pause"))
+		pause = true
 	}
 
-	job.Setenv("repo", r.Form.Get("repo"))
-	job.Setenv("tag", r.Form.Get("tag"))
-	job.Setenv("author", r.Form.Get("author"))
-	job.Setenv("comment", r.Form.Get("comment"))
-	job.SetenvList("changes", r.Form["changes"])
-	job.SetenvSubEnv("config", &config)
-
-	job.Stdout.Add(stdoutBuffer)
-	if err := job.Run(); err != nil {
+	c, _, err := runconfig.DecodeContainerConfig(r.Body)
+	if err != nil && err != io.EOF { //Do not fail if body is empty.
 		return err
 	}
-	env.Set("Id", engine.Tail(stdoutBuffer, 1))
-	return writeJSONEnv(w, http.StatusCreated, env)
+
+	if c == nil {
+		c = &runconfig.Config{}
+	}
+
+	containerCommitConfig := &daemon.ContainerCommitConfig{
+		Pause:   pause,
+		Repo:    r.Form.Get("repo"),
+		Tag:     r.Form.Get("tag"),
+		Author:  r.Form.Get("author"),
+		Comment: r.Form.Get("comment"),
+		Changes: r.Form["changes"],
+		Config:  c,
+	}
+
+	imgID, err := builder.Commit(s.daemon, cont, containerCommitConfig)
+	if err != nil {
+		return err
+	}
+
+	return writeJSON(w, http.StatusCreated, &types.ContainerCommitResponse{
+		ID: imgID,
+	})
 }
 
 // Creates an image from Pull or from Import
-func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postImagesCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
@@ -548,18 +668,25 @@
 		image = r.Form.Get("fromImage")
 		repo  = r.Form.Get("repo")
 		tag   = r.Form.Get("tag")
-		job   *engine.Job
 	)
 	authEncoded := r.Header.Get("X-Registry-Auth")
-	authConfig := &registry.AuthConfig{}
+	authConfig := &cliconfig.AuthConfig{}
 	if authEncoded != "" {
 		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
 		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
 			// for a pull it is not an error if no auth was given
 			// to increase compatibility with the existing api it is defaulting to be empty
-			authConfig = &registry.AuthConfig{}
+			authConfig = &cliconfig.AuthConfig{}
 		}
 	}
+
+	var (
+		err    error
+		output = ioutils.NewWriteFlusher(w)
+	)
+
+	w.Header().Set("Content-Type", "application/json")
+
 	if image != "" { //pull
 		if tag == "" {
 			image, tag = parsers.ParseRepositoryTag(image)
@@ -570,69 +697,81 @@
 				metaHeaders[k] = v
 			}
 		}
-		job = eng.Job("pull", image, tag)
-		job.SetenvBool("parallel", version.GreaterThan("1.3"))
-		job.SetenvJson("metaHeaders", metaHeaders)
-		job.SetenvJson("authConfig", authConfig)
+
+		imagePullConfig := &graph.ImagePullConfig{
+			MetaHeaders: metaHeaders,
+			AuthConfig:  authConfig,
+			OutStream:   output,
+		}
+
+		err = s.daemon.Repositories().Pull(image, tag, imagePullConfig)
 	} else { //import
 		if tag == "" {
 			repo, tag = parsers.ParseRepositoryTag(repo)
 		}
-		job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag)
-		job.Stdin.Add(r.Body)
-		job.SetenvList("changes", r.Form["changes"])
-	}
 
-	if version.GreaterThan("1.0") {
-		job.SetenvBool("json", true)
-		streamJSON(job, w, true)
-	} else {
-		job.Stdout.Add(utils.NewWriteFlusher(w))
-	}
-	if err := job.Run(); err != nil {
-		if !job.Stdout.Used() {
+		src := r.Form.Get("fromSrc")
+		imageImportConfig := &graph.ImageImportConfig{
+			Changes:   r.Form["changes"],
+			InConfig:  r.Body,
+			OutStream: output,
+		}
+
+		// 'err' MUST NOT be defined within this block, we need any error
+		// generated from the download to be available to the output
+		// stream processing below
+		var newConfig *runconfig.Config
+		newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes)
+		if err != nil {
 			return err
 		}
-		sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
-		w.Write(sf.FormatError(err))
+		imageImportConfig.ContainerConfig = newConfig
+
+		err = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig)
+	}
+	if err != nil {
+		if !output.Flushed() {
+			return err
+		}
+		sf := streamformatter.NewJSONStreamFormatter()
+		output.Write(sf.FormatError(err))
 	}
 
 	return nil
+
 }
 
-func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesSearch(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
 	var (
+		config      *cliconfig.AuthConfig
 		authEncoded = r.Header.Get("X-Registry-Auth")
-		authConfig  = &registry.AuthConfig{}
-		metaHeaders = map[string][]string{}
+		headers     = map[string][]string{}
 	)
 
 	if authEncoded != "" {
 		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
-		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
+		if err := json.NewDecoder(authJson).Decode(&config); err != nil {
 			// for a search it is not an error if no auth was given
 			// to increase compatibility with the existing api it is defaulting to be empty
-			authConfig = &registry.AuthConfig{}
+			config = &cliconfig.AuthConfig{}
 		}
 	}
 	for k, v := range r.Header {
 		if strings.HasPrefix(k, "X-Meta-") {
-			metaHeaders[k] = v
+			headers[k] = v
 		}
 	}
-
-	var job = eng.Job("search", r.Form.Get("term"))
-	job.SetenvJson("metaHeaders", metaHeaders)
-	job.SetenvJson("authConfig", authConfig)
-	streamJSON(job, w, false)
-
-	return job.Run()
+	query, err := s.daemon.RegistryService.Search(r.Form.Get("term"), config, headers)
+	if err != nil {
+		return err
+	}
+	return json.NewEncoder(w).Encode(query.Results)
 }
 
-func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postImagesPush(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
@@ -646,7 +785,7 @@
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	authConfig := &registry.AuthConfig{}
+	authConfig := &cliconfig.AuthConfig{}
 
 	authEncoded := r.Header.Get("X-Registry-Auth")
 	if authEncoded != "" {
@@ -654,114 +793,135 @@
 		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
 		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
 			// to increase compatibility to existing api it is defaulting to be empty
-			authConfig = &registry.AuthConfig{}
+			authConfig = &cliconfig.AuthConfig{}
 		}
 	} else {
 		// the old format is supported for compatibility if there was no authConfig header
 		if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
-			return err
+			return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err)
 		}
 	}
 
-	job := eng.Job("push", vars["name"])
-	job.SetenvJson("metaHeaders", metaHeaders)
-	job.SetenvJson("authConfig", authConfig)
-	job.Setenv("tag", r.Form.Get("tag"))
-	if version.GreaterThan("1.0") {
-		job.SetenvBool("json", true)
-		streamJSON(job, w, true)
-	} else {
-		job.Stdout.Add(utils.NewWriteFlusher(w))
+	name := vars["name"]
+	output := ioutils.NewWriteFlusher(w)
+	imagePushConfig := &graph.ImagePushConfig{
+		MetaHeaders: metaHeaders,
+		AuthConfig:  authConfig,
+		Tag:         r.Form.Get("tag"),
+		OutStream:   output,
 	}
 
-	if err := job.Run(); err != nil {
-		if !job.Stdout.Used() {
+	w.Header().Set("Content-Type", "application/json")
+
+	if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil {
+		if !output.Flushed() {
 			return err
 		}
-		sf := utils.NewStreamFormatter(version.GreaterThan("1.0"))
-		w.Write(sf.FormatError(err))
+		sf := streamformatter.NewJSONStreamFormatter()
+		output.Write(sf.FormatError(err))
 	}
 	return nil
+
 }
 
-func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesGet(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 	if err := parseForm(r); err != nil {
 		return err
 	}
-	if version.GreaterThan("1.0") {
-		w.Header().Set("Content-Type", "application/x-tar")
-	}
-	var job *engine.Job
+
+	w.Header().Set("Content-Type", "application/x-tar")
+
+	output := ioutils.NewWriteFlusher(w)
+	imageExportConfig := &graph.ImageExportConfig{Outstream: output}
 	if name, ok := vars["name"]; ok {
-		job = eng.Job("image_export", name)
+		imageExportConfig.Names = []string{name}
 	} else {
-		job = eng.Job("image_export", r.Form["names"]...)
+		imageExportConfig.Names = r.Form["names"]
 	}
-	job.Stdout.Add(w)
-	return job.Run()
+
+	if err := s.daemon.Repositories().ImageExport(imageExportConfig); err != nil {
+		if !output.Flushed() {
+			return err
+		}
+		sf := streamformatter.NewJSONStreamFormatter()
+		output.Write(sf.FormatError(err))
+	}
+	return nil
+
 }
 
-func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	job := eng.Job("load")
-	job.Stdin.Add(r.Body)
-	return job.Run()
+func (s *Server) postImagesLoad(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return s.daemon.Repositories().Load(r.Body, w)
 }
 
-func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
-		return nil
+		return err
 	}
 	if err := checkForJson(r); err != nil {
 		return err
 	}
 	var (
-		job          = eng.Job("create", r.Form.Get("name"))
-		outWarnings  []string
-		stdoutBuffer = bytes.NewBuffer(nil)
-		warnings     = bytes.NewBuffer(nil)
+		warnings []string
+		name     = r.Form.Get("name")
 	)
 
-	if err := job.DecodeEnv(r.Body); err != nil {
+	config, hostConfig, err := runconfig.DecodeContainerConfig(r.Body)
+	if err != nil {
 		return err
 	}
-	// Read container ID from the first line of stdout
-	job.Stdout.Add(stdoutBuffer)
-	// Read warnings from stderr
-	job.Stderr.Add(warnings)
-	if err := job.Run(); err != nil {
+
+	containerId, warnings, err := s.daemon.ContainerCreate(name, config, hostConfig)
+	if err != nil {
 		return err
 	}
-	// Parse warnings from stderr
-	scanner := bufio.NewScanner(warnings)
-	for scanner.Scan() {
-		outWarnings = append(outWarnings, scanner.Text())
-	}
+
 	return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{
-		ID:       engine.Tail(stdoutBuffer, 1),
-		Warnings: outWarnings,
+		ID:       containerId,
+		Warnings: warnings,
 	})
 }
 
-func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersRestart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	job := eng.Job("restart", vars["name"])
-	job.Setenv("t", r.Form.Get("t"))
-	if err := job.Run(); err != nil {
+
+	timeout, _ := strconv.Atoi(r.Form.Get("t"))
+
+	if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil {
+		return err
+	}
+
+	w.WriteHeader(http.StatusNoContent)
+
+	return nil
+}
+
+func (s *Server) postContainerRename(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	name := vars["name"]
+	newName := r.Form.Get("name")
+	if err := s.daemon.ContainerRename(name, newName); err != nil {
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
 	return nil
 }
 
-func postContainerRename(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) deleteContainers(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
@@ -769,77 +929,72 @@
 		return fmt.Errorf("Missing parameter")
 	}
 
-	newName := r.URL.Query().Get("name")
-	job := eng.Job("container_rename", vars["name"], newName)
-	job.Setenv("t", r.Form.Get("t"))
-	if err := job.Run(); err != nil {
+	name := vars["name"]
+	config := &daemon.ContainerRmConfig{
+		ForceRemove:  boolValue(r, "force"),
+		RemoveVolume: boolValue(r, "v"),
+		RemoveLink:   boolValue(r, "link"),
+	}
+
+	if err := s.daemon.ContainerRm(name, config); err != nil {
+		// Force a 404 for the empty string
+		if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") {
+			return fmt.Errorf("no such id: \"\"")
+		}
 		return err
 	}
+
 	w.WriteHeader(http.StatusNoContent)
+
 	return nil
 }
 
-func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) deleteImages(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	job := eng.Job("rm", vars["name"])
 
-	job.Setenv("forceRemove", r.Form.Get("force"))
+	name := vars["name"]
+	force := boolValue(r, "force")
+	noprune := boolValue(r, "noprune")
 
-	job.Setenv("removeVolume", r.Form.Get("v"))
-	job.Setenv("removeLink", r.Form.Get("link"))
-	if err := job.Run(); err != nil {
+	list, err := s.daemon.ImageDelete(name, force, noprune)
+	if err != nil {
 		return err
 	}
-	w.WriteHeader(http.StatusNoContent)
-	return nil
+
+	return writeJSON(w, http.StatusOK, list)
 }
 
-func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := parseForm(r); err != nil {
-		return err
-	}
+func (s *Server) postContainersStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	var job = eng.Job("image_delete", vars["name"])
-	streamJSON(job, w, false)
-	job.Setenv("force", r.Form.Get("force"))
-	job.Setenv("noprune", r.Form.Get("noprune"))
-
-	return job.Run()
-}
-
-func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if vars == nil {
-		return fmt.Errorf("Missing parameter")
-	}
-	var (
-		name = vars["name"]
-		job  = eng.Job("start", name)
-	)
 
 	// If contentLength is -1, we can assumed chunked encoding
 	// or more technically that the length is unknown
-	// http://golang.org/src/pkg/net/http/request.go#L139
+	// https://golang.org/src/pkg/net/http/request.go#L139
 	// net/http otherwise seems to swallow any headers related to chunked encoding
 	// including r.TransferEncoding
 	// allow a nil body for backwards compatibility
+	var hostConfig *runconfig.HostConfig
 	if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) {
 		if err := checkForJson(r); err != nil {
 			return err
 		}
 
-		if err := job.DecodeEnv(r.Body); err != nil {
+		c, err := runconfig.DecodeHostConfig(r.Body)
+		if err != nil {
 			return err
 		}
+
+		hostConfig = c
 	}
 
-	if err := job.Run(); err != nil {
+	if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil {
 		if err.Error() == "Container already started" {
 			w.WriteHeader(http.StatusNotModified)
 			return nil
@@ -850,16 +1005,17 @@
 	return nil
 }
 
-func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersStop(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	job := eng.Job("stop", vars["name"])
-	job.Setenv("t", r.Form.Get("t"))
-	if err := job.Run(); err != nil {
+
+	seconds, _ := strconv.Atoi(r.Form.Get("t"))
+
+	if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil {
 		if err.Error() == "Container already stopped" {
 			w.WriteHeader(http.StatusNotModified)
 			return nil
@@ -867,58 +1023,52 @@
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
+
 	return nil
 }
 
-func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if vars == nil {
-		return fmt.Errorf("Missing parameter")
-	}
-	var (
-		env          engine.Env
-		stdoutBuffer = bytes.NewBuffer(nil)
-		job          = eng.Job("wait", vars["name"])
-	)
-	job.Stdout.Add(stdoutBuffer)
-	if err := job.Run(); err != nil {
-		return err
-	}
-
-	env.Set("StatusCode", engine.Tail(stdoutBuffer, 1))
-	return writeJSONEnv(w, http.StatusOK, env)
-}
-
-func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := parseForm(r); err != nil {
-		return err
-	}
-	if vars == nil {
-		return fmt.Errorf("Missing parameter")
-	}
-	if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := parseForm(r); err != nil {
-		return err
-	}
+func (s *Server) postContainersWait(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 
-	var (
-		job    = eng.Job("container_inspect", vars["name"])
-		c, err = job.Stdout.AddEnv()
-	)
+	status, err := s.daemon.ContainerWait(vars["name"], -1*time.Second)
 	if err != nil {
 		return err
 	}
-	if err = job.Run(); err != nil {
+
+	return writeJSON(w, http.StatusOK, &types.ContainerWaitResponse{
+		StatusCode: status,
+	})
+}
+
+func (s *Server) postContainersResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
 		return err
 	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+
+	height, err := strconv.Atoi(r.Form.Get("h"))
+	if err != nil {
+		return err
+	}
+	width, err := strconv.Atoi(r.Form.Get("w"))
+	if err != nil {
+		return err
+	}
+
+	return s.daemon.ContainerResize(vars["name"], height, width)
+}
+
+func (s *Server) postContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := parseForm(r); err != nil {
+		return err
+	}
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
 
 	inStream, outStream, err := hijackServer(w)
 	if err != nil {
@@ -926,38 +1076,31 @@
 	}
 	defer closeStreams(inStream, outStream)
 
-	var errStream io.Writer
-
 	if _, ok := r.Header["Upgrade"]; ok {
 		fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
 	} else {
 		fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
 	}
 
-	if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
-		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
-		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
-	} else {
-		errStream = outStream
+	attachWithLogsConfig := &daemon.ContainerAttachWithLogsConfig{
+		InStream:  inStream,
+		OutStream: outStream,
+		UseStdin:  boolValue(r, "stdin"),
+		UseStdout: boolValue(r, "stdout"),
+		UseStderr: boolValue(r, "stderr"),
+		Logs:      boolValue(r, "logs"),
+		Stream:    boolValue(r, "stream"),
+		Multiplex: version.GreaterThanOrEqualTo("1.6"),
 	}
 
-	job = eng.Job("attach", vars["name"])
-	job.Setenv("logs", r.Form.Get("logs"))
-	job.Setenv("stream", r.Form.Get("stream"))
-	job.Setenv("stdin", r.Form.Get("stdin"))
-	job.Setenv("stdout", r.Form.Get("stdout"))
-	job.Setenv("stderr", r.Form.Get("stderr"))
-	job.Stdin.Add(inStream)
-	job.Stdout.Add(outStream)
-	job.Stderr.Set(errStream)
-	if err := job.Run(); err != nil {
+	if err := s.daemon.ContainerAttachWithLogs(vars["name"], attachWithLogsConfig); err != nil {
 		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
-
 	}
+
 	return nil
 }
 
-func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) wsContainersAttach(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
@@ -965,23 +1108,19 @@
 		return fmt.Errorf("Missing parameter")
 	}
 
-	if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil {
-		return err
-	}
-
 	h := websocket.Handler(func(ws *websocket.Conn) {
 		defer ws.Close()
-		job := eng.Job("attach", vars["name"])
-		job.Setenv("logs", r.Form.Get("logs"))
-		job.Setenv("stream", r.Form.Get("stream"))
-		job.Setenv("stdin", r.Form.Get("stdin"))
-		job.Setenv("stdout", r.Form.Get("stdout"))
-		job.Setenv("stderr", r.Form.Get("stderr"))
-		job.Stdin.Add(ws)
-		job.Stdout.Add(ws)
-		job.Stderr.Set(ws)
-		if err := job.Run(); err != nil {
-			log.Errorf("Error attaching websocket: %s", err)
+
+		wsAttachWithLogsConfig := &daemon.ContainerWsAttachWithLogsConfig{
+			InStream:  ws,
+			OutStream: ws,
+			ErrStream: ws,
+			Logs:      boolValue(r, "logs"),
+			Stream:    boolValue(r, "stream"),
+		}
+
+		if err := s.daemon.ContainerWsAttachWithLogs(vars["name"], wsAttachWithLogsConfig); err != nil {
+			logrus.Errorf("Error attaching websocket: %s", err)
 		}
 	})
 	h.ServeHTTP(w, r)
@@ -989,103 +1128,94 @@
 	return nil
 }
 
-func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getContainersByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	var job = eng.Job("container_inspect", vars["name"])
-	if version.LessThan("1.12") {
-		job.SetenvBool("raw", true)
+
+	containerJSON, err := s.daemon.ContainerInspect(vars["name"])
+	if err != nil {
+		return err
 	}
-	streamJSON(job, w, false)
-	return job.Run()
+	return writeJSON(w, http.StatusOK, containerJSON)
 }
 
-func getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getExecByID(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter 'id'")
 	}
-	var job = eng.Job("execInspect", vars["id"])
-	streamJSON(job, w, false)
-	return job.Run()
+
+	eConfig, err := s.daemon.ContainerExecInspect(vars["id"])
+	if err != nil {
+		return err
+	}
+
+	return writeJSON(w, http.StatusOK, eConfig)
 }
 
-func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) getImagesByName(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	var job = eng.Job("image_inspect", vars["name"])
-	if version.LessThan("1.12") {
-		job.SetenvBool("raw", true)
+
+	imageInspect, err := s.daemon.Repositories().Lookup(vars["name"])
+	if err != nil {
+		return err
 	}
-	streamJSON(job, w, false)
-	return job.Run()
+
+	return writeJSON(w, http.StatusOK, imageInspect)
 }
 
-func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if version.LessThan("1.3") {
-		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
-	}
+func (s *Server) postBuild(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	var (
-		authEncoded       = r.Header.Get("X-Registry-Auth")
-		authConfig        = &registry.AuthConfig{}
+		authConfig        = &cliconfig.AuthConfig{}
 		configFileEncoded = r.Header.Get("X-Registry-Config")
-		configFile        = &registry.ConfigFile{}
-		job               = eng.Job("build")
+		configFile        = &cliconfig.ConfigFile{}
+		buildConfig       = builder.NewBuildConfig()
 	)
 
-	// This block can be removed when API versions prior to 1.9 are deprecated.
-	// Both headers will be parsed and sent along to the daemon, but if a non-empty
-	// ConfigFile is present, any value provided as an AuthConfig directly will
-	// be overridden. See BuildFile::CmdFrom for details.
-	if version.LessThan("1.9") && authEncoded != "" {
-		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
-		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
-			// for a pull it is not an error if no auth was given
-			// to increase compatibility with the existing api it is defaulting to be empty
-			authConfig = &registry.AuthConfig{}
-		}
-	}
-
 	if configFileEncoded != "" {
 		configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
 		if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
 			// for a pull it is not an error if no auth was given
 			// to increase compatibility with the existing api it is defaulting to be empty
-			configFile = &registry.ConfigFile{}
+			configFile = &cliconfig.ConfigFile{}
 		}
 	}
 
-	if version.GreaterThanOrEqualTo("1.8") {
-		job.SetenvBool("json", true)
-		streamJSON(job, w, true)
+	w.Header().Set("Content-Type", "application/json")
+
+	if boolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
+		buildConfig.Remove = true
+	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
+		buildConfig.Remove = true
 	} else {
-		job.Stdout.Add(utils.NewWriteFlusher(w))
+		buildConfig.Remove = boolValue(r, "rm")
+	}
+	if boolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") {
+		buildConfig.Pull = true
 	}
 
-	if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") {
-		job.Setenv("rm", "1")
-	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
-		job.Setenv("rm", "1")
-	} else {
-		job.Setenv("rm", r.FormValue("rm"))
-	}
-	if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") {
-		job.Setenv("pull", "1")
-	}
-	job.Stdin.Add(r.Body)
-	job.Setenv("remote", r.FormValue("remote"))
-	job.Setenv("dockerfile", r.FormValue("dockerfile"))
-	job.Setenv("t", r.FormValue("t"))
-	job.Setenv("q", r.FormValue("q"))
-	job.Setenv("nocache", r.FormValue("nocache"))
-	job.Setenv("forcerm", r.FormValue("forcerm"))
-	job.SetenvJson("authConfig", authConfig)
-	job.SetenvJson("configFile", configFile)
-	job.Setenv("memswap", r.FormValue("memswap"))
-	job.Setenv("memory", r.FormValue("memory"))
-	job.Setenv("cpusetcpus", r.FormValue("cpusetcpus"))
-	job.Setenv("cpushares", r.FormValue("cpushares"))
+	output := ioutils.NewWriteFlusher(w)
+	buildConfig.Stdout = output
+	buildConfig.Context = r.Body
+
+	buildConfig.RemoteURL = r.FormValue("remote")
+	buildConfig.DockerfileName = r.FormValue("dockerfile")
+	buildConfig.RepoName = r.FormValue("t")
+	buildConfig.SuppressOutput = boolValue(r, "q")
+	buildConfig.NoCache = boolValue(r, "nocache")
+	buildConfig.ForceRemove = boolValue(r, "forcerm")
+	buildConfig.AuthConfig = authConfig
+	buildConfig.ConfigFile = configFile
+	buildConfig.MemorySwap = int64ValueOrZero(r, "memswap")
+	buildConfig.Memory = int64ValueOrZero(r, "memory")
+	buildConfig.CpuShares = int64ValueOrZero(r, "cpushares")
+	buildConfig.CpuPeriod = int64ValueOrZero(r, "cpuperiod")
+	buildConfig.CpuQuota = int64ValueOrZero(r, "cpuquota")
+	buildConfig.CpuSetCpus = r.FormValue("cpusetcpus")
+	buildConfig.CpuSetMems = r.FormValue("cpusetmems")
+	buildConfig.CgroupParent = r.FormValue("cgroupparent")
 
 	// Job cancellation. Note: not all job types support this.
 	if closeNotifier, ok := w.(http.CloseNotifier); ok {
@@ -1095,103 +1225,109 @@
 			select {
 			case <-finished:
 			case <-closeNotifier.CloseNotify():
-				log.Infof("Client disconnected, cancelling job: %v", job)
-				job.Cancel()
+				logrus.Infof("Client disconnected, cancelling job: build")
+				buildConfig.Cancel()
 			}
 		}()
 	}
 
-	if err := job.Run(); err != nil {
-		if !job.Stdout.Used() {
+	if err := builder.Build(s.daemon, buildConfig); err != nil {
+		// Do not write the error in the http output if it's still empty.
+		// This prevents from writing a 200(OK) when there is an interal error.
+		if !output.Flushed() {
 			return err
 		}
-		sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8"))
+		sf := streamformatter.NewJSONStreamFormatter()
 		w.Write(sf.FormatError(err))
 	}
 	return nil
 }
 
-func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainersCopy(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
 
-	var copyData engine.Env
-
 	if err := checkForJson(r); err != nil {
 		return err
 	}
 
-	if err := copyData.Decode(r.Body); err != nil {
+	cfg := types.CopyConfig{}
+	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
 		return err
 	}
 
-	if copyData.Get("Resource") == "" {
+	if cfg.Resource == "" {
 		return fmt.Errorf("Path cannot be empty")
 	}
 
-	origResource := copyData.Get("Resource")
-
-	if copyData.Get("Resource")[0] == '/' {
-		copyData.Set("Resource", copyData.Get("Resource")[1:])
-	}
-
-	job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
-	job.Stdout.Add(w)
-	w.Header().Set("Content-Type", "application/x-tar")
-	if err := job.Run(); err != nil {
-		log.Errorf("%v", err)
+	data, err := s.daemon.ContainerCopy(vars["name"], cfg.Resource)
+	if err != nil {
 		if strings.Contains(strings.ToLower(err.Error()), "no such id") {
 			w.WriteHeader(http.StatusNotFound)
-		} else if strings.Contains(err.Error(), "no such file or directory") {
-			return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
+			return nil
 		}
+		if os.IsNotExist(err) {
+			return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"])
+		}
+		return err
 	}
+	defer data.Close()
+
+	w.Header().Set("Content-Type", "application/x-tar")
+	if _, err := io.Copy(w, data); err != nil {
+		return err
+	}
+
 	return nil
 }
 
-func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainerExecCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
-		return nil
-	}
-	var (
-		out          engine.Env
-		name         = vars["name"]
-		job          = eng.Job("execCreate", name)
-		stdoutBuffer = bytes.NewBuffer(nil)
-	)
-
-	if err := job.DecodeEnv(r.Body); err != nil {
 		return err
 	}
+	name := vars["name"]
 
-	job.Stdout.Add(stdoutBuffer)
+	execConfig := &runconfig.ExecConfig{}
+	if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil {
+		return err
+	}
+	execConfig.Container = name
+
+	if len(execConfig.Cmd) == 0 {
+		return fmt.Errorf("No exec command specified")
+	}
+
 	// Register an instance of Exec in container.
-	if err := job.Run(); err != nil {
-		fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err)
+	id, err := s.daemon.ContainerExecCreate(execConfig)
+	if err != nil {
+		logrus.Errorf("Error setting up exec command in container %s: %s", name, err)
 		return err
 	}
-	// Return the ID
-	out.Set("Id", engine.Tail(stdoutBuffer, 1))
 
-	return writeJSONEnv(w, http.StatusCreated, out)
+	return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{
+		ID: id,
+	})
 }
 
 // TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start.
-func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainerExecStart(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
-		return nil
-	}
-	var (
-		name             = vars["name"]
-		job              = eng.Job("execStart", name)
-		errOut io.Writer = os.Stderr
-	)
-
-	if err := job.DecodeEnv(r.Body); err != nil {
 		return err
 	}
-	if !job.GetenvBool("Detach") {
+	var (
+		execName = vars["name"]
+		stdin    io.ReadCloser
+		stdout   io.Writer
+		stderr   io.Writer
+	)
+
+	execStartCheck := &types.ExecStartCheck{}
+	if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
+		return err
+	}
+
+	if !execStartCheck.Detach {
 		// Setting up the streaming http interface.
 		inStream, outStream, err := hijackServer(w)
 		if err != nil {
@@ -1207,21 +1343,19 @@
 			fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
 		}
 
-		if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
+		if !execStartCheck.Tty {
 			errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
 			outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
-		} else {
-			errStream = outStream
 		}
-		job.Stdin.Add(inStream)
-		job.Stdout.Add(outStream)
-		job.Stderr.Set(errStream)
-		errOut = outStream
+
+		stdin = inStream
+		stdout = outStream
+		stderr = errStream
 	}
 	// Now run the user process in container.
-	job.SetCloseIO(false)
-	if err := job.Run(); err != nil {
-		fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err)
+
+	if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil {
+		logrus.Errorf("Error starting exec command in container %s: %s", execName, err)
 		return err
 	}
 	w.WriteHeader(http.StatusNoContent)
@@ -1229,48 +1363,75 @@
 	return nil
 }
 
-func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) postContainerExecResize(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return err
 	}
 	if vars == nil {
 		return fmt.Errorf("Missing parameter")
 	}
-	if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
+
+	height, err := strconv.Atoi(r.Form.Get("h"))
+	if err != nil {
 		return err
 	}
-	return nil
+	width, err := strconv.Atoi(r.Form.Get("w"))
+	if err != nil {
+		return err
+	}
+
+	return s.daemon.ContainerExecResize(vars["name"], height, width)
 }
 
-func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) optionsHandler(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	w.WriteHeader(http.StatusOK)
 	return nil
 }
 func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) {
-	log.Debugf("CORS header is enabled and set to: %s", corsHeaders)
+	logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
 	w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
 	w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
 	w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
 }
 
-func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *Server) ping(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	_, err := w.Write([]byte{'O', 'K'})
 	return err
 }
 
-func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
+func (s *Server) initTcpSocket(addr string) (l net.Listener, err error) {
+	if !s.cfg.TlsVerify {
+		logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
+	}
+
+	var c *sockets.TlsConfig
+	if s.cfg.Tls || s.cfg.TlsVerify {
+		c = sockets.NewTlsConfig(s.cfg.TlsCert, s.cfg.TlsKey, s.cfg.TlsCa, s.cfg.TlsVerify)
+	}
+
+	if l, err = sockets.NewTcpSocket(addr, c, s.start); err != nil {
+		return nil, err
+	}
+	if err := allocateDaemonPort(addr); err != nil {
+		return nil, err
+	}
+
+	return
+}
+
+func makeHttpHandler(logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		// log the request
-		log.Debugf("Calling %s %s", localMethod, localRoute)
+		logrus.Debugf("Calling %s %s", localMethod, localRoute)
 
 		if logging {
-			log.Infof("%s %s", r.Method, r.RequestURI)
+			logrus.Infof("%s %s", r.Method, r.RequestURI)
 		}
 
 		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
 			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
 			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
-				log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
+				logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
 			}
 		}
 		version := version.Version(mux.Vars(r)["version"])
@@ -1282,123 +1443,96 @@
 		}
 
 		if version.GreaterThan(api.APIVERSION) {
-			http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
+			http.Error(w, fmt.Errorf("client and server don't have same version (client API version: %s, server API version: %s)", version, api.APIVERSION).Error(), http.StatusBadRequest)
 			return
 		}
 
-		if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil {
-			log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
+		if err := handlerFunc(version, w, r, mux.Vars(r)); err != nil {
+			logrus.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err)
 			httpError(w, err)
 		}
 	}
 }
 
-// Replicated from expvar.go as not public.
-func expvarHandler(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	fmt.Fprintf(w, "{\n")
-	first := true
-	expvar.Do(func(kv expvar.KeyValue) {
-		if !first {
-			fmt.Fprintf(w, ",\n")
-		}
-		first = false
-		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
-	})
-	fmt.Fprintf(w, "\n}\n")
-}
-
-func AttachProfiler(router *mux.Router) {
-	router.HandleFunc("/debug/vars", expvarHandler)
-	router.HandleFunc("/debug/pprof/", pprof.Index)
-	router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
-	router.HandleFunc("/debug/pprof/profile", pprof.Profile)
-	router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
-	router.HandleFunc("/debug/pprof/block", pprof.Handler("block").ServeHTTP)
-	router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
-	router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
-	router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
-}
-
 // we keep enableCors just for legacy usage, need to be removed in the future
-func createRouter(eng *engine.Engine, logging, enableCors bool, corsHeaders string, dockerVersion string) *mux.Router {
+func createRouter(s *Server) *mux.Router {
 	r := mux.NewRouter()
 	if os.Getenv("DEBUG") != "" {
-		AttachProfiler(r)
+		ProfilerSetup(r, "/debug/")
 	}
 	m := map[string]map[string]HttpApiFunc{
 		"GET": {
-			"/_ping":                          ping,
-			"/events":                         getEvents,
-			"/info":                           getInfo,
-			"/version":                        getVersion,
-			"/images/json":                    getImagesJSON,
-			"/images/viz":                     getImagesViz,
-			"/images/search":                  getImagesSearch,
-			"/images/get":                     getImagesGet,
-			"/images/{name:.*}/get":           getImagesGet,
-			"/images/{name:.*}/history":       getImagesHistory,
-			"/images/{name:.*}/json":          getImagesByName,
-			"/containers/ps":                  getContainersJSON,
-			"/containers/json":                getContainersJSON,
-			"/containers/{name:.*}/export":    getContainersExport,
-			"/containers/{name:.*}/changes":   getContainersChanges,
-			"/containers/{name:.*}/json":      getContainersByName,
-			"/containers/{name:.*}/top":       getContainersTop,
-			"/containers/{name:.*}/logs":      getContainersLogs,
-			"/containers/{name:.*}/stats":     getContainersStats,
-			"/containers/{name:.*}/attach/ws": wsContainersAttach,
-			"/exec/{id:.*}/json":              getExecByID,
+			"/_ping":                          s.ping,
+			"/events":                         s.getEvents,
+			"/info":                           s.getInfo,
+			"/version":                        s.getVersion,
+			"/images/json":                    s.getImagesJSON,
+			"/images/search":                  s.getImagesSearch,
+			"/images/get":                     s.getImagesGet,
+			"/images/{name:.*}/get":           s.getImagesGet,
+			"/images/{name:.*}/history":       s.getImagesHistory,
+			"/images/{name:.*}/json":          s.getImagesByName,
+			"/containers/ps":                  s.getContainersJSON,
+			"/containers/json":                s.getContainersJSON,
+			"/containers/{name:.*}/export":    s.getContainersExport,
+			"/containers/{name:.*}/changes":   s.getContainersChanges,
+			"/containers/{name:.*}/json":      s.getContainersByName,
+			"/containers/{name:.*}/top":       s.getContainersTop,
+			"/containers/{name:.*}/logs":      s.getContainersLogs,
+			"/containers/{name:.*}/stats":     s.getContainersStats,
+			"/containers/{name:.*}/attach/ws": s.wsContainersAttach,
+			"/exec/{id:.*}/json":              s.getExecByID,
 		},
 		"POST": {
-			"/auth":                         postAuth,
-			"/commit":                       postCommit,
-			"/build":                        postBuild,
-			"/images/create":                postImagesCreate,
-			"/images/load":                  postImagesLoad,
-			"/images/{name:.*}/push":        postImagesPush,
-			"/images/{name:.*}/tag":         postImagesTag,
-			"/containers/create":            postContainersCreate,
-			"/containers/{name:.*}/kill":    postContainersKill,
-			"/containers/{name:.*}/pause":   postContainersPause,
-			"/containers/{name:.*}/unpause": postContainersUnpause,
-			"/containers/{name:.*}/restart": postContainersRestart,
-			"/containers/{name:.*}/start":   postContainersStart,
-			"/containers/{name:.*}/stop":    postContainersStop,
-			"/containers/{name:.*}/wait":    postContainersWait,
-			"/containers/{name:.*}/resize":  postContainersResize,
-			"/containers/{name:.*}/attach":  postContainersAttach,
-			"/containers/{name:.*}/copy":    postContainersCopy,
-			"/containers/{name:.*}/exec":    postContainerExecCreate,
-			"/exec/{name:.*}/start":         postContainerExecStart,
-			"/exec/{name:.*}/resize":        postContainerExecResize,
-			"/containers/{name:.*}/rename":  postContainerRename,
+			"/auth":                         s.postAuth,
+			"/commit":                       s.postCommit,
+			"/build":                        s.postBuild,
+			"/images/create":                s.postImagesCreate,
+			"/images/load":                  s.postImagesLoad,
+			"/images/{name:.*}/push":        s.postImagesPush,
+			"/images/{name:.*}/tag":         s.postImagesTag,
+			"/containers/create":            s.postContainersCreate,
+			"/containers/{name:.*}/kill":    s.postContainersKill,
+			"/containers/{name:.*}/pause":   s.postContainersPause,
+			"/containers/{name:.*}/unpause": s.postContainersUnpause,
+			"/containers/{name:.*}/restart": s.postContainersRestart,
+			"/containers/{name:.*}/start":   s.postContainersStart,
+			"/containers/{name:.*}/stop":    s.postContainersStop,
+			"/containers/{name:.*}/wait":    s.postContainersWait,
+			"/containers/{name:.*}/resize":  s.postContainersResize,
+			"/containers/{name:.*}/attach":  s.postContainersAttach,
+			"/containers/{name:.*}/copy":    s.postContainersCopy,
+			"/containers/{name:.*}/exec":    s.postContainerExecCreate,
+			"/exec/{name:.*}/start":         s.postContainerExecStart,
+			"/exec/{name:.*}/resize":        s.postContainerExecResize,
+			"/containers/{name:.*}/rename":  s.postContainerRename,
 		},
 		"DELETE": {
-			"/containers/{name:.*}": deleteContainers,
-			"/images/{name:.*}":     deleteImages,
+			"/containers/{name:.*}": s.deleteContainers,
+			"/images/{name:.*}":     s.deleteImages,
 		},
 		"OPTIONS": {
-			"": optionsHandler,
+			"": s.optionsHandler,
 		},
 	}
 
 	// If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*"
 	// otherwise, all head values will be passed to HTTP handler
-	if corsHeaders == "" && enableCors {
+	corsHeaders := s.cfg.CorsHeaders
+	if corsHeaders == "" && s.cfg.EnableCors {
 		corsHeaders = "*"
 	}
 
 	for method, routes := range m {
 		for route, fct := range routes {
-			log.Debugf("Registering %s, %s", method, route)
+			logrus.Debugf("Registering %s, %s", method, route)
 			// NOTE: scope issue, make sure the variables are local and won't be changed
 			localRoute := route
 			localFct := fct
 			localMethod := method
 
 			// build the handler function
-			f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, corsHeaders, version.Version(dockerVersion))
+			f := makeHttpHandler(s.cfg.Logging, localMethod, localRoute, localFct, corsHeaders, version.Version(s.cfg.Version))
 
 			// add the new route
 			if localRoute == "" {
@@ -1413,101 +1547,6 @@
 	return r
 }
 
-// ServeRequest processes a single http request to the docker remote api.
-// FIXME: refactor this to be part of Server and not require re-creating a new
-// router each time. This requires first moving ListenAndServe into Server.
-func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) {
-	router := createRouter(eng, false, true, "", "")
-	// Insert APIVERSION into the request as a convenience
-	req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path)
-	router.ServeHTTP(w, req)
-}
-
-func lookupGidByName(nameOrGid string) (int, error) {
-	groupFile, err := user.GetGroupPath()
-	if err != nil {
-		return -1, err
-	}
-	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
-		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
-	})
-	if err != nil {
-		return -1, err
-	}
-	if groups != nil && len(groups) > 0 {
-		return groups[0].Gid, nil
-	}
-	gid, err := strconv.Atoi(nameOrGid)
-	if err == nil {
-		log.Warnf("Could not find GID %d", gid)
-		return gid, nil
-	}
-	return -1, fmt.Errorf("Group %s not found", nameOrGid)
-}
-
-func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) {
-	tlsCert, err := tls.LoadX509KeyPair(cert, key)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", cert, key, err)
-		}
-		return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %q. Make sure the key is encrypted.",
-			cert, key, err)
-	}
-	tlsConfig := &tls.Config{
-		NextProtos:   []string{"http/1.1"},
-		Certificates: []tls.Certificate{tlsCert},
-		// Avoid fallback on insecure SSL protocols
-		MinVersion: tls.VersionTLS10,
-	}
-
-	if ca != "" {
-		certPool := x509.NewCertPool()
-		file, err := ioutil.ReadFile(ca)
-		if err != nil {
-			return nil, fmt.Errorf("Could not read CA certificate: %v", err)
-		}
-		certPool.AppendCertsFromPEM(file)
-		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
-		tlsConfig.ClientCAs = certPool
-	}
-
-	return tls.NewListener(l, tlsConfig), nil
-}
-
-func newListener(proto, addr string, bufferRequests bool) (net.Listener, error) {
-	if bufferRequests {
-		return listenbuffer.NewListenBuffer(proto, addr, activationLock)
-	}
-
-	return net.Listen(proto, addr)
-}
-
-func changeGroup(addr string, nameOrGid string) error {
-	gid, err := lookupGidByName(nameOrGid)
-	if err != nil {
-		return err
-	}
-
-	log.Debugf("%s group found. gid: %d", nameOrGid, gid)
-	return os.Chown(addr, 0, gid)
-}
-
-func setSocketGroup(addr, group string) error {
-	if group == "" {
-		return nil
-	}
-
-	if err := changeGroup(addr, group); err != nil {
-		if group != "docker" {
-			return err
-		}
-		log.Debugf("Warning: could not chgrp %s to docker: %v", addr, err)
-	}
-
-	return nil
-}
-
 func allocateDaemonPort(addr string) error {
 	host, port, err := net.SplitHostPort(addr)
 	if err != nil {
@@ -1526,89 +1565,11 @@
 		return fmt.Errorf("failed to lookup %s address in host specification", host)
 	}
 
+	pa := portallocator.Get()
 	for _, hostIP := range hostIPs {
-		if _, err := bridge.RequestPort(hostIP, "tcp", intPort); err != nil {
+		if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil {
 			return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err)
 		}
 	}
 	return nil
 }
-
-func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) {
-	if !job.GetenvBool("TlsVerify") {
-		log.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
-	}
-
-	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))
-
-	l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests"))
-	if err != nil {
-		return nil, err
-	}
-
-	if err := allocateDaemonPort(addr); err != nil {
-		return nil, err
-	}
-
-	if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") {
-		var tlsCa string
-		if job.GetenvBool("TlsVerify") {
-			tlsCa = job.Getenv("TlsCa")
-		}
-		l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
-}
-
-type Server interface {
-	Serve() error
-	Close() error
-}
-
-// ServeApi loops through all of the protocols sent in to docker and spawns
-// off a go routine to setup a serving http.Server for each.
-func ServeApi(job *engine.Job) engine.Status {
-	if len(job.Args) == 0 {
-		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
-	}
-	var (
-		protoAddrs = job.Args
-		chErrors   = make(chan error, len(protoAddrs))
-	)
-
-	for _, protoAddr := range protoAddrs {
-		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
-		if len(protoAddrParts) != 2 {
-			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
-		}
-		go func() {
-			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
-			srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job)
-			if err != nil {
-				chErrors <- err
-				return
-			}
-			job.Eng.OnShutdown(func() {
-				if err := srv.Close(); err != nil {
-					log.Error(err)
-				}
-			})
-			if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
-				err = nil
-			}
-			chErrors <- err
-		}()
-	}
-
-	for i := 0; i < len(protoAddrs); i++ {
-		err := <-chErrors
-		if err != nil {
-			return job.Error(err)
-		}
-	}
-
-	return engine.StatusOK
-}
diff --git a/api/server/server_linux.go b/api/server/server_linux.go
index a931157..a0cfee1 100644
--- a/api/server/server_linux.go
+++ b/api/server/server_linux.go
@@ -4,102 +4,75 @@
 
 import (
 	"fmt"
+	"net"
 	"net/http"
-	"os"
-	"syscall"
 
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/sockets"
 	"github.com/docker/docker/pkg/systemd"
 )
 
-// NewServer sets up the required Server and does protocol specific checking.
-func NewServer(proto, addr string, job *engine.Job) (Server, error) {
-	// Basic error and sanity checking
+// newServer sets up the required serverCloser and does protocol specific checking.
+func (s *Server) newServer(proto, addr string) (serverCloser, error) {
+	var (
+		err error
+		l   net.Listener
+	)
 	switch proto {
 	case "fd":
-		return nil, serveFd(addr, job)
-	case "tcp":
-		return setupTcpHttp(addr, job)
-	case "unix":
-		return setupUnixHttp(addr, job)
-	default:
-		return nil, fmt.Errorf("Invalid protocol format.")
-	}
-}
-
-func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) {
-	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))
-
-	if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) {
-		return nil, err
-	}
-	mask := syscall.Umask(0777)
-	defer syscall.Umask(mask)
-
-	l, err := newListener("unix", addr, job.GetenvBool("BufferRequests"))
-	if err != nil {
-		return nil, err
-	}
-
-	if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil {
-		return nil, err
-	}
-
-	if err := os.Chmod(addr, 0660); err != nil {
-		return nil, err
-	}
-
-	return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil
-}
-
-// serveFd creates an http.Server and sets it up to serve given a socket activated
-// argument.
-func serveFd(addr string, job *engine.Job) error {
-	r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version"))
-
-	ls, e := systemd.ListenFD(addr)
-	if e != nil {
-		return e
-	}
-
-	chErrors := make(chan error, len(ls))
-
-	// We don't want to start serving on these sockets until the
-	// daemon is initialized and installed. Otherwise required handlers
-	// won't be ready.
-	<-activationLock
-
-	// Since ListenFD will return one or more sockets we have
-	// to create a go func to spawn off multiple serves
-	for i := range ls {
-		listener := ls[i]
-		go func() {
-			httpSrv := http.Server{Handler: r}
-			chErrors <- httpSrv.Serve(listener)
-		}()
-	}
-
-	for i := 0; i < len(ls); i++ {
-		err := <-chErrors
+		ls, err := systemd.ListenFD(addr)
 		if err != nil {
-			return err
+			return nil, err
 		}
+		chErrors := make(chan error, len(ls))
+		// We don't want to start serving on these sockets until the
+		// daemon is initialized and installed. Otherwise required handlers
+		// won't be ready.
+		<-s.start
+		// Since ListenFD will return one or more sockets we have
+		// to create a go func to spawn off multiple serves
+		for i := range ls {
+			listener := ls[i]
+			go func() {
+				httpSrv := http.Server{Handler: s.router}
+				chErrors <- httpSrv.Serve(listener)
+			}()
+		}
+		for i := 0; i < len(ls); i++ {
+			if err := <-chErrors; err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	case "tcp":
+		l, err = s.initTcpSocket(addr)
+		if err != nil {
+			return nil, err
+		}
+	case "unix":
+		if l, err = sockets.NewUnixSocket(addr, s.cfg.SocketGroup, s.start); err != nil {
+			return nil, err
+		}
+	default:
+		return nil, fmt.Errorf("Invalid protocol format: %q", proto)
 	}
-
-	return nil
+	return &HttpServer{
+		&http.Server{
+			Addr:    addr,
+			Handler: s.router,
+		},
+		l,
+	}, nil
 }
 
-// Called through eng.Job("acceptconnections")
-func AcceptConnections(job *engine.Job) engine.Status {
+func (s *Server) AcceptConnections(d *daemon.Daemon) {
 	// Tell the init daemon we are accepting requests
+	s.daemon = d
 	go systemd.SdNotify("READY=1")
-
 	// close the lock so the listeners start accepting connections
 	select {
-	case <-activationLock:
+	case <-s.start:
 	default:
-		close(activationLock)
+		close(s.start)
 	}
-
-	return engine.StatusOK
 }
diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go
deleted file mode 100644
index b5ec7c8..0000000
--- a/api/server/server_unit_test.go
+++ /dev/null
@@ -1,553 +0,0 @@
-package server
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"net/http"
-	"net/http/httptest"
-	"reflect"
-	"strings"
-	"testing"
-
-	"github.com/docker/docker/api"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/version"
-)
-
-func TestGetBoolParam(t *testing.T) {
-	if ret, err := getBoolParam("true"); err != nil || !ret {
-		t.Fatalf("true -> true, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("True"); err != nil || !ret {
-		t.Fatalf("True -> true, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("1"); err != nil || !ret {
-		t.Fatalf("1 -> true, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam(""); err != nil || ret {
-		t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("false"); err != nil || ret {
-		t.Fatalf("false -> false, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("0"); err != nil || ret {
-		t.Fatalf("0 -> false, nil | got %t %s", ret, err)
-	}
-	if ret, err := getBoolParam("faux"); err == nil || ret {
-		t.Fatalf("faux -> false, err | got %t %s", ret, err)
-
-	}
-}
-
-func TesthttpError(t *testing.T) {
-	r := httptest.NewRecorder()
-
-	httpError(r, fmt.Errorf("No such method"))
-	if r.Code != http.StatusNotFound {
-		t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
-	}
-
-	httpError(r, fmt.Errorf("This accound hasn't been activated"))
-	if r.Code != http.StatusForbidden {
-		t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
-	}
-
-	httpError(r, fmt.Errorf("Some error"))
-	if r.Code != http.StatusInternalServerError {
-		t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
-	}
-}
-
-func TestGetVersion(t *testing.T) {
-	eng := engine.New()
-	var called bool
-	eng.Register("version", func(job *engine.Job) engine.Status {
-		called = true
-		v := &engine.Env{}
-		v.SetJson("Version", "42.1")
-		v.Set("ApiVersion", "1.1.1.1.1")
-		v.Set("GoVersion", "2.42")
-		v.Set("Os", "Linux")
-		v.Set("Arch", "x86_64")
-		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/version", nil, eng, t)
-	if !called {
-		t.Fatalf("handler was not called")
-	}
-	v := readEnv(r.Body, t)
-	if v.Get("Version") != "42.1" {
-		t.Fatalf("%#v\n", v)
-	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
-	}
-}
-
-func TestGetInfo(t *testing.T) {
-	eng := engine.New()
-	var called bool
-	eng.Register("info", func(job *engine.Job) engine.Status {
-		called = true
-		v := &engine.Env{}
-		v.SetInt("Containers", 1)
-		v.SetInt("Images", 42000)
-		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/info", nil, eng, t)
-	if !called {
-		t.Fatalf("handler was not called")
-	}
-	v := readEnv(r.Body, t)
-	if v.GetInt("Images") != 42000 {
-		t.Fatalf("%#v\n", v)
-	}
-	if v.GetInt("Containers") != 1 {
-		t.Fatalf("%#v\n", v)
-	}
-	assertContentType(r, "application/json", t)
-}
-
-func TestGetImagesJSON(t *testing.T) {
-	eng := engine.New()
-	var called bool
-	eng.Register("images", func(job *engine.Job) engine.Status {
-		called = true
-		v := createEnvFromGetImagesJSONStruct(sampleImage)
-		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/images/json", nil, eng, t)
-	if !called {
-		t.Fatal("handler was not called")
-	}
-	assertHttpNotError(r, t)
-	assertContentType(r, "application/json", t)
-	var observed getImagesJSONStruct
-	if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil {
-		t.Fatal(err)
-	}
-	if !reflect.DeepEqual(observed, sampleImage) {
-		t.Errorf("Expected %#v but got %#v", sampleImage, observed)
-	}
-}
-
-func TestGetImagesJSONFilter(t *testing.T) {
-	eng := engine.New()
-	filter := "nothing"
-	eng.Register("images", func(job *engine.Job) engine.Status {
-		filter = job.Getenv("filter")
-		return engine.StatusOK
-	})
-	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
-	if filter != "aaaa" {
-		t.Errorf("%#v", filter)
-	}
-}
-
-func TestGetImagesJSONFilters(t *testing.T) {
-	eng := engine.New()
-	filter := "nothing"
-	eng.Register("images", func(job *engine.Job) engine.Status {
-		filter = job.Getenv("filters")
-		return engine.StatusOK
-	})
-	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
-	if filter != "nnnn" {
-		t.Errorf("%#v", filter)
-	}
-}
-
-func TestGetImagesJSONAll(t *testing.T) {
-	eng := engine.New()
-	allFilter := "-1"
-	eng.Register("images", func(job *engine.Job) engine.Status {
-		allFilter = job.Getenv("all")
-		return engine.StatusOK
-	})
-	serveRequest("GET", "/images/json?all=1", nil, eng, t)
-	if allFilter != "1" {
-		t.Errorf("%#v", allFilter)
-	}
-}
-
-func TestGetImagesJSONLegacyFormat(t *testing.T) {
-	eng := engine.New()
-	var called bool
-	eng.Register("images", func(job *engine.Job) engine.Status {
-		called = true
-		outsLegacy := engine.NewTable("Created", 0)
-		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
-		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
-	if !called {
-		t.Fatal("handler was not called")
-	}
-	assertHttpNotError(r, t)
-	assertContentType(r, "application/json", t)
-	images := engine.NewTable("Created", 0)
-	if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil {
-		t.Fatal(err)
-	}
-	if images.Len() != 1 {
-		t.Fatalf("Expected 1 image, %d found", images.Len())
-	}
-	image := images.Data[0]
-	if image.Get("Tag") != "test-tag" {
-		t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag"))
-	}
-	if image.Get("Repository") != "test-name" {
-		t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository"))
-	}
-}
-
-func TestGetContainersByName(t *testing.T) {
-	eng := engine.New()
-	name := "container_name"
-	var called bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
-		called = true
-		if job.Args[0] != name {
-			t.Errorf("name != '%s': %#v", name, job.Args[0])
-		}
-		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
-			t.Errorf("dirty env variable not set")
-		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
-			t.Errorf("dirty env variable set when it shouldn't")
-		}
-		v := &engine.Env{}
-		v.SetBool("dirty", true)
-		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
-	if !called {
-		t.Fatal("handler was not called")
-	}
-	assertContentType(r, "application/json", t)
-	var stdoutJson interface{}
-	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
-		t.Fatalf("%#v", err)
-	}
-	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
-		t.Fatalf("%#v", stdoutJson)
-	}
-}
-
-func TestGetEvents(t *testing.T) {
-	eng := engine.New()
-	var called bool
-	eng.Register("events", func(job *engine.Job) engine.Status {
-		called = true
-		since := job.Getenv("since")
-		if since != "1" {
-			t.Fatalf("'since' should be 1, found %#v instead", since)
-		}
-		until := job.Getenv("until")
-		if until != "0" {
-			t.Fatalf("'until' should be 0, found %#v instead", until)
-		}
-		v := &engine.Env{}
-		v.Set("since", since)
-		v.Set("until", until)
-		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
-	if !called {
-		t.Fatal("handler was not called")
-	}
-	assertContentType(r, "application/json", t)
-	var stdout_json struct {
-		Since int
-		Until int
-	}
-	if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
-		t.Fatal(err)
-	}
-	if stdout_json.Since != 1 {
-		t.Errorf("since != 1: %#v", stdout_json.Since)
-	}
-	if stdout_json.Until != 0 {
-		t.Errorf("until != 0: %#v", stdout_json.Until)
-	}
-}
-
-func TestLogs(t *testing.T) {
-	eng := engine.New()
-	var inspect bool
-	var logs bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
-		inspect = true
-		if len(job.Args) == 0 {
-			t.Fatal("Job arguments is empty")
-		}
-		if job.Args[0] != "test" {
-			t.Fatalf("Container name %s, must be test", job.Args[0])
-		}
-		return engine.StatusOK
-	})
-	expected := "logs"
-	eng.Register("logs", func(job *engine.Job) engine.Status {
-		logs = true
-		if len(job.Args) == 0 {
-			t.Fatal("Job arguments is empty")
-		}
-		if job.Args[0] != "test" {
-			t.Fatalf("Container name %s, must be test", job.Args[0])
-		}
-		follow := job.Getenv("follow")
-		if follow != "1" {
-			t.Fatalf("follow: %s, must be 1", follow)
-		}
-		stdout := job.Getenv("stdout")
-		if stdout != "1" {
-			t.Fatalf("stdout %s, must be 1", stdout)
-		}
-		stderr := job.Getenv("stderr")
-		if stderr != "" {
-			t.Fatalf("stderr %s, must be empty", stderr)
-		}
-		timestamps := job.Getenv("timestamps")
-		if timestamps != "1" {
-			t.Fatalf("timestamps %s, must be 1", timestamps)
-		}
-		job.Stdout.Write([]byte(expected))
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
-	if r.Code != http.StatusOK {
-		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
-	}
-	if !inspect {
-		t.Fatal("container_inspect job was not called")
-	}
-	if !logs {
-		t.Fatal("logs job was not called")
-	}
-	res := r.Body.String()
-	if res != expected {
-		t.Fatalf("Output %s, expected %s", res, expected)
-	}
-}
-
-func TestLogsNoStreams(t *testing.T) {
-	eng := engine.New()
-	var inspect bool
-	var logs bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
-		inspect = true
-		if len(job.Args) == 0 {
-			t.Fatal("Job arguments is empty")
-		}
-		if job.Args[0] != "test" {
-			t.Fatalf("Container name %s, must be test", job.Args[0])
-		}
-		return engine.StatusOK
-	})
-	eng.Register("logs", func(job *engine.Job) engine.Status {
-		logs = true
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
-	if r.Code != http.StatusBadRequest {
-		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
-	}
-	if inspect {
-		t.Fatal("container_inspect job was called, but it shouldn't")
-	}
-	if logs {
-		t.Fatal("logs job was called, but it shouldn't")
-	}
-	res := strings.TrimSpace(r.Body.String())
-	expected := "Bad parameters: you must choose at least one stream"
-	if !strings.Contains(res, expected) {
-		t.Fatalf("Output %s, expected %s in it", res, expected)
-	}
-}
-
-func TestGetImagesHistory(t *testing.T) {
-	eng := engine.New()
-	imageName := "docker-test-image"
-	var called bool
-	eng.Register("history", func(job *engine.Job) engine.Status {
-		called = true
-		if len(job.Args) == 0 {
-			t.Fatal("Job arguments is empty")
-		}
-		if job.Args[0] != imageName {
-			t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
-		}
-		v := &engine.Env{}
-		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
-	if !called {
-		t.Fatalf("handler was not called")
-	}
-	if r.Code != http.StatusOK {
-		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
-	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
-	}
-}
-
-func TestGetImagesByName(t *testing.T) {
-	eng := engine.New()
-	name := "image_name"
-	var called bool
-	eng.Register("image_inspect", func(job *engine.Job) engine.Status {
-		called = true
-		if job.Args[0] != name {
-			t.Fatalf("name != '%s': %#v", name, job.Args[0])
-		}
-		if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") {
-			t.Fatal("dirty env variable not set")
-		} else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") {
-			t.Fatal("dirty env variable set when it shouldn't")
-		}
-		v := &engine.Env{}
-		v.SetBool("dirty", true)
-		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
-	if !called {
-		t.Fatal("handler was not called")
-	}
-	if r.HeaderMap.Get("Content-Type") != "application/json" {
-		t.Fatalf("%#v\n", r)
-	}
-	var stdoutJson interface{}
-	if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil {
-		t.Fatalf("%#v", err)
-	}
-	if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 {
-		t.Fatalf("%#v", stdoutJson)
-	}
-}
-
-func TestDeleteContainers(t *testing.T) {
-	eng := engine.New()
-	name := "foo"
-	var called bool
-	eng.Register("rm", func(job *engine.Job) engine.Status {
-		called = true
-		if len(job.Args) == 0 {
-			t.Fatalf("Job arguments is empty")
-		}
-		if job.Args[0] != name {
-			t.Fatalf("name != '%s': %#v", name, job.Args[0])
-		}
-		return engine.StatusOK
-	})
-	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
-	if !called {
-		t.Fatalf("handler was not called")
-	}
-	if r.Code != http.StatusNoContent {
-		t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent)
-	}
-}
-
-func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
-	return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t)
-}
-
-func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
-	r := httptest.NewRecorder()
-	req, err := http.NewRequest(method, target, body)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ServeRequest(eng, version, r, req)
-	return r
-}
-
-func readEnv(src io.Reader, t *testing.T) *engine.Env {
-	out := engine.NewOutput()
-	v, err := out.AddEnv()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := io.Copy(out, src); err != nil {
-		t.Fatal(err)
-	}
-	out.Close()
-	return v
-}
-
-func toJson(data interface{}, t *testing.T) io.Reader {
-	var buf bytes.Buffer
-	if err := json.NewEncoder(&buf).Encode(data); err != nil {
-		t.Fatal(err)
-	}
-	return &buf
-}
-
-func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
-	if recorder.HeaderMap.Get("Content-Type") != content_type {
-		t.Fatalf("%#v\n", recorder)
-	}
-}
-
-// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that
-// should die as soon as we converted all integration tests?
-// assertHttpNotError expect the given response to not have an error.
-// Otherwise the it causes the test to fail.
-func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) {
-	// Non-error http status are [200, 400)
-	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
-		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
-	}
-}
-
-func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env {
-	v := &engine.Env{}
-	v.SetList("RepoTags", data.RepoTags)
-	v.Set("Id", data.Id)
-	v.SetInt64("Created", data.Created)
-	v.SetInt64("Size", data.Size)
-	v.SetInt64("VirtualSize", data.VirtualSize)
-	return v
-}
-
-type getImagesJSONStruct struct {
-	RepoTags    []string
-	Id          string
-	Created     int64
-	Size        int64
-	VirtualSize int64
-}
-
-var sampleImage getImagesJSONStruct = getImagesJSONStruct{
-	RepoTags:    []string{"test-name:test-tag"},
-	Id:          "ID",
-	Created:     999,
-	Size:        777,
-	VirtualSize: 666,
-}
diff --git a/api/server/server_windows.go b/api/server/server_windows.go
index c5d2c2c..9fa5ab6 100644
--- a/api/server/server_windows.go
+++ b/api/server/server_windows.go
@@ -3,29 +3,43 @@
 package server
 
 import (
-	"fmt"
+	"errors"
+	"net"
+	"net/http"
 
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/daemon"
 )
 
 // NewServer sets up the required Server and does protocol specific checking.
-func NewServer(proto, addr string, job *engine.Job) (Server, error) {
-	// Basic error and sanity checking
+func (s *Server) newServer(proto, addr string) (Server, error) {
+	var (
+		err error
+		l   net.Listener
+	)
 	switch proto {
 	case "tcp":
-		return setupTcpHttp(addr, job)
+		l, err = s.initTcpSocket(addr)
+		if err != nil {
+			return nil, err
+		}
 	default:
 		return nil, errors.New("Invalid protocol format. Windows only supports tcp.")
 	}
+	return &HttpServer{
+		&http.Server{
+			Addr:    addr,
+			Handler: s.router,
+		},
+		l,
+	}, nil
 }
 
-// Called through eng.Job("acceptconnections")
-func AcceptConnections(job *engine.Job) engine.Status {
-
+func (s *Server) AcceptConnections(d *daemon.Daemon) {
+	s.daemon = d
 	// close the lock so the listeners start accepting connections
-	if activationLock != nil {
-		close(activationLock)
+	select {
+	case <-s.start:
+	default:
+		close(s.start)
 	}
-
-	return engine.StatusOK
 }
diff --git a/api/types/types.go b/api/types/types.go
index f1b1d04..457808f 100644
--- a/api/types/types.go
+++ b/api/types/types.go
@@ -1,5 +1,13 @@
 package types
 
+import (
+	"time"
+
+	"github.com/docker/docker/daemon/network"
+	"github.com/docker/docker/pkg/version"
+	"github.com/docker/docker/runconfig"
+)
+
 // ContainerCreateResponse contains the information returned to a client on the
 // creation of a new container.
 type ContainerCreateResponse struct {
@@ -9,3 +17,205 @@
 	// Warnings are any warnings encountered during the creation of the container.
 	Warnings []string `json:"Warnings"`
 }
+
+// POST /containers/{name:.*}/exec
+type ContainerExecCreateResponse struct {
+	// ID is the exec ID.
+	ID string `json:"Id"`
+}
+
+// POST /auth
+type AuthResponse struct {
+	// Status is the authentication status
+	Status string `json:"Status"`
+}
+
+// POST "/containers/"+containerID+"/wait"
+type ContainerWaitResponse struct {
+	// StatusCode is the status code of the wait job
+	StatusCode int `json:"StatusCode"`
+}
+
+// POST "/commit?container="+containerID
+type ContainerCommitResponse struct {
+	ID string `json:"Id"`
+}
+
+// GET "/containers/{name:.*}/changes"
+type ContainerChange struct {
+	Kind int
+	Path string
+}
+
+// GET "/images/{name:.*}/history"
+type ImageHistory struct {
+	ID        string `json:"Id"`
+	Created   int64
+	CreatedBy string
+	Tags      []string
+	Size      int64
+	Comment   string
+}
+
+// DELETE "/images/{name:.*}"
+type ImageDelete struct {
+	Untagged string `json:",omitempty"`
+	Deleted  string `json:",omitempty"`
+}
+
+// GET "/images/json"
+type Image struct {
+	ID          string `json:"Id"`
+	ParentId    string
+	RepoTags    []string
+	RepoDigests []string
+	Created     int
+	Size        int
+	VirtualSize int
+	Labels      map[string]string
+}
+
+// GET "/images/{name:.*}/json"
+type ImageInspect struct {
+	Id              string
+	Parent          string
+	Comment         string
+	Created         time.Time
+	Container       string
+	ContainerConfig *runconfig.Config
+	DockerVersion   string
+	Author          string
+	Config          *runconfig.Config
+	Architecture    string
+	Os              string
+	Size            int64
+	VirtualSize     int64
+}
+
+// GET  "/containers/json"
+type Port struct {
+	IP          string
+	PrivatePort int
+	PublicPort  int
+	Type        string
+}
+
+type Container struct {
+	ID         string            `json:"Id"`
+	Names      []string          `json:",omitempty"`
+	Image      string            `json:",omitempty"`
+	Command    string            `json:",omitempty"`
+	Created    int               `json:",omitempty"`
+	Ports      []Port            `json:",omitempty"`
+	SizeRw     int               `json:",omitempty"`
+	SizeRootFs int               `json:",omitempty"`
+	Labels     map[string]string `json:",omitempty"`
+	Status     string            `json:",omitempty"`
+}
+
+// POST "/containers/"+containerID+"/copy"
+type CopyConfig struct {
+	Resource string
+}
+
+// GET "/containers/{name:.*}/top"
+type ContainerProcessList struct {
+	Processes [][]string
+	Titles    []string
+}
+
+type Version struct {
+	Version       string
+	ApiVersion    version.Version
+	GitCommit     string
+	GoVersion     string
+	Os            string
+	Arch          string
+	KernelVersion string `json:",omitempty"`
+}
+
+// GET "/info"
+type Info struct {
+	ID                 string
+	Containers         int
+	Images             int
+	Driver             string
+	DriverStatus       [][2]string
+	MemoryLimit        bool
+	SwapLimit          bool
+	CpuCfsPeriod       bool
+	CpuCfsQuota        bool
+	IPv4Forwarding     bool
+	Debug              bool
+	NFd                int
+	OomKillDisable     bool
+	NGoroutines        int
+	SystemTime         string
+	ExecutionDriver    string
+	LoggingDriver      string
+	NEventsListener    int
+	KernelVersion      string
+	OperatingSystem    string
+	IndexServerAddress string
+	RegistryConfig     interface{}
+	InitSha1           string
+	InitPath           string
+	NCPU               int
+	MemTotal           int64
+	DockerRootDir      string
+	HttpProxy          string
+	HttpsProxy         string
+	NoProxy            string
+	Name               string
+	Labels             []string
+	ExperimentalBuild  bool
+}
+
+// This struct is a temp struct used by execStart
+// Config fields is part of ExecConfig in runconfig package
+type ExecStartCheck struct {
+	// ExecStart will first check if it's detached
+	Detach bool
+	// Check if there's a tty
+	Tty bool
+}
+
+type ContainerState struct {
+	Running    bool
+	Paused     bool
+	Restarting bool
+	OOMKilled  bool
+	Dead       bool
+	Pid        int
+	ExitCode   int
+	Error      string
+	StartedAt  time.Time
+	FinishedAt time.Time
+}
+
+// GET "/containers/{name:.*}/json"
+type ContainerJSON struct {
+	Id              string
+	Created         time.Time
+	Path            string
+	Args            []string
+	Config          *runconfig.Config
+	State           *ContainerState
+	Image           string
+	NetworkSettings *network.Settings
+	ResolvConfPath  string
+	HostnamePath    string
+	HostsPath       string
+	LogPath         string
+	Name            string
+	RestartCount    int
+	Driver          string
+	ExecDriver      string
+	MountLabel      string
+	ProcessLabel    string
+	Volumes         map[string]string
+	VolumesRW       map[string]bool
+	AppArmorProfile string
+	ExecIDs         []string
+	HostConfig      *runconfig.HostConfig
+}
diff --git a/builder/bflag.go b/builder/bflag.go
new file mode 100644
index 0000000..a6a2ba3
--- /dev/null
+++ b/builder/bflag.go
@@ -0,0 +1,155 @@
+package builder
+
+import (
+	"fmt"
+	"strings"
+)
+
+type FlagType int
+
+const (
+	boolType FlagType = iota
+	stringType
+)
+
+type BuilderFlags struct {
+	Args  []string // actual flags/args from cmd line
+	flags map[string]*Flag
+	used  map[string]*Flag
+	Err   error
+}
+
+type Flag struct {
+	bf       *BuilderFlags
+	name     string
+	flagType FlagType
+	Value    string
+}
+
+func NewBuilderFlags() *BuilderFlags {
+	return &BuilderFlags{
+		flags: make(map[string]*Flag),
+		used:  make(map[string]*Flag),
+	}
+}
+
+func (bf *BuilderFlags) AddBool(name string, def bool) *Flag {
+	flag := bf.addFlag(name, boolType)
+	if flag == nil {
+		return nil
+	}
+	if def {
+		flag.Value = "true"
+	} else {
+		flag.Value = "false"
+	}
+	return flag
+}
+
+func (bf *BuilderFlags) AddString(name string, def string) *Flag {
+	flag := bf.addFlag(name, stringType)
+	if flag == nil {
+		return nil
+	}
+	flag.Value = def
+	return flag
+}
+
+func (bf *BuilderFlags) addFlag(name string, flagType FlagType) *Flag {
+	if _, ok := bf.flags[name]; ok {
+		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
+		return nil
+	}
+
+	newFlag := &Flag{
+		bf:       bf,
+		name:     name,
+		flagType: flagType,
+	}
+	bf.flags[name] = newFlag
+
+	return newFlag
+}
+
+func (fl *Flag) IsUsed() bool {
+	if _, ok := fl.bf.used[fl.name]; ok {
+		return true
+	}
+	return false
+}
+
+func (fl *Flag) IsTrue() bool {
+	if fl.flagType != boolType {
+		// Should never get here
+		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
+	}
+	return fl.Value == "true"
+}
+
+func (bf *BuilderFlags) Parse() error {
+	// If there was an error while defining the possible flags
+	// go ahead and bubble it back up here since we didn't do it
+	// earlier in the processing
+	if bf.Err != nil {
+		return fmt.Errorf("Error setting up flags: %s", bf.Err)
+	}
+
+	for _, arg := range bf.Args {
+		if !strings.HasPrefix(arg, "--") {
+			return fmt.Errorf("Arg should start with -- : %s", arg)
+		}
+
+		if arg == "--" {
+			return nil
+		}
+
+		arg = arg[2:]
+		value := ""
+
+		index := strings.Index(arg, "=")
+		if index >= 0 {
+			value = arg[index+1:]
+			arg = arg[:index]
+		}
+
+		flag, ok := bf.flags[arg]
+		if !ok {
+			return fmt.Errorf("Unknown flag: %s", arg)
+		}
+
+		if _, ok = bf.used[arg]; ok {
+			return fmt.Errorf("Duplicate flag specified: %s", arg)
+		}
+
+		bf.used[arg] = flag
+
+		switch flag.flagType {
+		case boolType:
+			// value == "" is only ok if no "=" was specified
+			if index >= 0 && value == "" {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+
+			lower := strings.ToLower(value)
+			if lower == "" {
+				flag.Value = "true"
+			} else if lower == "true" || lower == "false" {
+				flag.Value = lower
+			} else {
+				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
+			}
+
+		case stringType:
+			if index < 0 {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+			flag.Value = value
+
+		default:
+			panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!"))
+		}
+
+	}
+
+	return nil
+}
diff --git a/builder/bflag_test.go b/builder/bflag_test.go
new file mode 100644
index 0000000..d03a1c3
--- /dev/null
+++ b/builder/bflag_test.go
@@ -0,0 +1,187 @@
+package builder
+
+import (
+	"testing"
+)
+
+func TestBuilderFlags(t *testing.T) {
+	var expected string
+	var err error
+
+	// ---
+
+	bf := NewBuilderFlags()
+	bf.Args = []string{}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	bf.Args = []string{"--"}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 := bf.AddString("str1", "")
+	flBool1 := bf.AddBool("bool1", false)
+	bf.Args = []string{}
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.IsUsed() == true {
+		t.Fatalf("Test3 - str1 was not used!")
+	}
+	if flBool1.IsUsed() == true {
+		t.Fatalf("Test3 - bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "HI" {
+		t.Fatalf("Str1 was supposed to default to: HI")
+	}
+	if flBool1.IsTrue() {
+		t.Fatalf("Bool1 was supposed to default to: false")
+	}
+	if flStr1.IsUsed() == true {
+		t.Fatalf("Str1 was not used!")
+	}
+	if flBool1.IsUsed() == true {
+		t.Fatalf("Bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1="}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = ""
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = "BYE"
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test-b1 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=true"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test-b2 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flBool1.IsTrue() {
+		t.Fatalf("Test-b3 Bool1 was supposed to be false")
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool2"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBuilderFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1", "--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "BYE" {
+		t.Fatalf("Teset %s, str1 should be BYE", bf.Args)
+	}
+	if !flBool1.IsTrue() {
+		t.Fatalf("Teset %s, bool1 should be true", bf.Args)
+	}
+}
diff --git a/builder/command/command.go b/builder/command/command.go
index 16544f0..8e5d980 100644
--- a/builder/command/command.go
+++ b/builder/command/command.go
@@ -16,7 +16,6 @@
 	Expose     = "expose"
 	Volume     = "volume"
 	User       = "user"
-	Insert     = "insert"
 )
 
 // Commands is list of all Dockerfile commands
@@ -35,5 +34,4 @@
 	Expose:     {},
 	Volume:     {},
 	User:       {},
-	Insert:     {},
 }
diff --git a/builder/dispatchers.go b/builder/dispatchers.go
index 4d21a75..6d0a30c 100644
--- a/builder/dispatchers.go
+++ b/builder/dispatchers.go
@@ -12,10 +12,11 @@
 	"io/ioutil"
 	"path/filepath"
 	"regexp"
+	"runtime"
 	"sort"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/nat"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/runconfig"
@@ -38,6 +39,9 @@
 // in the dockerfile available from the next statement on via ${foo}.
 //
 func env(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if runtime.GOOS == "windows" {
+		return fmt.Errorf("ENV is not supported on Windows.")
+	}
 	if len(args) == 0 {
 		return fmt.Errorf("ENV requires at least one argument")
 	}
@@ -47,6 +51,26 @@
 		return fmt.Errorf("Bad input to ENV, too many args")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
+	// TODO/FIXME/NOT USED
+	// Just here to show how to use the builder flags stuff within the
+	// context of a builder command. Will remove once we actually add
+	// a builder command to something!
+	/*
+		flBool1 := b.BuilderFlags.AddBool("bool1", false)
+		flStr1 := b.BuilderFlags.AddString("str1", "HI")
+
+		if err := b.BuilderFlags.Parse(); err != nil {
+			return err
+		}
+
+		fmt.Printf("Bool1:%v\n", flBool1)
+		fmt.Printf("Str1:%v\n", flStr1)
+	*/
+
 	commitStr := "ENV"
 
 	for j := 0; j < len(args); j++ {
@@ -81,6 +105,10 @@
 		return fmt.Errorf("MAINTAINER requires exactly one argument")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	b.maintainer = args[0]
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }
@@ -98,6 +126,10 @@
 		return fmt.Errorf("Bad input to LABEL, too many args")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	commitStr := "LABEL"
 
 	if b.Config.Labels == nil {
@@ -126,6 +158,10 @@
 		return fmt.Errorf("ADD requires at least two arguments")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	return b.runContextCommand(args, true, true, "ADD")
 }
 
@@ -138,6 +174,10 @@
 		return fmt.Errorf("COPY requires at least two arguments")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	return b.runContextCommand(args, false, false, "COPY")
 }
 
@@ -150,6 +190,10 @@
 		return fmt.Errorf("FROM requires one argument")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	name := args[0]
 
 	if name == NoBaseImageSpecifier {
@@ -166,12 +210,12 @@
 		}
 	}
 	if err != nil {
-		if b.Daemon.Graph().IsNotExist(err) {
+		if b.Daemon.Graph().IsNotExist(err, name) {
 			image, err = b.pullImage(name)
 		}
 
 		// note that the top level err will still be !nil here if IsNotExist is
-		// not the error. This approach just simplifies hte logic a bit.
+		// not the error. This approach just simplifies the logic a bit.
 		if err != nil {
 			return err
 		}
@@ -194,6 +238,10 @@
 		return fmt.Errorf("ONBUILD requires at least one argument")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
 	switch triggerInstruction {
 	case "ONBUILD":
@@ -217,6 +265,10 @@
 		return fmt.Errorf("WORKDIR requires exactly one argument")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	workdir := args[0]
 
 	if !filepath.IsAbs(workdir) {
@@ -231,10 +283,11 @@
 // RUN some command yo
 //
 // run a command and commit the image. Args are automatically prepended with
-// 'sh -c' in the event there is only one argument. The difference in
-// processing:
+// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
+// only one argument. The difference in processing:
 //
-// RUN echo hi          # sh -c echo hi
+// RUN echo hi          # sh -c echo hi       (Linux)
+// RUN echo hi          # cmd /S /C echo hi   (Windows)
 // RUN [ "echo", "hi" ] # echo hi
 //
 func run(b *Builder, args []string, attributes map[string]bool, original string) error {
@@ -242,10 +295,18 @@
 		return fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	args = handleJsonArgs(args, attributes)
 
 	if !attributes["json"] {
-		args = append([]string{"/bin/sh", "-c"}, args...)
+		if runtime.GOOS != "windows" {
+			args = append([]string{"/bin/sh", "-c"}, args...)
+		} else {
+			args = append([]string{"cmd", "/S /C"}, args...)
+		}
 	}
 
 	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
@@ -262,9 +323,9 @@
 	b.Config.Cmd = config.Cmd
 	runconfig.Merge(b.Config, config)
 
-	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)
 
-	log.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
+	logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
 
 	hit, err := b.probeCache()
 	if err != nil {
@@ -301,13 +362,23 @@
 // Argument handling is the same as RUN.
 //
 func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
-	b.Config.Cmd = handleJsonArgs(args, attributes)
-
-	if !attributes["json"] {
-		b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...)
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
 	}
 
-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", b.Config.Cmd)); err != nil {
+	cmdSlice := handleJsonArgs(args, attributes)
+
+	if !attributes["json"] {
+		if runtime.GOOS != "windows" {
+			cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
+		} else {
+			cmdSlice = append([]string{"cmd", "/S /C"}, cmdSlice...)
+		}
+	}
+
+	b.Config.Cmd = runconfig.NewCommand(cmdSlice...)
+
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
 		return err
 	}
 
@@ -320,25 +391,33 @@
 
 // ENTRYPOINT /usr/sbin/nginx
 //
-// Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
-// accept the CMD as the arguments to /usr/sbin/nginx.
+// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to
+// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
 //
 // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
 // is initialized at NewBuilder time instead of through argument parsing.
 //
 func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	parsed := handleJsonArgs(args, attributes)
 
 	switch {
 	case attributes["json"]:
 		// ENTRYPOINT ["echo", "hi"]
-		b.Config.Entrypoint = parsed
+		b.Config.Entrypoint = runconfig.NewEntrypoint(parsed...)
 	case len(parsed) == 0:
 		// ENTRYPOINT []
 		b.Config.Entrypoint = nil
 	default:
 		// ENTRYPOINT echo hi
-		b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]}
+		if runtime.GOOS != "windows" {
+			b.Config.Entrypoint = runconfig.NewEntrypoint("/bin/sh", "-c", parsed[0])
+		} else {
+			b.Config.Entrypoint = runconfig.NewEntrypoint("cmd", "/S /C", parsed[0])
+		}
 	}
 
 	// when setting the entrypoint if a CMD was not explicitly set then
@@ -366,6 +445,10 @@
 		return fmt.Errorf("EXPOSE requires at least one argument")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	if b.Config.ExposedPorts == nil {
 		b.Config.ExposedPorts = make(nat.PortSet)
 	}
@@ -406,10 +489,18 @@
 // ENTRYPOINT/CMD at container run time.
 //
 func user(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if runtime.GOOS == "windows" {
+		return fmt.Errorf("USER is not supported on Windows.")
+	}
+
 	if len(args) != 1 {
 		return fmt.Errorf("USER requires exactly one argument")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	b.Config.User = args[0]
 	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
 }
@@ -419,10 +510,17 @@
 // Expose the volume /foo for use. Will also accept the JSON array form.
 //
 func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
+	if runtime.GOOS == "windows" {
+		return fmt.Errorf("VOLUME is not supported on Windows.")
+	}
 	if len(args) == 0 {
 		return fmt.Errorf("VOLUME requires at least one argument")
 	}
 
+	if err := b.BuilderFlags.Parse(); err != nil {
+		return err
+	}
+
 	if b.Config.Volumes == nil {
 		b.Config.Volumes = map[string]struct{}{}
 	}
@@ -438,8 +536,3 @@
 	}
 	return nil
 }
-
-// INSERT is no longer accepted, but we still parse it.
-func insert(b *Builder, args []string, attributes map[string]bool, original string) error {
-	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
-}
diff --git a/builder/evaluator.go b/builder/evaluator.go
index b74c1ba..62f86bb 100644
--- a/builder/evaluator.go
+++ b/builder/evaluator.go
@@ -1,4 +1,4 @@
-// builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
+// Package builder is the evaluation step in the Dockerfile parse/evaluate pipeline.
 //
 // It incorporates a dispatch table based on the parser.Node values (see the
 // parser package for more information) that are yielded from the parser itself.
@@ -20,32 +20,27 @@
 package builder
 
 import (
-	"errors"
 	"fmt"
 	"io"
 	"os"
 	"path/filepath"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/builder/command"
 	"github.com/docker/docker/builder/parser"
+	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/tarsum"
-	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 )
 
-var (
-	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
-)
-
 // Environment variable interpolation will happen on these statements only.
 var replaceEnvAllowed = map[string]struct{}{
 	command.Env:     {},
@@ -76,7 +71,6 @@
 		command.Expose:     expose,
 		command.Volume:     volume,
 		command.User:       user,
-		command.Insert:     insert,
 	}
 }
 
@@ -84,7 +78,6 @@
 // processing as it evaluates the parsing result.
 type Builder struct {
 	Daemon *daemon.Daemon
-	Engine *engine.Engine
 
 	// effectively stdio for the run. Because it is not stdio, I said
 	// "Effectively". Do not use stdio anywhere in this package for any reason.
@@ -105,12 +98,12 @@
 	// the final configs of the Dockerfile but dont want the layers
 	disableCommit bool
 
-	AuthConfig     *registry.AuthConfig
-	AuthConfigFile *registry.ConfigFile
+	AuthConfig *cliconfig.AuthConfig
+	ConfigFile *cliconfig.ConfigFile
 
 	// Deprecated, original writer used for ImagePull. To be removed.
 	OutOld          io.Writer
-	StreamFormatter *utils.StreamFormatter
+	StreamFormatter *streamformatter.StreamFormatter
 
 	Config *runconfig.Config // runconfig for cmd, run, entrypoint etc.
 
@@ -122,15 +115,20 @@
 	image          string        // image name for commit processing
 	maintainer     string        // maintainer name. could probably be removed.
 	cmdSet         bool          // indicates is CMD was set in current Dockerfile
+	BuilderFlags   *BuilderFlags // current cmd's BuilderFlags - temporary
 	context        tarsum.TarSum // the context is a tarball that is uploaded by the client
 	contextPath    string        // the path of the temporary directory the local context is unpacked to (server side)
 	noBaseImage    bool          // indicates that this build does not start from any base image, but is being built from an empty file system.
 
 	// Set resource restrictions for build containers
-	cpuSetCpus string
-	cpuShares  int64
-	memory     int64
-	memorySwap int64
+	cpuSetCpus   string
+	cpuSetMems   string
+	cpuShares    int64
+	cpuPeriod    int64
+	cpuQuota     int64
+	cgroupParent string
+	memory       int64
+	memorySwap   int64
 
 	cancelled <-chan struct{} // When closed, job was cancelled.
 }
@@ -154,7 +152,7 @@
 
 	defer func() {
 		if err := os.RemoveAll(b.contextPath); err != nil {
-			log.Debugf("[BUILDER] failed to remove temporary context: %s", err)
+			logrus.Debugf("[BUILDER] failed to remove temporary context: %s", err)
 		}
 	}()
 
@@ -170,7 +168,7 @@
 	for i, n := range b.dockerfile.Children {
 		select {
 		case <-b.cancelled:
-			log.Debug("Builder: build cancelled!")
+			logrus.Debug("Builder: build cancelled!")
 			fmt.Fprintf(b.OutStream, "Build cancelled")
 			return "", fmt.Errorf("Build cancelled")
 		default:
@@ -182,7 +180,7 @@
 			}
 			return "", err
 		}
-		fmt.Fprintf(b.OutStream, " ---> %s\n", common.TruncateID(b.image))
+		fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
 		if b.Remove {
 			b.clearTmp()
 		}
@@ -192,7 +190,7 @@
 		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
 	}
 
-	fmt.Fprintf(b.OutStream, "Successfully built %s\n", common.TruncateID(b.image))
+	fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image))
 	return b.image, nil
 }
 
@@ -225,7 +223,7 @@
 		return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile)
 	}
 	if fi.Size() == 0 {
-		return ErrDockerfileEmpty
+		return fmt.Errorf("The Dockerfile (%s) cannot be empty", origFile)
 	}
 
 	f, err := os.Open(filename)
@@ -280,9 +278,14 @@
 	cmd := ast.Value
 	attrs := ast.Attributes
 	original := ast.Original
+	flags := ast.Flags
 	strs := []string{}
 	msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd))
 
+	if len(ast.Flags) > 0 {
+		msg += " " + strings.Join(ast.Flags, " ")
+	}
+
 	if cmd == "onbuild" {
 		if ast.Next == nil {
 			return fmt.Errorf("ONBUILD requires at least one argument")
@@ -290,6 +293,11 @@
 		ast = ast.Next.Children[0]
 		strs = append(strs, ast.Value)
 		msg += " " + ast.Value
+
+		if len(ast.Flags) > 0 {
+			msg += " " + strings.Join(ast.Flags, " ")
+		}
+
 	}
 
 	// count the number of nodes that we are going to traverse first
@@ -329,6 +337,8 @@
 	// XXX yes, we skip any cmds that are not valid; the parser should have
 	// picked these out already.
 	if f, ok := evaluateTable[cmd]; ok {
+		b.BuilderFlags = NewBuilderFlags()
+		b.BuilderFlags.Args = flags
 		return f(b, strList, attrs, original)
 	}
 
diff --git a/builder/internals.go b/builder/internals.go
index 7c22b47..9e58bb2 100644
--- a/builder/internals.go
+++ b/builder/internals.go
@@ -19,23 +19,24 @@
 	"syscall"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/graph"
 	imagepkg "github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/progressreader"
-	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
 )
 
 func (b *Builder) readContext(context io.Reader) error {
@@ -61,7 +62,7 @@
 	return nil
 }
 
-func (b *Builder) commit(id string, autoCmd []string, comment string) error {
+func (b *Builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
 	if b.disableCommit {
 		return nil
 	}
@@ -71,8 +72,8 @@
 	b.Config.Image = b.image
 	if id == "" {
 		cmd := b.Config.Cmd
-		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
-		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
+		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)
 
 		hit, err := b.probeCache()
 		if err != nil {
@@ -146,8 +147,16 @@
 	// do the copy (e.g. hash value if cached).  Don't actually do
 	// the copy until we've looked at all src files
 	for _, orig := range args[0 : len(args)-1] {
-		err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
-		if err != nil {
+		if err := calcCopyInfo(
+			b,
+			cmdName,
+			&copyInfos,
+			orig,
+			dest,
+			allowRemote,
+			allowDecompression,
+			true,
+		); err != nil {
 			return err
 		}
 	}
@@ -182,8 +191,8 @@
 	}
 
 	cmd := b.Config.Cmd
-	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
-	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+	b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
+	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)
 
 	hit, err := b.probeCache()
 	if err != nil {
@@ -217,7 +226,7 @@
 	return nil
 }
 
-func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
+func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
 
 	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
 		origPath = origPath[1:]
@@ -250,7 +259,7 @@
 		*cInfos = append(*cInfos, &ci)
 
 		// Initiate the download
-		resp, err := utils.Download(ci.origPath)
+		resp, err := httputils.Download(ci.origPath)
 		if err != nil {
 			return err
 		}
@@ -342,7 +351,7 @@
 	}
 
 	// Deal with wildcards
-	if ContainsWildcards(origPath) {
+	if allowWildcards && ContainsWildcards(origPath) {
 		for _, fileInfo := range b.context.GetSums() {
 			if fileInfo.Name() == "" {
 				continue
@@ -352,7 +361,9 @@
 				continue
 			}
 
-			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
+			// Note we set allowWildcards to false in case the name has
+			// a * in it
+			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
 		}
 		return nil
 	}
@@ -434,24 +445,27 @@
 	if tag == "" {
 		tag = "latest"
 	}
-	job := b.Engine.Job("pull", remote, tag)
+
 	pullRegistryAuth := b.AuthConfig
-	if len(b.AuthConfigFile.Configs) > 0 {
+	if len(b.ConfigFile.AuthConfigs) > 0 {
 		// The request came with a full auth config file, we prefer to use that
-		repoInfo, err := registry.ResolveRepositoryInfo(job, remote)
+		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
 		if err != nil {
 			return nil, err
 		}
-		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index)
+		resolvedAuth := registry.ResolveAuthConfig(b.ConfigFile, repoInfo.Index)
 		pullRegistryAuth = &resolvedAuth
 	}
-	job.SetenvBool("json", b.StreamFormatter.Json())
-	job.SetenvBool("parallel", true)
-	job.SetenvJson("authConfig", pullRegistryAuth)
-	job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld))
-	if err := job.Run(); err != nil {
+
+	imagePullConfig := &graph.ImagePullConfig{
+		AuthConfig: pullRegistryAuth,
+		OutStream:  ioutils.NopWriteCloser(b.OutOld),
+	}
+
+	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
 		return nil, err
 	}
+
 	image, err := b.Daemon.Repositories().LookupImage(name)
 	if err != nil {
 		return nil, err
@@ -476,7 +490,7 @@
 		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
 	}
 
-	// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
+	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
 	onBuildTriggers := b.Config.OnBuild
 	b.Config.OnBuild = []string{}
 
@@ -521,13 +535,13 @@
 		return false, err
 	}
 	if cache == nil {
-		log.Debugf("[BUILDER] Cache miss")
+		logrus.Debugf("[BUILDER] Cache miss")
 		b.cacheBusted = true
 		return false, nil
 	}
 
 	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
-	log.Debugf("[BUILDER] Use cached version")
+	logrus.Debugf("[BUILDER] Use cached version")
 	b.image = cache.ID
 	return true, nil
 }
@@ -539,10 +553,15 @@
 	b.Config.Image = b.image
 
 	hostConfig := &runconfig.HostConfig{
-		CpuShares:  b.cpuShares,
-		CpusetCpus: b.cpuSetCpus,
-		Memory:     b.memory,
-		MemorySwap: b.memorySwap,
+		CpuShares:    b.cpuShares,
+		CpuPeriod:    b.cpuPeriod,
+		CpuQuota:     b.cpuQuota,
+		CpusetCpus:   b.cpuSetCpus,
+		CpusetMems:   b.cpuSetMems,
+		CgroupParent: b.cgroupParent,
+		Memory:       b.memory,
+		MemorySwap:   b.memorySwap,
+		NetworkMode:  "bridge",
 	}
 
 	config := *b.Config
@@ -557,14 +576,15 @@
 	}
 
 	b.TmpContainers[c.ID] = struct{}{}
-	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", common.TruncateID(c.ID))
+	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))
 
-	if len(config.Cmd) > 0 {
+	if config.Cmd.Len() > 0 {
 		// override the entry point that may have been picked up from the base image
-		c.Path = config.Cmd[0]
-		c.Args = config.Cmd[1:]
+		s := config.Cmd.Slice()
+		c.Path = s[0]
+		c.Args = s[1:]
 	} else {
-		config.Cmd = []string{}
+		config.Cmd = runconfig.NewCommand()
 	}
 
 	return c, nil
@@ -573,7 +593,7 @@
 func (b *Builder) run(c *daemon.Container) error {
 	var errCh chan error
 	if b.Verbose {
-		errCh = b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, b.OutStream, b.ErrStream)
+		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
 	}
 
 	//start the container
@@ -586,7 +606,7 @@
 	go func() {
 		select {
 		case <-b.cancelled:
-			log.Debugln("Build cancelled, killing container:", c.ID)
+			logrus.Debugln("Build cancelled, killing container:", c.ID)
 			c.Kill()
 		case <-finished:
 		}
@@ -601,11 +621,10 @@
 
 	// Wait for it to finish
 	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
-		err := &utils.JSONError{
-			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
+		return &jsonmessage.JSONError{
+			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
 			Code:    ret,
 		}
-		return err
 	}
 
 	return nil
@@ -637,14 +656,12 @@
 		err        error
 		destExists = true
 		origPath   = path.Join(b.contextPath, orig)
-		destPath   = path.Join(container.RootfsPath(), dest)
+		destPath   string
 	)
 
-	if destPath != container.RootfsPath() {
-		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
-		if err != nil {
-			return err
-		}
+	destPath, err = container.GetResourcePath(dest)
+	if err != nil {
+		return err
 	}
 
 	// Preserve the trailing '/'
@@ -687,7 +704,7 @@
 		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
 			return nil
 		} else if err != io.EOF {
-			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
+			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
 		}
 	}
 
@@ -747,17 +764,15 @@
 
 func (b *Builder) clearTmp() {
 	for c := range b.TmpContainers {
-		tmp, err := b.Daemon.Get(c)
-		if err != nil {
-			fmt.Fprint(b.OutStream, err.Error())
+		rmConfig := &daemon.ContainerRmConfig{
+			ForceRemove:  true,
+			RemoveVolume: true,
 		}
-
-		if err := b.Daemon.Rm(tmp); err != nil {
-			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", common.TruncateID(c), err)
+		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
+			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
 			return
 		}
-		b.Daemon.DeleteVolumes(tmp.VolumePaths())
 		delete(b.TmpContainers, c)
-		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", common.TruncateID(c))
+		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
 	}
 }
diff --git a/builder/job.go b/builder/job.go
index 59df87e..c081dbe 100644
--- a/builder/job.go
+++ b/builder/job.go
@@ -2,20 +2,22 @@
 
 import (
 	"bytes"
-	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
-	"os/exec"
 	"strings"
+	"sync"
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/builder/parser"
+	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/graph"
+	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
@@ -34,169 +36,171 @@
 	"onbuild":    true,
 }
 
-type BuilderJob struct {
-	Engine *engine.Engine
-	Daemon *daemon.Daemon
+type Config struct {
+	DockerfileName string
+	RemoteURL      string
+	RepoName       string
+	SuppressOutput bool
+	NoCache        bool
+	Remove         bool
+	ForceRemove    bool
+	Pull           bool
+	Memory         int64
+	MemorySwap     int64
+	CpuShares      int64
+	CpuPeriod      int64
+	CpuQuota       int64
+	CpuSetCpus     string
+	CpuSetMems     string
+	CgroupParent   string
+	AuthConfig     *cliconfig.AuthConfig
+	ConfigFile     *cliconfig.ConfigFile
+
+	Stdout  io.Writer
+	Context io.ReadCloser
+	// When closed, the job has been cancelled.
+	// Note: not all jobs implement cancellation.
+	// See Job.Cancel() and Job.WaitCancelled()
+	cancelled  chan struct{}
+	cancelOnce sync.Once
 }
 
-func (b *BuilderJob) Install() {
-	b.Engine.Register("build", b.CmdBuild)
-	b.Engine.Register("build_config", b.CmdBuildConfig)
+// When called, causes the Job.WaitCancelled channel to unblock.
+func (b *Config) Cancel() {
+	b.cancelOnce.Do(func() {
+		close(b.cancelled)
+	})
 }
 
-func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
-	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
+// Returns a channel which is closed ("never blocks") when the job is cancelled.
+func (b *Config) WaitCancelled() <-chan struct{} {
+	return b.cancelled
+}
+
+func NewBuildConfig() *Config {
+	return &Config{
+		AuthConfig: &cliconfig.AuthConfig{},
+		ConfigFile: &cliconfig.ConfigFile{},
+		cancelled:  make(chan struct{}),
 	}
+}
+
+func Build(d *daemon.Daemon, buildConfig *Config) error {
 	var (
-		dockerfileName = job.Getenv("dockerfile")
-		remoteURL      = job.Getenv("remote")
-		repoName       = job.Getenv("t")
-		suppressOutput = job.GetenvBool("q")
-		noCache        = job.GetenvBool("nocache")
-		rm             = job.GetenvBool("rm")
-		forceRm        = job.GetenvBool("forcerm")
-		pull           = job.GetenvBool("pull")
-		memory         = job.GetenvInt64("memory")
-		memorySwap     = job.GetenvInt64("memswap")
-		cpuShares      = job.GetenvInt64("cpushares")
-		cpuSetCpus     = job.Getenv("cpusetcpus")
-		authConfig     = &registry.AuthConfig{}
-		configFile     = &registry.ConfigFile{}
-		tag            string
-		context        io.ReadCloser
+		repoName string
+		tag      string
+		context  io.ReadCloser
 	)
 
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("configFile", configFile)
-
-	repoName, tag = parsers.ParseRepositoryTag(repoName)
+	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
 	if repoName != "" {
 		if err := registry.ValidateRepositoryName(repoName); err != nil {
-			return job.Error(err)
+			return err
 		}
 		if len(tag) > 0 {
-			if err := graph.ValidateTagName(tag); err != nil {
-				return job.Error(err)
+			if err := tags.ValidateTagName(tag); err != nil {
+				return err
 			}
 		}
 	}
 
-	if remoteURL == "" {
-		context = ioutil.NopCloser(job.Stdin)
-	} else if urlutil.IsGitURL(remoteURL) {
-		if !urlutil.IsGitTransport(remoteURL) {
-			remoteURL = "https://" + remoteURL
-		}
-		root, err := ioutil.TempDir("", "docker-build-git")
+	if buildConfig.RemoteURL == "" {
+		context = ioutil.NopCloser(buildConfig.Context)
+	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
+		root, err := utils.GitClone(buildConfig.RemoteURL)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer os.RemoveAll(root)
 
-		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
-			return job.Errorf("Error trying to use git: %s (%s)", err, output)
-		}
-
 		c, err := archive.Tar(root, archive.Uncompressed)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		context = c
-	} else if urlutil.IsURL(remoteURL) {
-		f, err := utils.Download(remoteURL)
+	} else if urlutil.IsURL(buildConfig.RemoteURL) {
+		f, err := httputils.Download(buildConfig.RemoteURL)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer f.Body.Close()
 		dockerFile, err := ioutil.ReadAll(f.Body)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		// When we're downloading just a Dockerfile put it in
 		// the default name - don't allow the client to move/specify it
-		dockerfileName = api.DefaultDockerfileName
+		buildConfig.DockerfileName = api.DefaultDockerfileName
 
-		c, err := archive.Generate(dockerfileName, string(dockerFile))
+		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		context = c
 	}
 	defer context.Close()
 
-	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
+	sf := streamformatter.NewJSONStreamFormatter()
 
 	builder := &Builder{
-		Daemon: b.Daemon,
-		Engine: b.Engine,
-		OutStream: &utils.StdoutFormater{
-			Writer:          job.Stdout,
+		Daemon: d,
+		OutStream: &streamformatter.StdoutFormater{
+			Writer:          buildConfig.Stdout,
 			StreamFormatter: sf,
 		},
-		ErrStream: &utils.StderrFormater{
-			Writer:          job.Stdout,
+		ErrStream: &streamformatter.StderrFormater{
+			Writer:          buildConfig.Stdout,
 			StreamFormatter: sf,
 		},
-		Verbose:         !suppressOutput,
-		UtilizeCache:    !noCache,
-		Remove:          rm,
-		ForceRemove:     forceRm,
-		Pull:            pull,
-		OutOld:          job.Stdout,
+		Verbose:         !buildConfig.SuppressOutput,
+		UtilizeCache:    !buildConfig.NoCache,
+		Remove:          buildConfig.Remove,
+		ForceRemove:     buildConfig.ForceRemove,
+		Pull:            buildConfig.Pull,
+		OutOld:          buildConfig.Stdout,
 		StreamFormatter: sf,
-		AuthConfig:      authConfig,
-		AuthConfigFile:  configFile,
-		dockerfileName:  dockerfileName,
-		cpuShares:       cpuShares,
-		cpuSetCpus:      cpuSetCpus,
-		memory:          memory,
-		memorySwap:      memorySwap,
-		cancelled:       job.WaitCancelled(),
+		AuthConfig:      buildConfig.AuthConfig,
+		ConfigFile:      buildConfig.ConfigFile,
+		dockerfileName:  buildConfig.DockerfileName,
+		cpuShares:       buildConfig.CpuShares,
+		cpuPeriod:       buildConfig.CpuPeriod,
+		cpuQuota:        buildConfig.CpuQuota,
+		cpuSetCpus:      buildConfig.CpuSetCpus,
+		cpuSetMems:      buildConfig.CpuSetMems,
+		cgroupParent:    buildConfig.CgroupParent,
+		memory:          buildConfig.Memory,
+		memorySwap:      buildConfig.MemorySwap,
+		cancelled:       buildConfig.WaitCancelled(),
 	}
 
 	id, err := builder.Run(context)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if repoName != "" {
-		b.Daemon.Repositories().Set(repoName, tag, id, true)
+		return d.Repositories().Tag(repoName, tag, id, true)
 	}
-	return engine.StatusOK
+	return nil
 }
 
-func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
-	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
-	}
-
-	var (
-		changes   = job.GetenvList("changes")
-		newConfig runconfig.Config
-	)
-
-	if err := job.GetenvJson("config", &newConfig); err != nil {
-		return job.Error(err)
-	}
-
+func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
 	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 
 	// ensure that the commands are valid
 	for _, n := range ast.Children {
 		if !validCommitCommands[n.Value] {
-			return job.Errorf("%s is not a valid change command", n.Value)
+			return nil, fmt.Errorf("%s is not a valid change command", n.Value)
 		}
 	}
 
 	builder := &Builder{
-		Daemon:        b.Daemon,
-		Engine:        b.Engine,
-		Config:        &newConfig,
+		Daemon:        d,
+		Config:        c,
 		OutStream:     ioutil.Discard,
 		ErrStream:     ioutil.Discard,
 		disableCommit: true,
@@ -204,12 +208,32 @@
 
 	for i, n := range ast.Children {
 		if err := builder.dispatch(i, n); err != nil {
-			return job.Error(err)
+			return nil, err
 		}
 	}
 
-	if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil {
-		return job.Error(err)
+	return builder.Config, nil
+}
+
+func Commit(d *daemon.Daemon, name string, c *daemon.ContainerCommitConfig) (string, error) {
+	container, err := d.Get(name)
+	if err != nil {
+		return "", err
 	}
-	return engine.StatusOK
+
+	newConfig, err := BuildFromConfig(d, c.Config, c.Changes)
+	if err != nil {
+		return "", err
+	}
+
+	if err := runconfig.Merge(newConfig, container.Config); err != nil {
+		return "", err
+	}
+
+	img, err := d.Commit(container, c.Repo, c.Tag, c.Comment, c.Author, c.Pause, newConfig)
+	if err != nil {
+		return "", err
+	}
+
+	return img.ID, nil
 }
diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go
index 3026a0b..8db360c 100644
--- a/builder/parser/line_parsers.go
+++ b/builder/parser/line_parsers.go
@@ -233,23 +233,24 @@
 // parseJSON converts JSON arrays to an AST.
 func parseJSON(rest string) (*Node, map[string]bool, error) {
 	var myJson []interface{}
-	if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
+	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJson); err != nil {
 		return nil, nil, err
 	}
 
 	var top, prev *Node
 	for _, str := range myJson {
-		if s, ok := str.(string); !ok {
+		s, ok := str.(string)
+		if !ok {
 			return nil, nil, errDockerfileNotStringArray
-		} else {
-			node := &Node{Value: s}
-			if prev == nil {
-				top = node
-			} else {
-				prev.Next = node
-			}
-			prev = node
 		}
+
+		node := &Node{Value: s}
+		if prev == nil {
+			top = node
+		} else {
+			prev.Next = node
+		}
+		prev = node
 	}
 
 	return top, map[string]bool{"json": true}, nil
@@ -278,7 +279,7 @@
 }
 
 // parseMaybeJSONToList determines if the argument appears to be a JSON array. If
-// so, passes to parseJSON; if not, attmpts to parse it as a whitespace
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace
 // delimited string.
 func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
 	node, attrs, err := parseJSON(rest)
diff --git a/builder/parser/parser.go b/builder/parser/parser.go
index 1ab151b..2260cd5 100644
--- a/builder/parser/parser.go
+++ b/builder/parser/parser.go
@@ -29,6 +29,7 @@
 	Children   []*Node         // the children of this sexp
 	Attributes map[string]bool // special attributes for this node
 	Original   string          // original line used before parsing
+	Flags      []string        // only top Node should have this set
 }
 
 var (
@@ -60,7 +61,6 @@
 		command.Entrypoint: parseMaybeJSON,
 		command.Expose:     parseStringsWhitespaceDelimited,
 		command.Volume:     parseMaybeJSONToList,
-		command.Insert:     parseIgnore,
 	}
 }
 
@@ -75,7 +75,7 @@
 		return line, nil, nil
 	}
 
-	cmd, args, err := splitCommand(line)
+	cmd, flags, args, err := splitCommand(line)
 	if err != nil {
 		return "", nil, err
 	}
@@ -91,6 +91,7 @@
 	node.Next = sexp
 	node.Attributes = attrs
 	node.Original = line
+	node.Flags = flags
 
 	return "", node, nil
 }
diff --git a/builder/parser/testfiles/flags/Dockerfile b/builder/parser/testfiles/flags/Dockerfile
new file mode 100644
index 0000000..2418e0f
--- /dev/null
+++ b/builder/parser/testfiles/flags/Dockerfile
@@ -0,0 +1,10 @@
+FROM scratch
+COPY foo /tmp/
+COPY --user=me foo /tmp/
+COPY --doit=true foo /tmp/
+COPY --user=me --doit=true foo /tmp/
+COPY --doit=true -- foo /tmp/
+COPY -- foo /tmp/
+CMD --doit [ "a", "b" ]
+CMD --doit=true -- [ "a", "b" ]
+CMD --doit -- [ ]
diff --git a/builder/parser/testfiles/flags/result b/builder/parser/testfiles/flags/result
new file mode 100644
index 0000000..4578f4c
--- /dev/null
+++ b/builder/parser/testfiles/flags/result
@@ -0,0 +1,10 @@
+(from "scratch")
+(copy "foo" "/tmp/")
+(copy ["--user=me"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy ["--user=me" "--doit=true"] "foo" "/tmp/")
+(copy ["--doit=true"] "foo" "/tmp/")
+(copy "foo" "/tmp/")
+(cmd ["--doit"] "a" "b")
+(cmd ["--doit=true"] "a" "b")
+(cmd ["--doit"])
diff --git a/builder/parser/utils.go b/builder/parser/utils.go
index a60ad12..5d82e96 100644
--- a/builder/parser/utils.go
+++ b/builder/parser/utils.go
@@ -1,8 +1,10 @@
 package parser
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
+	"unicode"
 )
 
 // dumps the AST defined by `node` as a list of sexps. Returns a string
@@ -11,6 +13,10 @@
 	str := ""
 	str += node.Value
 
+	if len(node.Flags) > 0 {
+		str += fmt.Sprintf(" %q", node.Flags)
+	}
+
 	for _, n := range node.Children {
 		str += "(" + n.Dump() + ")\n"
 	}
@@ -48,20 +54,23 @@
 
 // splitCommand takes a single line of text and parses out the cmd and args,
 // which are used for dispatching to more exact parsing functions.
-func splitCommand(line string) (string, string, error) {
+func splitCommand(line string) (string, []string, string, error) {
 	var args string
+	var flags []string
 
 	// Make sure we get the same results irrespective of leading/trailing spaces
 	cmdline := TOKEN_WHITESPACE.Split(strings.TrimSpace(line), 2)
 	cmd := strings.ToLower(cmdline[0])
 
 	if len(cmdline) == 2 {
-		args = strings.TrimSpace(cmdline[1])
+		var err error
+		args, flags, err = extractBuilderFlags(cmdline[1])
+		if err != nil {
+			return "", nil, "", err
+		}
 	}
 
-	// the cmd should never have whitespace, but it's possible for the args to
-	// have trailing whitespace.
-	return cmd, args, nil
+	return cmd, flags, strings.TrimSpace(args), nil
 }
 
 // covers comments and empty lines. Lines should be trimmed before passing to
@@ -74,3 +83,94 @@
 
 	return line
 }
+
+func extractBuilderFlags(line string) (string, []string, error) {
+	// Parses the BuilderFlags and returns the remaining part of the line
+
+	const (
+		inSpaces = iota // looking for start of a word
+		inWord
+		inQuote
+	)
+
+	words := []string{}
+	phase := inSpaces
+	word := ""
+	quote := '\000'
+	blankOK := false
+	var ch rune
+
+	for pos := 0; pos <= len(line); pos++ {
+		if pos != len(line) {
+			ch = rune(line[pos])
+		}
+
+		if phase == inSpaces { // Looking for start of word
+			if pos == len(line) { // end of input
+				break
+			}
+			if unicode.IsSpace(ch) { // skip spaces
+				continue
+			}
+
+			// Only keep going if the next word starts with --
+			if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
+				return line[pos:], words, nil
+			}
+
+			phase = inWord // found someting with "--", fall thru
+		}
+		if (phase == inWord || phase == inQuote) && (pos == len(line)) {
+			if word != "--" && (blankOK || len(word) > 0) {
+				words = append(words, word)
+			}
+			break
+		}
+		if phase == inWord {
+			if unicode.IsSpace(ch) {
+				phase = inSpaces
+				if word == "--" {
+					return line[pos:], words, nil
+				}
+				if blankOK || len(word) > 0 {
+					words = append(words, word)
+				}
+				word = ""
+				blankOK = false
+				continue
+			}
+			if ch == '\'' || ch == '"' {
+				quote = ch
+				blankOK = true
+				phase = inQuote
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(line) {
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(line[pos])
+			}
+			word += string(ch)
+			continue
+		}
+		if phase == inQuote {
+			if ch == quote {
+				phase = inWord
+				continue
+			}
+			if ch == '\\' {
+				if pos+1 == len(line) {
+					phase = inWord
+					continue // just skip \ at end
+				}
+				pos++
+				ch = rune(line[pos])
+			}
+			word += string(ch)
+		}
+	}
+
+	return "", words, nil
+}
diff --git a/builder/shell_parser.go b/builder/shell_parser.go
index b8c7467..1ea44f6 100644
--- a/builder/shell_parser.go
+++ b/builder/shell_parser.go
@@ -157,15 +157,47 @@
 			sw.next()
 			return sw.getEnv(name), nil
 		}
-		return "", fmt.Errorf("Unsupported ${} substitution: %s", sw.word)
-	} else {
-		// $xxx case
-		name := sw.processName()
-		if name == "" {
-			return "$", nil
+		if ch == ':' {
+			// Special ${xx:...} format processing
+			// Yes it allows for recursive $'s in the ... spot
+
+			sw.next() // skip over :
+			modifier := sw.next()
+
+			word, err := sw.processStopOn('}')
+			if err != nil {
+				return "", err
+			}
+
+			// Grab the current value of the variable in question so we
+			// can use to to determine what to do based on the modifier
+			newValue := sw.getEnv(name)
+
+			switch modifier {
+			case '+':
+				if newValue != "" {
+					newValue = word
+				}
+				return newValue, nil
+
+			case '-':
+				if newValue == "" {
+					newValue = word
+				}
+				return newValue, nil
+
+			default:
+				return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word)
+			}
 		}
-		return sw.getEnv(name), nil
+		return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word)
 	}
+	// $xxx case
+	name := sw.processName()
+	if name == "" {
+		return "$", nil
+	}
+	return sw.getEnv(name), nil
 }
 
 func (sw *shellWord) processName() string {
diff --git a/builder/words b/builder/words
index 2148f72..1114a7e 100644
--- a/builder/words
+++ b/builder/words
@@ -15,7 +15,7 @@
 'hello\'                 |     hello\
 "''"                     |     ''
 $.                       |     $.
-$1                       |     
+$1                       |
 he$1x                    |     hex
 he$.x                    |     he$.x
 he$pwd.                  |     he.
@@ -30,6 +30,17 @@
 he${hi}xx                |     hexx
 he${PWD}                 |     he/home
 he${.}                   |     error
+he${XXX:-000}xx          |     he000xx
+he${PWD:-000}xx          |     he/homexx
+he${XXX:-$PWD}xx         |     he/homexx
+he${XXX:-${PWD:-yyy}}xx  |     he/homexx
+he${XXX:-${YYY:-yyy}}xx  |     heyyyxx
+he${XXX:YYY}             |     error
+he${XXX:+${PWD}}xx       |     hexx
+he${PWD:+${XXX}}xx       |     hexx
+he${PWD:+${SHELL}}xx     |     hebashxx
+he${XXX:+000}xx          |     hexx
+he${PWD:+000}xx          |     he000xx
 'he${XX}'                |     he${XX}
 "he${PWD}"               |     he/home
 "he'$PWD'"               |     he'/home'
@@ -41,3 +52,7 @@
 "he\$PWD"                |     he$PWD
 'he\$PWD'                |     he\$PWD
 he${PWD                  |     error
+he${PWD:=000}xx          |     error
+he${PWD:+${PWD}:}xx      |     he/home:xx
+he${XXX:-\$PWD:}xx       |     he$PWD:xx
+he${XXX:-\${PWD}z}xx     |     he${PWDz}xx
diff --git a/builtins/builtins.go b/builtins/builtins.go
deleted file mode 100644
index 1bd9362..0000000
--- a/builtins/builtins.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package builtins
-
-import (
-	"runtime"
-
-	"github.com/docker/docker/api"
-	apiserver "github.com/docker/docker/api/server"
-	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/daemon/networkdriver/bridge"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/events"
-	"github.com/docker/docker/pkg/parsers/kernel"
-)
-
-func Register(eng *engine.Engine) error {
-	if err := daemon(eng); err != nil {
-		return err
-	}
-	if err := remote(eng); err != nil {
-		return err
-	}
-	if err := events.New().Install(eng); err != nil {
-		return err
-	}
-	if err := eng.Register("version", dockerVersion); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// remote: a RESTful api for cross-docker communication
-func remote(eng *engine.Engine) error {
-	if err := eng.Register("serveapi", apiserver.ServeApi); err != nil {
-		return err
-	}
-	return eng.Register("acceptconnections", apiserver.AcceptConnections)
-}
-
-// daemon: a default execution and storage backend for Docker on Linux,
-// with the following underlying components:
-//
-// * Pluggable storage drivers including aufs, vfs, lvm and btrfs.
-// * Pluggable execution drivers including lxc and chroot.
-//
-// In practice `daemon` still includes most core Docker components, including:
-//
-// * The reference registry client implementation
-// * Image management
-// * The build facility
-// * Logging
-//
-// These components should be broken off into plugins of their own.
-//
-func daemon(eng *engine.Engine) error {
-	return eng.Register("init_networkdriver", bridge.InitDriver)
-}
-
-// builtins jobs independent of any subsystem
-func dockerVersion(job *engine.Job) engine.Status {
-	v := &engine.Env{}
-	v.SetJson("Version", dockerversion.VERSION)
-	v.SetJson("ApiVersion", api.APIVERSION)
-	v.SetJson("GitCommit", dockerversion.GITCOMMIT)
-	v.Set("GoVersion", runtime.Version())
-	v.Set("Os", runtime.GOOS)
-	v.Set("Arch", runtime.GOARCH)
-	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
-		v.Set("KernelVersion", kernelVersion.String())
-	}
-	if _, err := v.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
diff --git a/cliconfig/config.go b/cliconfig/config.go
new file mode 100644
index 0000000..2a27589
--- /dev/null
+++ b/cliconfig/config.go
@@ -0,0 +1,207 @@
+package cliconfig
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/homedir"
+)
+
+const (
+	// Where we store the config file
+	CONFIGFILE     = "config.json"
+	OLD_CONFIGFILE = ".dockercfg"
+
+	// This constant is only used for really old config files when the
+	// URL wasn't saved as part of the config file and it was just
+	// assumed to be this value.
+	DEFAULT_INDEXSERVER = "https://index.docker.io/v1/"
+)
+
+var (
+	ErrConfigFileMissing = errors.New("The Auth config file is missing")
+)
+
+// Registry Auth Info
+type AuthConfig struct {
+	Username      string `json:"username,omitempty"`
+	Password      string `json:"password,omitempty"`
+	Auth          string `json:"auth"`
+	Email         string `json:"email"`
+	ServerAddress string `json:"serveraddress,omitempty"`
+}
+
+// ~/.docker/config.json file info
+type ConfigFile struct {
+	AuthConfigs map[string]AuthConfig `json:"auths"`
+	HttpHeaders map[string]string     `json:"HttpHeaders,omitempty"`
+	filename    string                // Note: not serialized - for internal use only
+}
+
+func NewConfigFile(fn string) *ConfigFile {
+	return &ConfigFile{
+		AuthConfigs: make(map[string]AuthConfig),
+		HttpHeaders: make(map[string]string),
+		filename:    fn,
+	}
+}
+
+// load up the auth config information and return values
+// FIXME: use the internal golang config parser
+func Load(configDir string) (*ConfigFile, error) {
+	if configDir == "" {
+		configDir = filepath.Join(homedir.Get(), ".docker")
+	}
+
+	configFile := ConfigFile{
+		AuthConfigs: make(map[string]AuthConfig),
+		filename:    filepath.Join(configDir, CONFIGFILE),
+	}
+
+	// Try happy path first - latest config file
+	if _, err := os.Stat(configFile.filename); err == nil {
+		file, err := os.Open(configFile.filename)
+		if err != nil {
+			return &configFile, err
+		}
+		defer file.Close()
+
+		if err := json.NewDecoder(file).Decode(&configFile); err != nil {
+			return &configFile, err
+		}
+
+		for addr, ac := range configFile.AuthConfigs {
+			ac.Username, ac.Password, err = DecodeAuth(ac.Auth)
+			if err != nil {
+				return &configFile, err
+			}
+			ac.Auth = ""
+			ac.ServerAddress = addr
+			configFile.AuthConfigs[addr] = ac
+		}
+
+		return &configFile, nil
+	} else if !os.IsNotExist(err) {
+		// if file is there but we can't stat it for any reason other
+		// than it doesn't exist then stop
+		return &configFile, err
+	}
+
+	// Can't find latest config file so check for the old one
+	confFile := filepath.Join(homedir.Get(), OLD_CONFIGFILE)
+
+	if _, err := os.Stat(confFile); err != nil {
+		return &configFile, nil //missing file is not an error
+	}
+
+	b, err := ioutil.ReadFile(confFile)
+	if err != nil {
+		return &configFile, err
+	}
+
+	if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
+		arr := strings.Split(string(b), "\n")
+		if len(arr) < 2 {
+			return &configFile, fmt.Errorf("The Auth config file is empty")
+		}
+		authConfig := AuthConfig{}
+		origAuth := strings.Split(arr[0], " = ")
+		if len(origAuth) != 2 {
+			return &configFile, fmt.Errorf("Invalid Auth config file")
+		}
+		authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1])
+		if err != nil {
+			return &configFile, err
+		}
+		origEmail := strings.Split(arr[1], " = ")
+		if len(origEmail) != 2 {
+			return &configFile, fmt.Errorf("Invalid Auth config file")
+		}
+		authConfig.Email = origEmail[1]
+		authConfig.ServerAddress = DEFAULT_INDEXSERVER
+		configFile.AuthConfigs[DEFAULT_INDEXSERVER] = authConfig
+	} else {
+		for k, authConfig := range configFile.AuthConfigs {
+			authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth)
+			if err != nil {
+				return &configFile, err
+			}
+			authConfig.Auth = ""
+			authConfig.ServerAddress = k
+			configFile.AuthConfigs[k] = authConfig
+		}
+	}
+	return &configFile, nil
+}
+
+func (configFile *ConfigFile) Save() error {
+	// Encode sensitive data into a new/temp struct
+	tmpAuthConfigs := make(map[string]AuthConfig, len(configFile.AuthConfigs))
+	for k, authConfig := range configFile.AuthConfigs {
+		authCopy := authConfig
+
+		authCopy.Auth = EncodeAuth(&authCopy)
+		authCopy.Username = ""
+		authCopy.Password = ""
+		authCopy.ServerAddress = ""
+		tmpAuthConfigs[k] = authCopy
+	}
+
+	saveAuthConfigs := configFile.AuthConfigs
+	configFile.AuthConfigs = tmpAuthConfigs
+	defer func() { configFile.AuthConfigs = saveAuthConfigs }()
+
+	data, err := json.MarshalIndent(configFile, "", "\t")
+	if err != nil {
+		return err
+	}
+
+	if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(configFile.filename, data, 0600); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (config *ConfigFile) Filename() string {
+	return config.filename
+}
+
+// create a base64 encoded auth string to store in config
+func EncodeAuth(authConfig *AuthConfig) string {
+	authStr := authConfig.Username + ":" + authConfig.Password
+	msg := []byte(authStr)
+	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
+	base64.StdEncoding.Encode(encoded, msg)
+	return string(encoded)
+}
+
+// decode the auth string
+func DecodeAuth(authStr string) (string, string, error) {
+	decLen := base64.StdEncoding.DecodedLen(len(authStr))
+	decoded := make([]byte, decLen)
+	authByte := []byte(authStr)
+	n, err := base64.StdEncoding.Decode(decoded, authByte)
+	if err != nil {
+		return "", "", err
+	}
+	if n > decLen {
+		return "", "", fmt.Errorf("Something went wrong decoding auth config")
+	}
+	arr := strings.SplitN(string(decoded), ":", 2)
+	if len(arr) != 2 {
+		return "", "", fmt.Errorf("Invalid auth configuration file")
+	}
+	password := strings.Trim(arr[1], "\x00")
+	return arr[0], password, nil
+}
diff --git a/cliconfig/config_file_test.go b/cliconfig/config_file_test.go
new file mode 100644
index 0000000..6d1125f
--- /dev/null
+++ b/cliconfig/config_file_test.go
@@ -0,0 +1,157 @@
+package cliconfig
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/pkg/homedir"
+)
+
+func TestMissingFile(t *testing.T) {
+	tmpHome, _ := ioutil.TempDir("", "config-test")
+
+	config, err := Load(tmpHome)
+	if err != nil {
+		t.Fatalf("Failed loading on missing file: %q", err)
+	}
+
+	// Now save it and make sure it shows up in new form
+	err = config.Save()
+	if err != nil {
+		t.Fatalf("Failed to save: %q", err)
+	}
+
+	buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE))
+	if !strings.Contains(string(buf), `"auths":`) {
+		t.Fatalf("Should have save in new form: %s", string(buf))
+	}
+}
+
+func TestSaveFileToDirs(t *testing.T) {
+	tmpHome, _ := ioutil.TempDir("", "config-test")
+
+	tmpHome += "/.docker"
+
+	config, err := Load(tmpHome)
+	if err != nil {
+		t.Fatalf("Failed loading on missing file: %q", err)
+	}
+
+	// Now save it and make sure it shows up in new form
+	err = config.Save()
+	if err != nil {
+		t.Fatalf("Failed to save: %q", err)
+	}
+
+	buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE))
+	if !strings.Contains(string(buf), `"auths":`) {
+		t.Fatalf("Should have save in new form: %s", string(buf))
+	}
+}
+
+func TestEmptyFile(t *testing.T) {
+	tmpHome, _ := ioutil.TempDir("", "config-test")
+	fn := filepath.Join(tmpHome, CONFIGFILE)
+	ioutil.WriteFile(fn, []byte(""), 0600)
+
+	_, err := Load(tmpHome)
+	if err == nil {
+		t.Fatalf("Was supposed to fail")
+	}
+}
+
+func TestEmptyJson(t *testing.T) {
+	tmpHome, _ := ioutil.TempDir("", "config-test")
+	fn := filepath.Join(tmpHome, CONFIGFILE)
+	ioutil.WriteFile(fn, []byte("{}"), 0600)
+
+	config, err := Load(tmpHome)
+	if err != nil {
+		t.Fatalf("Failed loading on empty json file: %q", err)
+	}
+
+	// Now save it and make sure it shows up in new form
+	err = config.Save()
+	if err != nil {
+		t.Fatalf("Failed to save: %q", err)
+	}
+
+	buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE))
+	if !strings.Contains(string(buf), `"auths":`) {
+		t.Fatalf("Should have save in new form: %s", string(buf))
+	}
+}
+
+func TestOldJson(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		return
+	}
+
+	tmpHome, _ := ioutil.TempDir("", "config-test")
+	defer os.RemoveAll(tmpHome)
+
+	homeKey := homedir.Key()
+	homeVal := homedir.Get()
+
+	defer func() { os.Setenv(homeKey, homeVal) }()
+	os.Setenv(homeKey, tmpHome)
+
+	fn := filepath.Join(tmpHome, OLD_CONFIGFILE)
+	js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}`
+	ioutil.WriteFile(fn, []byte(js), 0600)
+
+	config, err := Load(tmpHome)
+	if err != nil {
+		t.Fatalf("Failed loading on empty json file: %q", err)
+	}
+
+	ac := config.AuthConfigs["https://index.docker.io/v1/"]
+	if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" {
+		t.Fatalf("Missing data from parsing:\n%q", config)
+	}
+
+	// Now save it and make sure it shows up in new form
+	err = config.Save()
+	if err != nil {
+		t.Fatalf("Failed to save: %q", err)
+	}
+
+	buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE))
+	if !strings.Contains(string(buf), `"auths":`) ||
+		!strings.Contains(string(buf), "user@example.com") {
+		t.Fatalf("Should have save in new form: %s", string(buf))
+	}
+}
+
+func TestNewJson(t *testing.T) {
+	tmpHome, _ := ioutil.TempDir("", "config-test")
+	fn := filepath.Join(tmpHome, CONFIGFILE)
+	js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }`
+	ioutil.WriteFile(fn, []byte(js), 0600)
+
+	config, err := Load(tmpHome)
+	if err != nil {
+		t.Fatalf("Failed loading on empty json file: %q", err)
+	}
+
+	ac := config.AuthConfigs["https://index.docker.io/v1/"]
+	if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" {
+		t.Fatalf("Missing data from parsing:\n%q", config)
+	}
+
+	// Now save it and make sure it shows up in new form
+	err = config.Save()
+	if err != nil {
+		t.Fatalf("Failed to save: %q", err)
+	}
+
+	buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE))
+	if !strings.Contains(string(buf), `"auths":`) ||
+		!strings.Contains(string(buf), "user@example.com") {
+		t.Fatalf("Should have save in new form: %s", string(buf))
+	}
+}
diff --git a/contrib/builder/deb/README.md b/contrib/builder/deb/README.md
new file mode 100644
index 0000000..a6fd70d
--- /dev/null
+++ b/contrib/builder/deb/README.md
@@ -0,0 +1,5 @@
+# `dockercore/builder-deb`
+
+This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets.
+
+To add new tags, see [`contrib/builder/deb` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file.
diff --git a/contrib/builder/deb/build.sh b/contrib/builder/deb/build.sh
new file mode 100755
index 0000000..8271d9d
--- /dev/null
+++ b/contrib/builder/deb/build.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+set -x
+./generate.sh
+for d in */; do
+	docker build -t "dockercore/builder-deb:$(basename "$d")" "$d"
+done
diff --git a/contrib/builder/deb/debian-jessie/Dockerfile b/contrib/builder/deb/debian-jessie/Dockerfile
new file mode 100644
index 0000000..de888a1
--- /dev/null
+++ b/contrib/builder/deb/debian-jessie/Dockerfile
@@ -0,0 +1,14 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM debian:jessie
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/deb/debian-stretch/Dockerfile b/contrib/builder/deb/debian-stretch/Dockerfile
new file mode 100644
index 0000000..ee46282
--- /dev/null
+++ b/contrib/builder/deb/debian-stretch/Dockerfile
@@ -0,0 +1,14 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM debian:stretch
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/deb/debian-wheezy/Dockerfile b/contrib/builder/deb/debian-wheezy/Dockerfile
new file mode 100644
index 0000000..dc9c388
--- /dev/null
+++ b/contrib/builder/deb/debian-wheezy/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM debian:wheezy
+RUN echo deb http://http.debian.net/debian wheezy-backports main > /etc/apt/sources.list.d/wheezy-backports.list
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/deb/generate.sh b/contrib/builder/deb/generate.sh
new file mode 100755
index 0000000..49b26c4
--- /dev/null
+++ b/contrib/builder/deb/generate.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+set -e
+
+# usage: ./generate.sh [versions]
+#    ie: ./generate.sh
+#        to update all Dockerfiles in this directory
+#    or: ./generate.sh debian-jessie
+#        to only update debian-jessie/Dockerfile
+#    or: ./generate.sh debian-newversion
+#        to create a new folder and a Dockerfile within it
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+	versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+	distro="${version%-*}"
+	suite="${version##*-}"
+	from="${distro}:${suite}"
+
+	mkdir -p "$version"
+	echo "$version -> FROM $from"
+	cat > "$version/Dockerfile" <<-EOF
+		#
+		# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+		#
+
+		FROM $from
+	EOF
+
+	case "$from" in
+		debian:wheezy)
+			# add -backports, like our users have to
+			echo "RUN echo deb http://http.debian.net/debian $suite-backports main > /etc/apt/sources.list.d/$suite-backports.list" >> "$version/Dockerfile"
+			;;
+	esac
+
+	echo >> "$version/Dockerfile"
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		bash-completion # for bash-completion debhelper integration
+		btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible)
+		build-essential # "essential for building Debian packages"
+		curl ca-certificates # for downloading Go
+		debhelper # for easy ".deb" building
+		dh-systemd # for systemd debhelper integration
+		git # for "git commit" info in "docker -v"
+		libapparmor-dev # for "sys/apparmor.h"
+		libdevmapper-dev # for "libdevmapper.h"
+		libsqlite3-dev # for "sqlite3.h"
+	)
+	echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
+	echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+	awk '$1 == "ENV" && $2 == "DOCKER_BUILDTAGS" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
+done
diff --git a/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile
new file mode 100644
index 0000000..599a74f
--- /dev/null
+++ b/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile
@@ -0,0 +1,14 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM ubuntu-debootstrap:trusty
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile
new file mode 100644
index 0000000..81528ce
--- /dev/null
+++ b/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile
@@ -0,0 +1,14 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM ubuntu-debootstrap:utopic
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile
new file mode 100644
index 0000000..a8e2385
--- /dev/null
+++ b/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile
@@ -0,0 +1,14 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
+#
+
+FROM ubuntu-debootstrap:vivid
+
+RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS apparmor selinux
diff --git a/contrib/builder/rpm/README.md b/contrib/builder/rpm/README.md
new file mode 100644
index 0000000..153fbce
--- /dev/null
+++ b/contrib/builder/rpm/README.md
@@ -0,0 +1,5 @@
+# `dockercore/builder-rpm`
+
+This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets.
+
+To add new tags, see [`contrib/builder/rpm` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file.
diff --git a/contrib/builder/rpm/build.sh b/contrib/builder/rpm/build.sh
new file mode 100755
index 0000000..558f7ee
--- /dev/null
+++ b/contrib/builder/rpm/build.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+set -x
+./generate.sh
+for d in */; do
+	docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d"
+done
diff --git a/contrib/builder/rpm/centos-6/Dockerfile b/contrib/builder/rpm/centos-6/Dockerfile
new file mode 100644
index 0000000..2daa715
--- /dev/null
+++ b/contrib/builder/rpm/centos-6/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM centos:6
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux exclude_graphdriver_btrfs
diff --git a/contrib/builder/rpm/centos-7/Dockerfile b/contrib/builder/rpm/centos-7/Dockerfile
new file mode 100644
index 0000000..d7e4f2c
--- /dev/null
+++ b/contrib/builder/rpm/centos-7/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM centos:7
+
+RUN yum groupinstall -y "Development Tools"
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/fedora-20/Dockerfile b/contrib/builder/rpm/fedora-20/Dockerfile
new file mode 100644
index 0000000..f0c701b
--- /dev/null
+++ b/contrib/builder/rpm/fedora-20/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:20
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/fedora-21/Dockerfile b/contrib/builder/rpm/fedora-21/Dockerfile
new file mode 100644
index 0000000..3d84706
--- /dev/null
+++ b/contrib/builder/rpm/fedora-21/Dockerfile
@@ -0,0 +1,15 @@
+#
+# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+#
+
+FROM fedora:21
+
+RUN yum install -y @development-tools fedora-packager
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
+
+ENV GO_VERSION 1.4.2
+RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
+
+ENV AUTO_GOPATH 1
+ENV DOCKER_BUILDTAGS selinux
diff --git a/contrib/builder/rpm/generate.sh b/contrib/builder/rpm/generate.sh
new file mode 100755
index 0000000..b34193c
--- /dev/null
+++ b/contrib/builder/rpm/generate.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+set -e
+
+# usage: ./generate.sh [versions]
+#    ie: ./generate.sh
+#        to update all Dockerfiles in this directory
+#    or: ./generate.sh
+#        to only update fedora-20/Dockerfile
+#    or: ./generate.sh fedora-newversion
+#        to create a new folder and a Dockerfile within it
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+
+versions=( "$@" )
+if [ ${#versions[@]} -eq 0 ]; then
+	versions=( */ )
+fi
+versions=( "${versions[@]%/}" )
+
+for version in "${versions[@]}"; do
+	distro="${version%-*}"
+	suite="${version##*-}"
+	from="${distro}:${suite}"
+
+	mkdir -p "$version"
+	echo "$version -> FROM $from"
+	cat > "$version/Dockerfile" <<-EOF
+		#
+		# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
+		#
+
+		FROM $from
+	EOF
+
+	echo >> "$version/Dockerfile"
+
+	case "$from" in
+		centos:*)
+			# get "Development Tools" packages dependencies
+			echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile"
+			;;
+		*)
+			echo 'RUN yum install -y @development-tools fedora-packager' >> "$version/Dockerfile"
+			;;
+	esac
+
+	# this list is sorted alphabetically; please keep it that way
+	packages=(
+		btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible)
+		device-mapper-devel # for "libdevmapper.h"
+		glibc-static
+		libselinux-devel # for "libselinux.so"
+		sqlite-devel # for "sqlite3.h"
+		tar # older versions of dev-tools don't have tar
+	)
+	echo "RUN yum install -y ${packages[*]}" >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile"
+	echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+	if [ "$from" == "centos:6" ]; then
+		echo 'ENV DOCKER_BUILDTAGS selinux exclude_graphdriver_btrfs' >> "$version/Dockerfile"
+	else
+		echo 'ENV DOCKER_BUILDTAGS selinux' >> "$version/Dockerfile"
+	fi
+done
diff --git a/contrib/check-config.sh b/contrib/check-config.sh
index ac5df62..1482777 100755
--- a/contrib/check-config.sh
+++ b/contrib/check-config.sh
@@ -26,8 +26,14 @@
 is_set() {
 	zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
 }
+is_set_in_kernel() {
+	zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
+}
+is_set_as_module() {
+	zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
+}
 
-# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
+# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
 declare -A colors=(
 	[black]=30
 	[red]=31
@@ -70,8 +76,10 @@
 }
 
 check_flag() {
-	if is_set "$1"; then
+	if is_set_in_kernel "$1"; then
 		wrap_good "CONFIG_$1" 'enabled'
+	elif is_set_as_module "$1"; then
+		wrap_good "CONFIG_$1" 'enabled (as module)'
 	else
 		wrap_bad "CONFIG_$1" 'missing'
 	fi
@@ -83,6 +91,22 @@
 	done
 }
 
+check_command() {
+	if command -v "$1" >/dev/null 2>&1; then
+		wrap_good "$1 command" 'available'
+	else
+		wrap_bad "$1 command" 'missing'
+	fi
+}
+
+check_device() {
+	if [ -c "$1" ]; then
+		wrap_good "$1" 'present'
+	else
+		wrap_bad "$1" 'missing'
+	fi
+}
+
 if [ ! -e "$CONFIG" ]; then
 	wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
 	for tryConfig in "${possibleConfigs[@]}"; do
@@ -139,7 +163,7 @@
 	NAMESPACES {NET,PID,IPC,UTS}_NS
 	DEVPTS_MULTIPLE_INSTANCES
 	CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS
-	MACVLAN VETH BRIDGE
+	MACVLAN VETH BRIDGE BRIDGE_NETFILTER
 	NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
 	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
 	NF_NAT NF_NAT_NEEDED
@@ -151,10 +175,19 @@
 echo
 
 echo 'Optional Features:'
+{
+	check_flags MEMCG_SWAP 
+	check_flags MEMCG_SWAP_ENABLED
+	if  is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
+		echo "    $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)"
+	fi
+}
 flags=(
-	MEMCG_SWAP
 	RESOURCE_COUNTERS
+	BLK_CGROUP
+	IOSCHED_CFQ
 	CGROUP_PERF
+	CFS_BANDWIDTH
 )
 check_flags "${flags[@]}"
 
@@ -175,6 +208,11 @@
 
 	echo '- "'$(wrap_color 'overlay' blue)'":'
 	check_flags OVERLAY_FS EXT4_FS_SECURITY EXT4_FS_POSIX_ACL | sed 's/^/  /'
+
+	echo '- "'$(wrap_color 'zfs' blue)'":'
+	echo "  - $(check_device /dev/zfs)"
+	echo "  - $(check_command zfs)"
+	echo "  - $(check_command zpool)"
 } | sed 's/^/  /'
 echo
 
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 6bee94f..0f4e2f1 100755
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -22,12 +22,12 @@
 # must have access to the socket for the completions to function correctly
 #
 # Note for developers:
-# Please arrange options sorted alphabetically by long name with the short 
+# Please arrange options sorted alphabetically by long name with the short
 # options immediately following their corresponding long form.
 # This order should be applied to lists, alternatives and code blocks.
 
 __docker_q() {
-	docker 2>/dev/null "$@"
+	docker ${host:+-H "$host"} 2>/dev/null "$@"
 }
 
 __docker_containers_all() {
@@ -220,6 +220,10 @@
 			_filedir -d
 			return
 			;;
+		--log-driver)
+			COMPREPLY=( $( compgen -W "json-file syslog none" -- "$cur" ) )
+			return
+			;;
 		--log-level|-l)
 			COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
 			return
@@ -269,13 +273,13 @@
 			;;
 		--file|-f)
 			_filedir
-			return	
-			;;	
+			return
+			;;
 	esac
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--file -f --force-rm --help --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--cpu-shares -c --cpuset-cpus --cpu-quota --file -f --force-rm --help --memory -m --memory-swap --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) )
 			;;
 		*)
 			local counter="$(__docker_pos_first_nonflag '--tag|-t')"
@@ -403,7 +407,7 @@
 _docker_exec() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty -u --user" -- "$cur" ) )
 			;;
 		*)
 			__docker_containers_running
@@ -469,7 +473,7 @@
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-trunc --quiet -q" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --help --no-trunc --quiet -q" -- "$cur" ) )
 			;;
 		=)
 			return
@@ -589,7 +593,7 @@
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--follow -f --help --tail --timestamps -t" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--follow -f --help --since --tail --timestamps -t" -- "$cur" ) )
 			;;
 		*)
 			local counter=$(__docker_pos_first_nonflag '--tail')
@@ -675,6 +679,14 @@
 		*)
 			local counter=$(__docker_pos_first_nonflag)
 			if [ $cword -eq $counter ]; then
+				for arg in "${COMP_WORDS[@]}"; do
+					case "$arg" in
+						--all-tags|-a)
+							__docker_image_repos
+							return
+							;;
+					esac
+				done
 				__docker_image_repos_and_tags
 			fi
 			;;
@@ -766,6 +778,8 @@
 		--cidfile
 		--cpuset
 		--cpu-shares -c
+		--cpu-period
+		--cpu-quota
 		--device
 		--dns
 		--dns-search
@@ -998,7 +1012,7 @@
 _docker_stats() {
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--no-stream --help" -- "$cur" ) )
 			;;
 		*)
 			__docker_containers_running
@@ -1146,6 +1160,8 @@
 		--dns
 		--dns-search
 		--exec-driver -e
+		--exec-opt
+		--exec-root
 		--fixed-cidr
 		--fixed-cidr-v6
 		--graph -g
@@ -1154,6 +1170,7 @@
 		--insecure-registry
 		--ip
 		--label
+		--log-driver
 		--log-level -l
 		--mtu
 		--pidfile -p
@@ -1166,6 +1183,7 @@
 	"
 
 	local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args")
+	local host
 
 	COMPREPLY=()
 	local cur prev words cword
@@ -1175,6 +1193,11 @@
 	local counter=1
 	while [ $counter -lt $cword ]; do
 		case "${words[$counter]}" in
+			# save host so that completion can use custom daemon
+			--host|-H)
+				(( counter++ ))
+				host="${words[$counter]}"
+				;;
 			$main_options_with_args_glob )
 				(( counter++ ))
 				;;
diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish
index d323758..79f7ed4 100644
--- a/contrib/completion/fish/docker.fish
+++ b/contrib/completion/fish/docker.fish
@@ -16,7 +16,7 @@
 
 function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
     for i in (commandline -opc)
-        if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait
+        if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats
             return 1
         end
     end
@@ -51,6 +51,7 @@
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the Docker runtime to use a specific exec driver'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set exec driver options'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)'
 complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode'
@@ -232,6 +233,7 @@
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps'
+complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)'
 complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container"
 
@@ -361,6 +363,7 @@
 # stats
 complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics"
 complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result'
 complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container"
 
 # stop
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index 28398f7..abd6663 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -305,6 +305,7 @@
         (logs)
             _arguments \
                 {-f,--follow}'[Follow log output]' \
+                '-s,--since[Show logs since timestamp]' \
                 {-t,--timestamps}'[Show timestamps]' \
                 '--tail=-[Output the last K lines]:lines:(1 10 20 50 all)' \
                 '*:containers:__docker_containers'
@@ -326,6 +327,7 @@
             ;;
         (stats)
             _arguments \
+                '--no-stream[Disable streaming stats and only pull the first result]' \
                 '*:containers:__docker_runningcontainers'
             ;;
         (rm)
diff --git a/contrib/desktop-integration/gparted/Dockerfile b/contrib/desktop-integration/gparted/Dockerfile
index e76e658..3ddb232 100644
--- a/contrib/desktop-integration/gparted/Dockerfile
+++ b/contrib/desktop-integration/gparted/Dockerfile
@@ -3,7 +3,7 @@
 # AUTHOR:         Jessica Frazelle <jess@docker.com>
 # COMMENTS:
 #   This file describes how to build a gparted container with all
-#   dependencies installed. It uses native X11 unix socket. 
+#   dependencies installed. It uses native X11 unix socket.
 #   Tested on Debian Jessie
 # USAGE:
 #   # Download gparted Dockerfile
diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go
index ffc34a5..0a0b080 100644
--- a/contrib/docker-device-tool/device_tool.go
+++ b/contrib/docker-device-tool/device_tool.go
@@ -9,7 +9,7 @@
 	"strconv"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver/devmapper"
 	"github.com/docker/docker/pkg/devicemapper"
 )
@@ -63,7 +63,7 @@
 
 	if *flDebug {
 		os.Setenv("DEBUG", "1")
-		log.SetLevel(log.DebugLevel)
+		logrus.SetLevel(logrus.DebugLevel)
 	}
 
 	if flag.NArg() < 1 {
@@ -125,7 +125,7 @@
 
 		err = devices.ResizePool(size)
 		if err != nil {
-			fmt.Println("Error resizeing pool: ", err)
+			fmt.Println("Error resizing pool: ", err)
 			os.Exit(1)
 		}
 
diff --git a/contrib/dockerize-disk.sh b/contrib/dockerize-disk.sh
new file mode 100755
index 0000000..6e72d9f
--- /dev/null
+++ b/contrib/dockerize-disk.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+set -e
+
+if ! command -v qemu-nbd &> /dev/null; then
+  echo >&2 'error: "qemu-nbd" not found!'
+  exit 1
+fi
+
+usage() {
+  echo "Convert disk image to docker image"
+  echo ""
+  echo "usage: $0 image-name disk-image-file [ base-image ]"
+  echo "   ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img"
+  echo "       $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04"
+}
+
+if [ "$#" -lt 2 ]; then
+  usage
+  exit 1
+fi
+
+CURDIR=$(pwd)
+
+image_name="${1%:*}"
+image_tag="${1#*:}"
+if [ "$image_tag" == "$1" ]; then
+  image_tag="latest"
+fi
+
+disk_image_file="$2"
+docker_base_image="$3"
+
+block_device=/dev/nbd0
+
+builddir=$(mktemp -d)
+
+cleanup() {
+  umount "$builddir/disk_image" || true
+  umount "$builddir/workdir" || true
+  qemu-nbd -d $block_device &> /dev/null || true
+  rm -rf $builddir
+}
+trap cleanup EXIT
+
+# Mount disk image
+modprobe nbd max_part=63
+qemu-nbd -rc ${block_device} -P 1 "$disk_image_file"
+mkdir "$builddir/disk_image"
+mount -o ro ${block_device} "$builddir/disk_image"
+
+mkdir "$builddir/workdir"
+mkdir "$builddir/diff"
+
+base_image_mounts=""
+
+# Unpack base image
+if [ -n "$docker_base_image" ]; then
+  mkdir -p "$builddir/base"
+  docker pull "$docker_base_image"
+  docker save "$docker_base_image" | tar -xC "$builddir/base"
+
+  image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image")
+  while [ -n "$image_id" ]; do
+    mkdir -p "$builddir/base/$image_id/layer"
+    tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer"
+
+    base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh"
+    image_id=$(docker inspect -f "{{.Parent}}" "$image_id")
+  done
+fi
+
+# Mount work directory
+mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir"
+
+# Update files
+cd $builddir
+diff -rq disk_image workdir \
+  | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \
+  | while read action entry; do
+      case "$action" in
+        ADD|UPDATE)
+          cp -a "disk_image$entry" "workdir$entry"
+          ;;
+        DEL)
+          rm -rf "workdir$entry"
+          ;;
+        *)
+          echo "Error: unknown diff line: $action $entry" >&2
+          ;;
+      esac
+    done
+
+# Pack new image
+new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)"
+mkdir -p $builddir/result/$new_image_id
+cd diff
+tar -cf $builddir/result/$new_image_id/layer.tar *
+echo "1.0" > $builddir/result/$new_image_id/VERSION
+cat > $builddir/result/$new_image_id/json <<-EOS
+{ "docker_version": "1.4.1"
+, "id": "$new_image_id"
+, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)"
+EOS
+
+if [ -n "$docker_base_image" ]; then
+  image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image")
+  echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json
+fi
+
+echo "}" >> $builddir/result/$new_image_id/json
+
+echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories
+
+cd $builddir/result
+
+# mkdir -p $CURDIR/$image_name
+# cp -r * $CURDIR/$image_name
+tar -c * | docker load
diff --git a/contrib/download-frozen-image.sh b/contrib/download-frozen-image.sh
index b45cba9..29d7ff5 100755
--- a/contrib/download-frozen-image.sh
+++ b/contrib/download-frozen-image.sh
@@ -41,39 +41,41 @@
 	[ "$imageId" != "$tag" ] || imageId=
 	[ "$tag" != "$imageTag" ] || tag='latest'
 	tag="${tag%@*}"
-	
+
+	imageFile="${image//\//_}" # "/" can't be in filenames :)
+
 	token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')"
-	
+
 	if [ -z "$imageId" ]; then
 		imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")"
 		imageId="${imageId//\"/}"
 	fi
-	
+
 	ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")"
 	if [ "${ancestryJson:0:1}" != '[' ]; then
 		echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:"
 		echo >&2 "  $ancestryJson"
 		exit 1
 	fi
-	
+
 	IFS=','
 	ancestry=( ${ancestryJson//[\[\] \"]/} )
 	unset IFS
-	
-	if [ -s "$dir/tags-$image.tmp" ]; then
-		echo -n ', ' >> "$dir/tags-$image.tmp"
+
+	if [ -s "$dir/tags-$imageFile.tmp" ]; then
+		echo -n ', ' >> "$dir/tags-$imageFile.tmp"
 	else
 		images=( "${images[@]}" "$image" )
 	fi
-	echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$image.tmp"
-	
+	echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp"
+
 	echo "Downloading '$imageTag' (${#ancestry[@]} layers)..."
 	for imageId in "${ancestry[@]}"; do
 		mkdir -p "$dir/$imageId"
 		echo '1.0' > "$dir/$imageId/VERSION"
-		
+
 		curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json"
-		
+
 		# TODO figure out why "-C -" doesn't work here
 		# "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume."
 		# "HTTP/1.1 416 Requested Range Not Satisfiable"
@@ -90,10 +92,12 @@
 echo -n '{' > "$dir/repositories"
 firstImage=1
 for image in "${images[@]}"; do
+	imageFile="${image//\//_}" # "/" can't be in filenames :)
+
 	[ "$firstImage" ] || echo -n ',' >> "$dir/repositories"
 	firstImage=
 	echo -n $'\n\t' >> "$dir/repositories"
-	echo -n '"'"$image"'": { '"$(cat "$dir/tags-$image.tmp")"' }' >> "$dir/repositories"
+	echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories"
 done
 echo -n $'\n}\n' >> "$dir/repositories"
 
diff --git a/contrib/host-integration/manager/systemd b/contrib/host-integration/manager/systemd
index 0431b3c..c1ab34e 100755
--- a/contrib/host-integration/manager/systemd
+++ b/contrib/host-integration/manager/systemd
@@ -10,11 +10,11 @@
 	Description=$desc
 	Author=$auth
 	After=docker.service
-	
+
 	[Service]
 	ExecStart=/usr/bin/docker start -a $cid
 	ExecStop=/usr/bin/docker stop -t 2 $cid
-	
+
 	[Install]
 	WantedBy=local.target
 EOF
diff --git a/contrib/init/openrc/docker.initd b/contrib/init/openrc/docker.initd
index a9d21b1..f251e9a 100755
--- a/contrib/init/openrc/docker.initd
+++ b/contrib/init/openrc/docker.initd
@@ -7,6 +7,7 @@
 DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid}
 DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker}
 DOCKER_OPTS=${DOCKER_OPTS:-}
+UNSHARE_BINARY=${UNSHARE_BINARY:-/usr/bin/unshare}
 
 start() {
 	checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE"
@@ -16,11 +17,12 @@
 
 	ebegin "Starting docker daemon"
 	start-stop-daemon --start --background \
-		--exec "$DOCKER_BINARY" \
+		--exec "$UNSHARE_BINARY" \
 		--pidfile "$DOCKER_PIDFILE" \
 		--stdout "$DOCKER_LOGFILE" \
 		--stderr "$DOCKER_LOGFILE" \
-		-- -d -p "$DOCKER_PIDFILE" \
+		-- --mount \
+		-- "$DOCKER_BINARY" -d -p "$DOCKER_PIDFILE" \
 		$DOCKER_OPTS
 	eend $?
 }
diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker
index cf33c83..35fd71f 100755
--- a/contrib/init/sysvinit-debian/docker
+++ b/contrib/init/sysvinit-debian/docker
@@ -30,6 +30,7 @@
 DOCKER_LOGFILE=/var/log/$BASE.log
 DOCKER_OPTS=
 DOCKER_DESC="Docker"
+UNSHARE=${UNSHARE:-/usr/bin/unshare}
 
 # Get lsb functions
 . /lib/lsb/init-functions
@@ -99,11 +100,11 @@
 		log_begin_msg "Starting $DOCKER_DESC: $BASE"
 		start-stop-daemon --start --background \
 			--no-close \
-			--exec "$DOCKER" \
+			--exec "$UNSHARE" \
 			--pidfile "$DOCKER_SSD_PIDFILE" \
 			--make-pidfile \
-			-- \
-				-d -p "$DOCKER_PIDFILE" \
+			-- --mount \
+			-- "$DOCKER" -d -p "$DOCKER_PIDFILE" \
 				$DOCKER_OPTS \
 					>> "$DOCKER_LOGFILE" 2>&1
 		log_end_msg $?
diff --git a/contrib/init/sysvinit-redhat/docker.sysconfig b/contrib/init/sysvinit-redhat/docker.sysconfig
index 9c99dd1..5f9b7e5 100644
--- a/contrib/init/sysvinit-redhat/docker.sysconfig
+++ b/contrib/init/sysvinit-redhat/docker.sysconfig
@@ -1,5 +1,5 @@
 # /etc/sysconfig/docker
-# 
+#
 # Other arguments to pass to the docker daemon process
 # These will be parsed by the sysv initscript and appended
 # to the arguments list passed to docker -d
diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf
index 4ad6058..5272131 100644
--- a/contrib/init/upstart/docker.conf
+++ b/contrib/init/upstart/docker.conf
@@ -7,6 +7,8 @@
 
 respawn
 
+kill timeout 20
+
 pre-start script
 	# see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
 	if grep -v '^#' /etc/fstab | grep -q cgroup \
@@ -37,7 +39,7 @@
 	if [ -f /etc/default/$UPSTART_JOB ]; then
 		. /etc/default/$UPSTART_JOB
 	fi
-	exec "$DOCKER" -d $DOCKER_OPTS
+	exec unshare -m -- "$DOCKER" -d $DOCKER_OPTS
 end script
 
 # Don't emit "started" event until docker.sock is ready.
diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh
index bbecf72..06406fe 100755
--- a/contrib/mkimage-arch.sh
+++ b/contrib/mkimage-arch.sh
@@ -14,6 +14,8 @@
 	exit 1
 }
 
+export LANG="C.UTF-8"
+
 ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX)
 chmod 755 $ROOTFS
 
diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh
index d9d6aae..412a5ce 100755
--- a/contrib/mkimage-debootstrap.sh
+++ b/contrib/mkimage-debootstrap.sh
@@ -14,9 +14,9 @@
 
 usage() {
 	echo >&2
-	
+
 	echo >&2 "usage: $0 [options] repo suite [mirror]"
-	
+
 	echo >&2
 	echo >&2 'options: (not recommended)'
 	echo >&2 "  -p set an http_proxy for debootstrap"
@@ -26,20 +26,20 @@
 	echo >&2 "  -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
 	echo >&2 "     # note that this will also skip adding universe and/or security/updates to sources.list"
 	echo >&2 "  -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"
-	
+
 	echo >&2
 	echo >&2 "   ie: $0 username/debian squeeze"
 	echo >&2 "       $0 username/debian squeeze http://ftp.uk.debian.org/debian/"
-	
+
 	echo >&2
 	echo >&2 "   ie: $0 username/ubuntu precise"
 	echo >&2 "       $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"
-	
+
 	echo >&2
 	echo >&2 "   ie: $0 -t precise.tar.bz2 precise"
 	echo >&2 "       $0 -t wheezy.tgz wheezy"
 	echo >&2 "       $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"
-	
+
 	echo >&2
 }
 
@@ -145,10 +145,10 @@
 	sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
 	sudo ln -sf /bin/true sbin/initctl
 	# see https://github.com/docker/docker/issues/446#issuecomment-16953173
-	
+
 	# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
 	sudo chroot . apt-get clean
-	
+
 	if strings usr/bin/dpkg | grep -q unsafe-io; then
 		# while we're at it, apt is unnecessarily slow inside containers
 		#  this forces dpkg not to call sync() after package extraction and speeds up install
@@ -159,7 +159,7 @@
 		# (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82),
 		# and ubuntu lucid/10.04 only has 1.15.5.6
 	fi
-	
+
 	# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
 	{
 		aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
@@ -167,17 +167,17 @@
 		echo "APT::Update::Post-Invoke { ${aptGetClean} };"
 		echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
 	} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
-	
+
 	# and remove the translations, too
 	echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null
-	
+
 	# helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
 	#  rm /usr/sbin/policy-rc.d
 	#  rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
 	#  rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
 	#  rm /etc/apt/apt.conf.d/no-cache
 	#  rm /etc/apt/apt.conf.d/no-languages
-	
+
 	if [ -z "$skipDetection" ]; then
 		# see also rudimentary platform detection in hack/install.sh
 		lsbDist=''
@@ -187,14 +187,14 @@
 		if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
 			lsbDist='Debian'
 		fi
-		
+
 		case "$lsbDist" in
 			Debian)
 				# add the updates and security repositories
 				if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
 					# ${suite}-updates only applies to non-unstable
 					sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
-					
+
 					# same for security updates
 					echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
 				fi
@@ -220,7 +220,7 @@
 				;;
 		esac
 	fi
-	
+
 	# make sure our packages lists are as up to date as we can get them
 	sudo chroot . apt-get update
 	sudo chroot . apt-get dist-upgrade -y
@@ -229,23 +229,23 @@
 if [ "$justTar" ]; then
 	# create the tarball file so it has the right permissions (ie, not root)
 	touch "$repo"
-	
+
 	# fill the tarball
 	sudo tar --numeric-owner -caf "$repo" .
 else
 	# create the image (and tag $repo:$suite)
 	sudo tar --numeric-owner -c . | $docker import - $repo:$suite
-	
+
 	# test the image
 	$docker run -i -t $repo:$suite echo success
-	
+
 	if [ -z "$skipDetection" ]; then
 		case "$lsbDist" in
 			Debian)
 				if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
 					# tag latest
 					$docker tag $repo:$suite $repo:latest
-					
+
 					if [ -r etc/debian_version ]; then
 						# tag the specific debian release version (which is only reasonable to tag on debian stable)
 						ver=$(cat etc/debian_version)
diff --git a/contrib/mkimage-unittest.sh b/contrib/mkimage-unittest.sh
deleted file mode 100755
index feebb17..0000000
--- a/contrib/mkimage-unittest.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env bash
-# Generate a very minimal filesystem based on busybox-static,
-# and load it into the local docker under the name "docker-ut".
-
-missing_pkg() {
-    echo "Sorry, I could not locate $1"
-    echo "Try 'apt-get install ${2:-$1}'?"
-    exit 1
-}
-
-BUSYBOX=$(which busybox)
-[ "$BUSYBOX" ] || missing_pkg busybox busybox-static
-SOCAT=$(which socat)
-[ "$SOCAT" ] || missing_pkg socat
-
-shopt -s extglob
-set -ex
-ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX`
-trap "rm -rf $ROOTFS" INT QUIT TERM
-cd $ROOTFS
-
-mkdir bin etc dev dev/pts lib proc sys tmp
-touch etc/resolv.conf
-cp /etc/nsswitch.conf etc/nsswitch.conf
-echo root:x:0:0:root:/:/bin/sh > etc/passwd
-echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd
-echo root:x:0: > etc/group
-echo daemon:x:1: >> etc/group
-ln -s lib lib64
-ln -s bin sbin
-cp $BUSYBOX $SOCAT bin
-for X in $(busybox --list)
-do
-    ln -s busybox bin/$X
-done
-rm bin/init
-ln bin/busybox bin/init
-cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib
-cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib
-cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib
-for X in console null ptmx random stdin stdout stderr tty urandom zero
-do
-    cp -a /dev/$X dev
-done
-
-chmod 0755 $ROOTFS # See #486
-tar --numeric-owner -cf- . | docker import - docker-ut
-docker run -i -u root docker-ut /bin/echo Success.
-rm -rf $ROOTFS
diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap
index 72983d2..c613d53 100755
--- a/contrib/mkimage/debootstrap
+++ b/contrib/mkimage/debootstrap
@@ -19,7 +19,7 @@
 chrootPath="$(type -P chroot)"
 rootfs_chroot() {
 	# "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately!
-	
+
 	# set PATH and chroot away!
 	PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \
 		"$chrootPath" "$rootfsDir" "$@"
@@ -37,7 +37,7 @@
 
 # prevent init scripts from running during install/update
 echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'"
-cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF'
+cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF'
 	#!/bin/sh
 
 	# For most Docker users, "apt-get install" only happens during "docker build",
@@ -176,11 +176,19 @@
 						s/ $suite / ${suite}-updates /
 					" "$rootfsDir/etc/apt/sources.list"
 					echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
-					# LTS
-					if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then
-						head -1 "$rootfsDir/etc/apt/sources.list" \
-							| sed "s/ $suite / squeeze-lts /" \
-								>> "$rootfsDir/etc/apt/sources.list"
+					# squeeze-lts
+					if [ -f "$rootfsDir/etc/debian_version" ]; then
+						ltsSuite=
+						case "$(cat "$rootfsDir/etc/debian_version")" in
+							6.*) ltsSuite='squeeze-lts' ;;
+							#7.*) ltsSuite='wheezy-lts' ;;
+							#8.*) ltsSuite='jessie-lts' ;;
+						esac
+						if [ "$ltsSuite" ]; then
+							head -1 "$rootfsDir/etc/apt/sources.list" \
+								| sed "s/ $suite / $ltsSuite /" \
+									>> "$rootfsDir/etc/apt/sources.list"
+						fi
 					fi
 				)
 			fi
@@ -220,13 +228,13 @@
 
 (
 	set -x
-	
+
 	# make sure we're fully up-to-date
 	rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y'
-	
+
 	# delete all the apt list files since they're big and get stale quickly
 	rm -rf "$rootfsDir/var/lib/apt/lists"/*
 	# this forces "apt-get update" in dependent images, which is also good
-	
+
 	mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing."
 )
diff --git a/contrib/project-stats.sh b/contrib/project-stats.sh
index 985a77f..2691c72 100755
--- a/contrib/project-stats.sh
+++ b/contrib/project-stats.sh
@@ -3,7 +3,7 @@
 ## Run this script from the root of the docker repository
 ## to query project stats useful to the maintainers.
 ## You will need to install `pulls` and `issues` from
-## http://github.com/crosbymichael/pulls
+## https://github.com/crosbymichael/pulls
 
 set -e
 
diff --git a/contrib/report-issue.sh b/contrib/report-issue.sh
index 5ef2ece..cb54f1a 100644
--- a/contrib/report-issue.sh
+++ b/contrib/report-issue.sh
@@ -29,41 +29,41 @@
 # this should always match the template from CONTRIBUTING.md
 	cat <<- EOM
 	Description of problem:
-	
-	
+
+
 	\`docker version\`:
 	`${DOCKER_COMMAND} -D version`
-	
-	
+
+
 	\`docker info\`:
 	`${DOCKER_COMMAND} -D info`
-	
-	
+
+
 	\`uname -a\`:
 	`uname -a`
-	
-	
+
+
 	Environment details (AWS, VirtualBox, physical, etc.):
-	
-	
+
+
 	How reproducible:
-	
-	
+
+
 	Steps to Reproduce:
 	1.
 	2.
 	3.
-	
-	
+
+
 	Actual Results:
-	
-	
+
+
 	Expected Results:
-	
-	
+
+
 	Additional info:
-	
-	
+
+
 	EOM
 }
 
@@ -81,7 +81,7 @@
 read -r -n 1 use_sudo
 echo ""
 
-if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then 
+if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
 	export DOCKER_COMMAND="sudo ${DOCKER}"
 fi
 
diff --git a/contrib/syntax/nano/Dockerfile.nanorc b/contrib/syntax/nano/Dockerfile.nanorc
new file mode 100644
index 0000000..80e56df
--- /dev/null
+++ b/contrib/syntax/nano/Dockerfile.nanorc
@@ -0,0 +1,26 @@
+## Syntax highlighting for Dockerfiles
+syntax "Dockerfile" "Dockerfile[^/]*$"
+
+## Keywords
+icolor red "^(FROM|MAINTAINER|RUN|CMD|LABEL|EXPOSE|ENV|ADD|COPY|ENTRYPOINT|VOLUME|USER|WORKDIR|ONBUILD)[[:space:]]"
+
+## Brackets & parenthesis
+color brightgreen "(\(|\)|\[|\])"
+
+## Double ampersand
+color brightmagenta "&&"
+
+## Comments
+icolor cyan "^[[:space:]]*#.*$"
+
+## Blank space at EOL
+color ,green "[[:space:]]+$"
+
+## Strings, single-quoted
+color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!"
+
+## Strings, double-quoted
+color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!"
+
+## Single and double quotes
+color brightyellow "('|\")"
diff --git a/contrib/syntax/nano/README.md b/contrib/syntax/nano/README.md
new file mode 100644
index 0000000..5985208
--- /dev/null
+++ b/contrib/syntax/nano/README.md
@@ -0,0 +1,32 @@
+Dockerfile.nanorc
+=================
+
+Dockerfile syntax highlighting for nano
+
+Single User Installation
+------------------------
+1. Create a nano syntax directory in your home directory:
+ * `mkdir -p ~/.nano/syntax`
+
+2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/`
+ * `cp Dockerfile.nanorc ~/.nano/syntax/`
+
+3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file
+  ```
+## Dockerfile files
+include "~/.nano/syntax/Dockerfile.nanorc"
+  ```
+
+System Wide Installation
+------------------------
+1. Create a nano syntax directory: 
+  * `mkdir /usr/local/share/nano`
+
+2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano`
+  * `cp Dockerfile.nanorc /usr/local/share/nano/`
+
+3. Add the following to your `/etc/nanorc`:
+  ```
+## Dockerfile files
+include "/usr/local/share/nano/Dockerfile.nanorc"
+  ```
diff --git a/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
index 75efc2e..c73ae21 100644
--- a/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
+++ b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage
@@ -18,12 +18,12 @@
 				<key>0</key>
 				<dict>
 					<key>name</key>
-					<string>keyword.control.dockerfile</string>					
+					<string>keyword.control.dockerfile</string>
 				</dict>
 				<key>1</key>
 				<dict>
 					<key>name</key>
-					<string>keyword.other.special-method.dockerfile</string>					
+					<string>keyword.other.special-method.dockerfile</string>
 				</dict>
 			</dict>
 		</dict>
@@ -35,12 +35,12 @@
 				<key>0</key>
 				<dict>
 					<key>name</key>
-					<string>keyword.operator.dockerfile</string>					
+					<string>keyword.operator.dockerfile</string>
 				</dict>
 				<key>1</key>
 				<dict>
 					<key>name</key>
-					<string>keyword.other.special-method.dockerfile</string>					
+					<string>keyword.other.special-method.dockerfile</string>
 				</dict>
 			</dict>
 		</dict>
diff --git a/contrib/syntax/vim/doc/dockerfile.txt b/contrib/syntax/vim/doc/dockerfile.txt
index 37cc7be..e69e2b7 100644
--- a/contrib/syntax/vim/doc/dockerfile.txt
+++ b/contrib/syntax/vim/doc/dockerfile.txt
@@ -1,6 +1,6 @@
 *dockerfile.txt*  Syntax highlighting for Dockerfiles
 
-Author: Honza Pokorny <http://honza.ca>
+Author: Honza Pokorny <https://honza.ca>
 License: BSD
 
 INSTALLATION                                                     *installation*
diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim
index 36691e2..bd09268 100644
--- a/contrib/syntax/vim/syntax/dockerfile.vim
+++ b/contrib/syntax/vim/syntax/dockerfile.vim
@@ -1,5 +1,5 @@
 " dockerfile.vim - Syntax highlighting for Dockerfiles
-" Maintainer:   Honza Pokorny <http://honza.ca>
+" Maintainer:   Honza Pokorny <https://honza.ca>
 " Version:      0.5
 
 
diff --git a/daemon/attach.go b/daemon/attach.go
index 967c863..5193cf1 100644
--- a/daemon/attach.go
+++ b/daemon/attach.go
@@ -1,214 +1,61 @@
 package daemon
 
 import (
-	"encoding/json"
 	"io"
-	"os"
-	"sync"
-	"time"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/jsonlog"
-	"github.com/docker/docker/pkg/promise"
-	"github.com/docker/docker/utils"
+	"github.com/docker/docker/pkg/stdcopy"
 )
 
-func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-
-	var (
-		name   = job.Args[0]
-		logs   = job.GetenvBool("logs")
-		stream = job.GetenvBool("stream")
-		stdin  = job.GetenvBool("stdin")
-		stdout = job.GetenvBool("stdout")
-		stderr = job.GetenvBool("stderr")
-	)
-
-	container, err := daemon.Get(name)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	//logs
-	if logs {
-		cLog, err := container.ReadLog("json")
-		if err != nil && os.IsNotExist(err) {
-			// Legacy logs
-			log.Debugf("Old logs format")
-			if stdout {
-				cLog, err := container.ReadLog("stdout")
-				if err != nil {
-					log.Errorf("Error reading logs (stdout): %s", err)
-				} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
-					log.Errorf("Error streaming logs (stdout): %s", err)
-				}
-			}
-			if stderr {
-				cLog, err := container.ReadLog("stderr")
-				if err != nil {
-					log.Errorf("Error reading logs (stderr): %s", err)
-				} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
-					log.Errorf("Error streaming logs (stderr): %s", err)
-				}
-			}
-		} else if err != nil {
-			log.Errorf("Error reading logs (json): %s", err)
-		} else {
-			dec := json.NewDecoder(cLog)
-			for {
-				l := &jsonlog.JSONLog{}
-
-				if err := dec.Decode(l); err == io.EOF {
-					break
-				} else if err != nil {
-					log.Errorf("Error streaming logs: %s", err)
-					break
-				}
-				if l.Stream == "stdout" && stdout {
-					io.WriteString(job.Stdout, l.Log)
-				}
-				if l.Stream == "stderr" && stderr {
-					io.WriteString(job.Stderr, l.Log)
-				}
-			}
-		}
-	}
-
-	//stream
-	if stream {
-		var (
-			cStdin           io.ReadCloser
-			cStdout, cStderr io.Writer
-		)
-
-		if stdin {
-			r, w := io.Pipe()
-			go func() {
-				defer w.Close()
-				defer log.Debugf("Closing buffered stdin pipe")
-				io.Copy(w, job.Stdin)
-			}()
-			cStdin = r
-		}
-		if stdout {
-			cStdout = job.Stdout
-		}
-		if stderr {
-			cStderr = job.Stderr
-		}
-
-		<-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdout, cStderr)
-		// If we are in stdinonce mode, wait for the process to end
-		// otherwise, simply return
-		if container.Config.StdinOnce && !container.Config.Tty {
-			container.WaitStop(-1 * time.Second)
-		}
-	}
-	return engine.StatusOK
+type ContainerAttachWithLogsConfig struct {
+	InStream                       io.ReadCloser
+	OutStream                      io.Writer
+	UseStdin, UseStdout, UseStderr bool
+	Logs, Stream                   bool
+	Multiplex                      bool
 }
 
-func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
-	var (
-		cStdout, cStderr io.ReadCloser
-		cStdin           io.WriteCloser
-		wg               sync.WaitGroup
-		errors           = make(chan error, 3)
-	)
-
-	if stdin != nil && openStdin {
-		cStdin = streamConfig.StdinPipe()
-		wg.Add(1)
+func (daemon *Daemon) ContainerAttachWithLogs(name string, c *ContainerAttachWithLogsConfig) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
 	}
 
-	if stdout != nil {
-		cStdout = streamConfig.StdoutPipe()
-		wg.Add(1)
+	var errStream io.Writer
+
+	if !container.Config.Tty && c.Multiplex {
+		errStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stderr)
+		c.OutStream = stdcopy.NewStdWriter(c.OutStream, stdcopy.Stdout)
+	} else {
+		errStream = c.OutStream
 	}
 
-	if stderr != nil {
-		cStderr = streamConfig.StderrPipe()
-		wg.Add(1)
+	var stdin io.ReadCloser
+	var stdout, stderr io.Writer
+
+	if c.UseStdin {
+		stdin = c.InStream
+	}
+	if c.UseStdout {
+		stdout = c.OutStream
+	}
+	if c.UseStderr {
+		stderr = errStream
 	}
 
-	// Connect stdin of container to the http conn.
-	go func() {
-		if stdin == nil || !openStdin {
-			return
-		}
-		log.Debugf("attach: stdin: begin")
-		defer func() {
-			if stdinOnce && !tty {
-				cStdin.Close()
-			} else {
-				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
-				if cStdout != nil {
-					cStdout.Close()
-				}
-				if cStderr != nil {
-					cStderr.Close()
-				}
-			}
-			wg.Done()
-			log.Debugf("attach: stdin: end")
-		}()
+	return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
+}
 
-		var err error
-		if tty {
-			_, err = utils.CopyEscapable(cStdin, stdin)
-		} else {
-			_, err = io.Copy(cStdin, stdin)
+type ContainerWsAttachWithLogsConfig struct {
+	InStream             io.ReadCloser
+	OutStream, ErrStream io.Writer
+	Logs, Stream         bool
+}
 
-		}
-		if err == io.ErrClosedPipe {
-			err = nil
-		}
-		if err != nil {
-			log.Errorf("attach: stdin: %s", err)
-			errors <- err
-			return
-		}
-	}()
-
-	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
-		if stream == nil {
-			return
-		}
-		defer func() {
-			// Make sure stdin gets closed
-			if stdin != nil {
-				stdin.Close()
-			}
-			streamPipe.Close()
-			wg.Done()
-			log.Debugf("attach: %s: end", name)
-		}()
-
-		log.Debugf("attach: %s: begin", name)
-		_, err := io.Copy(stream, streamPipe)
-		if err == io.ErrClosedPipe {
-			err = nil
-		}
-		if err != nil {
-			log.Errorf("attach: %s: %v", name, err)
-			errors <- err
-		}
+func (daemon *Daemon) ContainerWsAttachWithLogs(name string, c *ContainerWsAttachWithLogsConfig) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
 	}
 
-	go attachStream("stdout", stdout, cStdout)
-	go attachStream("stderr", stderr, cStderr)
-
-	return promise.Go(func() error {
-		wg.Wait()
-		close(errors)
-		for err := range errors {
-			if err != nil {
-				return err
-			}
-		}
-		return nil
-	})
+	return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
 }
diff --git a/daemon/changes.go b/daemon/changes.go
index faa4323..55b230b 100644
--- a/daemon/changes.go
+++ b/daemon/changes.go
@@ -1,37 +1,13 @@
 package daemon
 
-import (
-	"github.com/docker/docker/engine"
-)
+import "github.com/docker/docker/pkg/archive"
 
-func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
-	}
-	name := job.Args[0]
-
-	container, error := daemon.Get(name)
-	if error != nil {
-		return job.Error(error)
-	}
-
-	outs := engine.NewTable("", 0)
-	changes, err := container.Changes()
+// ContainerChanges returns a list of container fs changes
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
+	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 
-	for _, change := range changes {
-		out := &engine.Env{}
-		if err := out.Import(change); err != nil {
-			return job.Error(err)
-		}
-		outs.Add(out)
-	}
-
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-
-	return engine.StatusOK
+	return container.Changes()
 }
diff --git a/daemon/commit.go b/daemon/commit.go
index f1496a4..28be682 100644
--- a/daemon/commit.go
+++ b/daemon/commit.go
@@ -1,54 +1,18 @@
 package daemon
 
 import (
-	"bytes"
-	"encoding/json"
-
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
-	}
-	name := job.Args[0]
-
-	container, err := daemon.Get(name)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	var (
-		config       = container.Config
-		stdoutBuffer = bytes.NewBuffer(nil)
-		newConfig    runconfig.Config
-	)
-
-	buildConfigJob := daemon.eng.Job("build_config")
-	buildConfigJob.Stdout.Add(stdoutBuffer)
-	buildConfigJob.Setenv("changes", job.Getenv("changes"))
-	// FIXME this should be remove when we remove deprecated config param
-	buildConfigJob.Setenv("config", job.Getenv("config"))
-
-	if err := buildConfigJob.Run(); err != nil {
-		return job.Error(err)
-	}
-	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
-		return job.Error(err)
-	}
-
-	if err := runconfig.Merge(&newConfig, config); err != nil {
-		return job.Error(err)
-	}
-
-	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
-	if err != nil {
-		return job.Error(err)
-	}
-	job.Printf("%s\n", img.ID)
-	return engine.StatusOK
+type ContainerCommitConfig struct {
+	Pause   bool
+	Repo    string
+	Tag     string
+	Author  string
+	Comment string
+	Changes []string
+	Config  *runconfig.Config
 }
 
 // Commit creates a new filesystem image from the current state of a container.
@@ -68,7 +32,11 @@
 	if err != nil {
 		return nil, err
 	}
-	defer rwTar.Close()
+	defer func() {
+		if rwTar != nil {
+			rwTar.Close()
+		}
+	}()
 
 	// Create a new image from the container's base layers + a new layer from container changes
 	var (
@@ -89,7 +57,7 @@
 
 	// Register the image if needed
 	if repository != "" {
-		if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil {
+		if err := daemon.repositories.Tag(repository, tag, img.ID, true); err != nil {
 			return img, err
 		}
 	}
diff --git a/daemon/config.go b/daemon/config.go
index 9b38fde..77badfa 100644
--- a/daemon/config.go
+++ b/daemon/config.go
@@ -3,10 +3,8 @@
 import (
 	"net"
 
-	"github.com/docker/docker/daemon/networkdriver"
 	"github.com/docker/docker/opts"
 	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/docker/runconfig"
 )
 
@@ -15,80 +13,80 @@
 	disableNetworkBridge = "none"
 )
 
-// Config define the configuration of a docker daemon
-// These are the configuration settings that you pass
-// to the docker daemon when you launch it with say: `docker -d -e lxc`
-// FIXME: separate runtime configuration from http api configuration
-type Config struct {
-	Pidfile                     string
-	Root                        string
-	AutoRestart                 bool
-	Dns                         []string
-	DnsSearch                   []string
-	EnableIPv6                  bool
-	EnableIptables              bool
-	EnableIpForward             bool
-	EnableIpMasq                bool
-	DefaultIp                   net.IP
-	BridgeIface                 string
-	BridgeIP                    string
-	FixedCIDR                   string
-	FixedCIDRv6                 string
-	InterContainerCommunication bool
-	GraphDriver                 string
-	GraphOptions                []string
-	ExecDriver                  string
-	Mtu                         int
-	SocketGroup                 string
-	EnableCors                  bool
-	CorsHeaders                 string
-	DisableNetwork              bool
-	EnableSelinuxSupport        bool
-	Context                     map[string][]string
-	TrustKeyPath                string
-	Labels                      []string
-	Ulimits                     map[string]*ulimit.Ulimit
-	LogConfig                   runconfig.LogConfig
+// CommonConfig defines the configuration of a docker daemon which are
+// common across platforms.
+type CommonConfig struct {
+	AutoRestart bool
+	// Bridge holds bridge network specific configuration.
+	Bridge         bridgeConfig
+	Context        map[string][]string
+	CorsHeaders    string
+	DisableNetwork bool
+	Dns            []string
+	DnsSearch      []string
+	EnableCors     bool
+	ExecDriver     string
+	ExecRoot       string
+	GraphDriver    string
+	Labels         []string
+	LogConfig      runconfig.LogConfig
+	Mtu            int
+	Pidfile        string
+	Root           string
+	TrustKeyPath   string
 }
 
-// InstallFlags adds command-line options to the top-level flag parser for
+// bridgeConfig stores all the bridge driver specific
+// configuration.
+type bridgeConfig struct {
+	EnableIPv6                  bool
+	EnableIPTables              bool
+	EnableIPForward             bool
+	EnableIPMasq                bool
+	EnableUserlandProxy         bool
+	DefaultIP                   net.IP
+	Iface                       string
+	IP                          string
+	FixedCIDR                   string
+	FixedCIDRv6                 string
+	DefaultGatewayIPv4          string
+	DefaultGatewayIPv6          string
+	InterContainerCommunication bool
+}
+
+// InstallCommonFlags adds command-line options to the top-level flag parser for
 // the current process.
 // Subsequent calls to `flag.Parse` will populate config with values parsed
 // from the command-line.
-func (config *Config) InstallFlags() {
-	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
-	flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Root of the Docker runtime")
+
+func (config *Config) InstallCommonFlags() {
+	flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, "Path to use for daemon PID file")
+	flag.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, "Root of the Docker runtime")
+	flag.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", "Root of the Docker execdriver")
 	flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
-	flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules")
-	flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
-	flag.BoolVar(&config.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading")
-	flag.BoolVar(&config.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
-	flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Specify network bridge IP")
-	flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge")
-	flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs")
-	flag.StringVar(&config.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", "IPv6 subnet for fixed IPs")
-	flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
+	flag.BoolVar(&config.Bridge.EnableIPTables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules")
+	flag.BoolVar(&config.Bridge.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
+	flag.BoolVar(&config.Bridge.EnableIPMasq, []string{"-ip-masq"}, true, "Enable IP masquerading")
+	flag.BoolVar(&config.Bridge.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking")
+	flag.StringVar(&config.Bridge.IP, []string{"#bip", "-bip"}, "", "Specify network bridge IP")
+	flag.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge")
+	flag.StringVar(&config.Bridge.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs")
+	flag.StringVar(&config.Bridge.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", "IPv6 subnet for fixed IPs")
+	flag.StringVar(&config.Bridge.DefaultGatewayIPv4, []string{"-default-gateway"}, "", "Container default gateway IPv4 address")
+	flag.StringVar(&config.Bridge.DefaultGatewayIPv6, []string{"-default-gateway-v6"}, "", "Container default gateway IPv6 address")
+	flag.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication")
 	flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Storage driver to use")
-	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Exec driver to use")
-	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support")
+	flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, defaultExec, "Exec driver to use")
 	flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU")
-	flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket")
 	flag.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, "Enable CORS headers in the remote API, this is deprecated by --api-cors-header")
 	flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API")
-	opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
-	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
+	opts.IPVar(&config.Bridge.DefaultIP, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports")
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
 	opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use")
 	opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use")
 	opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon")
-	config.Ulimits = make(map[string]*ulimit.Ulimit)
-	opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
-	flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver")
-}
+	flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Default driver for container logs")
+	opts.LogOptsVar(config.LogConfig.Config, []string{"-log-opt"}, "Set log driver options")
+	flag.BoolVar(&config.Bridge.EnableUserlandProxy, []string{"-userland-proxy"}, true, "Use userland proxy for loopback traffic")
 
-func getDefaultNetworkMtu() int {
-	if iface, err := networkdriver.GetDefaultRouteIface(); err == nil {
-		return iface.MTU
-	}
-	return defaultNetworkMtu
 }
diff --git a/daemon/config_linux.go b/daemon/config_linux.go
new file mode 100644
index 0000000..340bc89
--- /dev/null
+++ b/daemon/config_linux.go
@@ -0,0 +1,44 @@
+package daemon
+
+import (
+	"github.com/docker/docker/opts"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/ulimit"
+)
+
+var (
+	defaultPidFile = "/var/run/docker.pid"
+	defaultGraph   = "/var/lib/docker"
+	defaultExec    = "native"
+)
+
+// Config defines the configuration of a docker daemon.
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with say: `docker -d -e lxc`
+type Config struct {
+	CommonConfig
+
+	// Fields below here are platform specific.
+	EnableSelinuxSupport bool
+	ExecOptions          []string
+	GraphOptions         []string
+	SocketGroup          string
+	Ulimits              map[string]*ulimit.Ulimit
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+	// First handle install flags which are consistent cross-platform
+	config.InstallCommonFlags()
+
+	// Then platform-specific install flags
+	opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options")
+	opts.ListVar(&config.ExecOptions, []string{"-exec-opt"}, "Set exec driver options")
+	flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support")
+	flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket")
+	config.Ulimits = make(map[string]*ulimit.Ulimit)
+	opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers")
+}
diff --git a/daemon/config_windows.go b/daemon/config_windows.go
new file mode 100644
index 0000000..4731767
--- /dev/null
+++ b/daemon/config_windows.go
@@ -0,0 +1,33 @@
+package daemon
+
+import (
+	"os"
+)
+
+var (
+	defaultPidFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker.pid"
+	defaultGraph   = os.Getenv("programdata") + string(os.PathSeparator) + "docker"
+	defaultExec    = "windows"
+)
+
+// Config defines the configuration of a docker daemon.
+// These are the configuration settings that you pass
+// to the docker daemon when you launch it with say: `docker -d -e windows`
+type Config struct {
+	CommonConfig
+
+	// Fields below here are platform specific. (There are none presently
+	// for the Windows daemon.)
+}
+
+// InstallFlags adds command-line options to the top-level flag parser for
+// the current process.
+// Subsequent calls to `flag.Parse` will populate config with values parsed
+// from the command-line.
+func (config *Config) InstallFlags() {
+	// First handle install flags which are consistent cross-platform
+	config.InstallCommonFlags()
+
+	// Then platform-specific install flags. There are none presently on Windows
+
+}
diff --git a/daemon/container.go b/daemon/container.go
index 4f6ff2e..d91a68b 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -1,49 +1,38 @@
 package daemon
 
 import (
-	"bytes"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
 	"path/filepath"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
 
-	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/configs"
-	"github.com/docker/libcontainer/devices"
 	"github.com/docker/libcontainer/label"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/logger/jsonfilelog"
-	"github.com/docker/docker/daemon/logger/syslog"
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/links"
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/broadcastwriter"
-	"github.com/docker/docker/pkg/common"
-	"github.com/docker/docker/pkg/directory"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/networkfs/etchosts"
-	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/symlink"
-	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
+	"github.com/docker/docker/volume"
 )
 
-const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
 var (
 	ErrNotATTY               = errors.New("The PTY is not a file")
 	ErrNoTTY                 = errors.New("No PTY found")
@@ -58,56 +47,43 @@
 	stdinPipe io.WriteCloser
 }
 
-type Container struct {
+// CommonContainer holds the settings for a container which are applicable
+// across all platforms supported by the daemon.
+type CommonContainer struct {
+	StreamConfig
+
 	*State `json:"State"` // Needed for remote api version <= 1.11
 	root   string         // Path to the "home" of the container, including metadata.
 	basefs string         // Path to the graphdriver mountpoint
 
-	ID string
-
-	Created time.Time
-
-	Path string
-	Args []string
-
-	Config  *runconfig.Config
-	ImageID string `json:"Image"`
-
-	NetworkSettings *NetworkSettings
-
-	ResolvConfPath string
-	HostnamePath   string
-	HostsPath      string
-	LogPath        string
-	Name           string
-	Driver         string
-	ExecDriver     string
-
-	command *execdriver.Command
-	StreamConfig
-
-	daemon                   *Daemon
+	ID                       string
+	Created                  time.Time
+	Path                     string
+	Args                     []string
+	Config                   *runconfig.Config
+	ImageID                  string `json:"Image"`
+	NetworkSettings          *network.Settings
+	ResolvConfPath           string
+	HostnamePath             string
+	HostsPath                string
+	LogPath                  string
+	Name                     string
+	Driver                   string
+	ExecDriver               string
 	MountLabel, ProcessLabel string
-	AppArmorProfile          string
 	RestartCount             int
 	UpdateDns                bool
+	MountPoints              map[string]*mountPoint
 
-	// Maps container paths to volume paths.  The key in this is the path to which
-	// the volume is being mounted inside the container.  Value is the path of the
-	// volume on disk
-	Volumes map[string]string
-	// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
-	// Easier than migrating older container configs :)
-	VolumesRW  map[string]bool
 	hostConfig *runconfig.HostConfig
+	command    *execdriver.Command
 
-	activeLinks  map[string]*links.Link
 	monitor      *containerMonitor
 	execCommands *execStore
+	daemon       *Daemon
 	// logDriver for closing
-	logDriver          logger.Logger
-	logCopier          *logger.Copier
-	AppliedVolumesFrom map[string]struct{}
+	logDriver logger.Logger
+	logCopier *logger.Copier
 }
 
 func (container *Container) FromDisk() error {
@@ -147,8 +123,7 @@
 		return err
 	}
 
-	err = ioutil.WriteFile(pth, data, 0666)
-	if err != nil {
+	if err := ioutil.WriteFile(pth, data, 0666); err != nil {
 		return err
 	}
 
@@ -177,11 +152,13 @@
 		return nil
 	}
 
-	data, err := ioutil.ReadFile(pth)
+	f, err := os.Open(pth)
 	if err != nil {
 		return err
 	}
-	return json.Unmarshal(data, container.hostConfig)
+	defer f.Close()
+
+	return json.NewDecoder(f).Decode(&container.hostConfig)
 }
 
 func (container *Container) WriteHostConfig() error {
@@ -200,158 +177,48 @@
 
 func (container *Container) LogEvent(action string) {
 	d := container.daemon
-	if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.ImageID)).Run(); err != nil {
-		log.Errorf("Error logging event %s for %s: %s", action, container.ID, err)
-	}
+	d.EventsService.Log(
+		action,
+		container.ID,
+		container.Config.Image,
+	)
 }
 
-func (container *Container) getResourcePath(path string) (string, error) {
+// Evaluates `path` in the scope of the container's basefs, with proper path
+// sanitisation. Symlinks are all scoped to the basefs of the container, as
+// though the container's basefs was `/`.
+//
+// The basefs of a container is the host-facing path which is bind-mounted as
+// `/` inside the container. This method is essentially used to access a
+// particular path inside the container as though you were a process in that
+// container.
+//
+// NOTE: The returned path is *only* safely scoped inside the container's basefs
+//       if no component of the returned path changes (such as a component
+//       symlinking to a different path) between using this method and using the
+//       path. See symlink.FollowSymlinkInScope for more details.
+func (container *Container) GetResourcePath(path string) (string, error) {
 	cleanPath := filepath.Join("/", path)
 	return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
 }
 
-func (container *Container) getRootResourcePath(path string) (string, error) {
+// Evaluates `path` in the scope of the container's root, with proper path
+// sanitisation. Symlinks are all scoped to the root of the container, as
+// though the container's root was `/`.
+//
+// The root of a container is the host-facing configuration metadata directory.
+// Only use this method to safely access the container's `container.json` or
+// other metadata files. If in doubt, use container.GetResourcePath.
+//
+// NOTE: The returned path is *only* safely scoped inside the container's root
+//       if no component of the returned path changes (such as a component
+//       symlinking to a different path) between using this method and using the
+//       path. See symlink.FollowSymlinkInScope for more details.
+func (container *Container) GetRootResourcePath(path string) (string, error) {
 	cleanPath := filepath.Join("/", path)
 	return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
 }
 
-func populateCommand(c *Container, env []string) error {
-	en := &execdriver.Network{
-		Mtu:       c.daemon.config.Mtu,
-		Interface: nil,
-	}
-
-	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
-	switch parts[0] {
-	case "none":
-	case "host":
-		en.HostNetworking = true
-	case "bridge", "": // empty string to support existing containers
-		if !c.Config.NetworkDisabled {
-			network := c.NetworkSettings
-			en.Interface = &execdriver.NetworkInterface{
-				Gateway:              network.Gateway,
-				Bridge:               network.Bridge,
-				IPAddress:            network.IPAddress,
-				IPPrefixLen:          network.IPPrefixLen,
-				MacAddress:           network.MacAddress,
-				LinkLocalIPv6Address: network.LinkLocalIPv6Address,
-				GlobalIPv6Address:    network.GlobalIPv6Address,
-				GlobalIPv6PrefixLen:  network.GlobalIPv6PrefixLen,
-				IPv6Gateway:          network.IPv6Gateway,
-			}
-		}
-	case "container":
-		nc, err := c.getNetworkedContainer()
-		if err != nil {
-			return err
-		}
-		en.ContainerID = nc.ID
-	default:
-		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
-	}
-
-	ipc := &execdriver.Ipc{}
-
-	if c.hostConfig.IpcMode.IsContainer() {
-		ic, err := c.getIpcContainer()
-		if err != nil {
-			return err
-		}
-		ipc.ContainerID = ic.ID
-	} else {
-		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
-	}
-
-	pid := &execdriver.Pid{}
-	pid.HostPid = c.hostConfig.PidMode.IsHost()
-
-	// Build lists of devices allowed and created within the container.
-	userSpecifiedDevices := make([]*configs.Device, len(c.hostConfig.Devices))
-	for i, deviceMapping := range c.hostConfig.Devices {
-		device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
-		if err != nil {
-			return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
-		}
-		device.Path = deviceMapping.PathInContainer
-		userSpecifiedDevices[i] = device
-	}
-	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
-
-	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
-
-	// TODO: this can be removed after lxc-conf is fully deprecated
-	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
-	if err != nil {
-		return err
-	}
-
-	var rlimits []*ulimit.Rlimit
-	ulimits := c.hostConfig.Ulimits
-
-	// Merge ulimits with daemon defaults
-	ulIdx := make(map[string]*ulimit.Ulimit)
-	for _, ul := range ulimits {
-		ulIdx[ul.Name] = ul
-	}
-	for name, ul := range c.daemon.config.Ulimits {
-		if _, exists := ulIdx[name]; !exists {
-			ulimits = append(ulimits, ul)
-		}
-	}
-
-	for _, limit := range ulimits {
-		rl, err := limit.GetRlimit()
-		if err != nil {
-			return err
-		}
-		rlimits = append(rlimits, rl)
-	}
-
-	resources := &execdriver.Resources{
-		Memory:     c.hostConfig.Memory,
-		MemorySwap: c.hostConfig.MemorySwap,
-		CpuShares:  c.hostConfig.CpuShares,
-		CpusetCpus: c.hostConfig.CpusetCpus,
-		Rlimits:    rlimits,
-	}
-
-	processConfig := execdriver.ProcessConfig{
-		Privileged: c.hostConfig.Privileged,
-		Entrypoint: c.Path,
-		Arguments:  c.Args,
-		Tty:        c.Config.Tty,
-		User:       c.Config.User,
-	}
-
-	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
-	processConfig.Env = env
-
-	c.command = &execdriver.Command{
-		ID:                 c.ID,
-		Rootfs:             c.RootfsPath(),
-		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
-		InitPath:           "/.dockerinit",
-		WorkingDir:         c.Config.WorkingDir,
-		Network:            en,
-		Ipc:                ipc,
-		Pid:                pid,
-		Resources:          resources,
-		AllowedDevices:     allowedDevices,
-		AutoCreatedDevices: autoCreatedDevices,
-		CapAdd:             c.hostConfig.CapAdd,
-		CapDrop:            c.hostConfig.CapDrop,
-		ProcessConfig:      processConfig,
-		ProcessLabel:       c.GetProcessLabel(),
-		MountLabel:         c.GetMountLabel(),
-		LxcConfig:          lxcConfig,
-		AppArmorProfile:    c.AppArmorProfile,
-		CgroupParent:       c.hostConfig.CgroupParent,
-	}
-
-	return nil
-}
-
 func (container *Container) Start() (err error) {
 	container.Lock()
 	defer container.Unlock()
@@ -378,22 +245,13 @@
 		}
 	}()
 
-	if err := container.setupContainerDns(); err != nil {
-		return err
-	}
 	if err := container.Mount(); err != nil {
 		return err
 	}
 	if err := container.initializeNetworking(); err != nil {
 		return err
 	}
-	if err := container.updateParentsHosts(); err != nil {
-		return err
-	}
 	container.verifyDaemonSettings()
-	if err := container.prepareVolumes(); err != nil {
-		return err
-	}
 	linkedEnv, err := container.setupLinkedContainers()
 	if err != nil {
 		return err
@@ -405,10 +263,13 @@
 	if err := populateCommand(container, env); err != nil {
 		return err
 	}
-	if err := container.setupMounts(); err != nil {
+
+	mounts, err := container.setupMounts()
+	if err != nil {
 		return err
 	}
 
+	container.command.Mounts = mounts
 	return container.waitForStart()
 }
 
@@ -467,212 +328,30 @@
 	return ioutils.NewBufReader(reader)
 }
 
-func (container *Container) buildHostnameFile() error {
-	hostnamePath, err := container.getRootResourcePath("hostname")
-	if err != nil {
-		return err
-	}
-	container.HostnamePath = hostnamePath
-
-	if container.Config.Domainname != "" {
-		return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
-	}
-	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
-}
-
-func (container *Container) buildHostsFiles(IP string) error {
-
-	hostsPath, err := container.getRootResourcePath("hosts")
-	if err != nil {
-		return err
-	}
-	container.HostsPath = hostsPath
-
-	var extraContent []etchosts.Record
-
-	children, err := container.daemon.Children(container.Name)
-	if err != nil {
-		return err
-	}
-
-	for linkAlias, child := range children {
-		_, alias := path.Split(linkAlias)
-		// allow access to the linked container via the alias, real name, and container hostname
-		aliasList := alias + " " + child.Config.Hostname
-		// only add the name if alias isn't equal to the name
-		if alias != child.Name[1:] {
-			aliasList = aliasList + " " + child.Name[1:]
-		}
-		extraContent = append(extraContent, etchosts.Record{Hosts: aliasList, IP: child.NetworkSettings.IPAddress})
-	}
-
-	for _, extraHost := range container.hostConfig.ExtraHosts {
-		// allow IPv6 addresses in extra hosts; only split on first ":"
-		parts := strings.SplitN(extraHost, ":", 2)
-		extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]})
-	}
-
-	return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, extraContent)
-}
-
-func (container *Container) buildHostnameAndHostsFiles(IP string) error {
-	if err := container.buildHostnameFile(); err != nil {
-		return err
-	}
-
-	return container.buildHostsFiles(IP)
-}
-
-func (container *Container) AllocateNetwork() error {
-	mode := container.hostConfig.NetworkMode
-	if container.Config.NetworkDisabled || !mode.IsPrivate() {
-		return nil
-	}
-
-	var (
-		env *engine.Env
-		err error
-		eng = container.daemon.eng
-	)
-
-	job := eng.Job("allocate_interface", container.ID)
-	job.Setenv("RequestedMac", container.Config.MacAddress)
-	if env, err = job.Stdout.AddEnv(); err != nil {
-		return err
-	}
-	if err = job.Run(); err != nil {
-		return err
-	}
-
-	// Error handling: At this point, the interface is allocated so we have to
-	// make sure that it is always released in case of error, otherwise we
-	// might leak resources.
-
-	if container.Config.PortSpecs != nil {
-		if err = migratePortMappings(container.Config, container.hostConfig); err != nil {
-			eng.Job("release_interface", container.ID).Run()
-			return err
-		}
-		container.Config.PortSpecs = nil
-		if err = container.WriteHostConfig(); err != nil {
-			eng.Job("release_interface", container.ID).Run()
-			return err
-		}
-	}
-
-	var (
-		portSpecs = make(nat.PortSet)
-		bindings  = make(nat.PortMap)
-	)
-
-	if container.Config.ExposedPorts != nil {
-		portSpecs = container.Config.ExposedPorts
-	}
-
-	if container.hostConfig.PortBindings != nil {
-		for p, b := range container.hostConfig.PortBindings {
-			bindings[p] = []nat.PortBinding{}
-			for _, bb := range b {
-				bindings[p] = append(bindings[p], nat.PortBinding{
-					HostIp:   bb.HostIp,
-					HostPort: bb.HostPort,
-				})
-			}
-		}
-	}
-
-	container.NetworkSettings.PortMapping = nil
-
-	for port := range portSpecs {
-		if err = container.allocatePort(eng, port, bindings); err != nil {
-			eng.Job("release_interface", container.ID).Run()
-			return err
-		}
-	}
-	container.WriteHostConfig()
-
-	container.NetworkSettings.Ports = bindings
-	container.NetworkSettings.Bridge = env.Get("Bridge")
-	container.NetworkSettings.IPAddress = env.Get("IP")
-	container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen")
-	container.NetworkSettings.MacAddress = env.Get("MacAddress")
-	container.NetworkSettings.Gateway = env.Get("Gateway")
-	container.NetworkSettings.LinkLocalIPv6Address = env.Get("LinkLocalIPv6")
-	container.NetworkSettings.LinkLocalIPv6PrefixLen = 64
-	container.NetworkSettings.GlobalIPv6Address = env.Get("GlobalIPv6")
-	container.NetworkSettings.GlobalIPv6PrefixLen = env.GetInt("GlobalIPv6PrefixLen")
-	container.NetworkSettings.IPv6Gateway = env.Get("IPv6Gateway")
-
-	return nil
-}
-
-func (container *Container) ReleaseNetwork() {
-	if container.Config.NetworkDisabled || !container.hostConfig.NetworkMode.IsPrivate() {
-		return
-	}
-	eng := container.daemon.eng
-
-	job := eng.Job("release_interface", container.ID)
-	job.SetenvBool("overrideShutdown", true)
-	job.Run()
-	container.NetworkSettings = &NetworkSettings{}
-}
-
 func (container *Container) isNetworkAllocated() bool {
 	return container.NetworkSettings.IPAddress != ""
 }
 
-func (container *Container) RestoreNetwork() error {
-	mode := container.hostConfig.NetworkMode
-	// Don't attempt a restore if we previously didn't allocate networking.
-	// This might be a legacy container with no network allocated, in which case the
-	// allocation will happen once and for all at start.
-	if !container.isNetworkAllocated() || container.Config.NetworkDisabled || !mode.IsPrivate() {
-		return nil
-	}
-
-	eng := container.daemon.eng
-
-	// Re-allocate the interface with the same IP and MAC address.
-	job := eng.Job("allocate_interface", container.ID)
-	job.Setenv("RequestedIP", container.NetworkSettings.IPAddress)
-	job.Setenv("RequestedMac", container.NetworkSettings.MacAddress)
-	if err := job.Run(); err != nil {
-		return err
-	}
-
-	// Re-allocate any previously allocated ports.
-	for port := range container.NetworkSettings.Ports {
-		if err := container.allocatePort(eng, port, container.NetworkSettings.Ports); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // cleanup releases any network resources allocated to the container along with any rules
 // around how containers are linked together.  It also unmounts the container's root filesystem.
 func (container *Container) cleanup() {
 	container.ReleaseNetwork()
 
-	// Disable all active links
-	if container.activeLinks != nil {
-		for _, link := range container.activeLinks {
-			link.Disable()
-		}
-	}
+	disableAllActiveLinks(container)
 
 	if err := container.Unmount(); err != nil {
-		log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
+		logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
 	}
 
 	for _, eConfig := range container.execCommands.s {
 		container.daemon.unregisterExecCommand(eConfig)
 	}
+
+	container.UnmountVolumes(false)
 }
 
 func (container *Container) KillSig(sig int) error {
-	log.Debugf("Sending %d to %s", sig, container.ID)
+	logrus.Debugf("Sending %d to %s", sig, container.ID)
 	container.Lock()
 	defer container.Unlock()
 
@@ -703,30 +382,52 @@
 func (container *Container) killPossiblyDeadProcess(sig int) error {
 	err := container.KillSig(sig)
 	if err == syscall.ESRCH {
-		log.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
+		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
 		return nil
 	}
 	return err
 }
 
 func (container *Container) Pause() error {
-	if container.IsPaused() {
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot Pause the container which is already paused
+	if container.Paused {
 		return fmt.Errorf("Container %s is already paused", container.ID)
 	}
-	if !container.IsRunning() {
+
+	// We cannot Pause the container which is not running
+	if !container.Running {
 		return fmt.Errorf("Container %s is not running", container.ID)
 	}
-	return container.daemon.Pause(container)
+
+	if err := container.daemon.execDriver.Pause(container.command); err != nil {
+		return err
+	}
+	container.Paused = true
+	return nil
 }
 
 func (container *Container) Unpause() error {
-	if !container.IsPaused() {
-		return fmt.Errorf("Container %s is not paused", container.ID)
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot unpause the container which is not paused
+	if !container.Paused {
+		return fmt.Errorf("Container %s is not paused, so what", container.ID)
 	}
-	if !container.IsRunning() {
+
+	// We cannot unpause the container which is not running
+	if !container.Running {
 		return fmt.Errorf("Container %s is not running", container.ID)
 	}
-	return container.daemon.Unpause(container)
+
+	if err := container.daemon.execDriver.Unpause(container.command); err != nil {
+		return err
+	}
+	container.Paused = false
+	return nil
 }
 
 func (container *Container) Kill() error {
@@ -736,21 +437,28 @@
 
 	// 1. Send SIGKILL
 	if err := container.killPossiblyDeadProcess(9); err != nil {
-		return err
+		// While normally we might "return err" here we're not going to
+		// because if we can't stop the container by this point then
+		// its probably because its already stopped. Meaning, between
+		// the time of the IsRunning() call above and now it stopped.
+		// Also, since the err return will be exec driver specific we can't
+		// look for any particular (common) error that would indicate
+		// that the process is already dead vs something else going wrong.
+		// So, instead we'll give it up to 2 more seconds to complete and if
+		// by that time the container is still running, then the error
+		// we got is probably valid and so we return it to the caller.
+
+		if container.IsRunning() {
+			container.WaitStop(2 * time.Second)
+			if container.IsRunning() {
+				return err
+			}
+		}
 	}
 
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
-	if _, err := container.WaitStop(10 * time.Second); err != nil {
-		// Ensure that we don't kill ourselves
-		if pid := container.GetPid(); pid != 0 {
-			log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", common.TruncateID(container.ID))
-			if err := syscall.Kill(pid, 9); err != nil {
-				if err != syscall.ESRCH {
-					return err
-				}
-				log.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
-			}
-		}
+	if err := killProcessDirectly(container); err != nil {
+		return err
 	}
 
 	container.WaitStop(-1 * time.Second)
@@ -764,7 +472,7 @@
 
 	// 1. Send a SIGTERM
 	if err := container.killPossiblyDeadProcess(15); err != nil {
-		log.Infof("Failed to send SIGTERM to the process, force killing")
+		logrus.Infof("Failed to send SIGTERM to the process, force killing")
 		if err := container.killPossiblyDeadProcess(9); err != nil {
 			return err
 		}
@@ -772,13 +480,14 @@
 
 	// 2. Wait for the process to exit on its own
 	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
-		log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
+		logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
 		// 3. If it doesn't, then send SIGKILL
 		if err := container.Kill(); err != nil {
 			container.WaitStop(-1 * time.Second)
 			return err
 		}
 	}
+
 	return nil
 }
 
@@ -803,26 +512,6 @@
 	return container.command.ProcessConfig.Terminal.Resize(h, w)
 }
 
-func (container *Container) ExportRw() (archive.Archive, error) {
-	if err := container.Mount(); err != nil {
-		return nil, err
-	}
-	if container.daemon == nil {
-		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
-	}
-	archive, err := container.daemon.Diff(container)
-	if err != nil {
-		container.Unmount()
-		return nil, err
-	}
-	return ioutils.NewReadCloserWrapper(archive, func() error {
-			err := archive.Close()
-			container.Unmount()
-			return err
-		}),
-		nil
-}
-
 func (container *Container) Export() (archive.Archive, error) {
 	if err := container.Mount(); err != nil {
 		return nil, err
@@ -866,24 +555,12 @@
 	return container.daemon.Unmount(container)
 }
 
-func (container *Container) logPath(name string) (string, error) {
-	return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name))
-}
-
-func (container *Container) ReadLog(name string) (io.Reader, error) {
-	pth, err := container.logPath(name)
-	if err != nil {
-		return nil, err
-	}
-	return os.Open(pth)
-}
-
 func (container *Container) hostConfigPath() (string, error) {
-	return container.getRootResourcePath("hostconfig.json")
+	return container.GetRootResourcePath("hostconfig.json")
 }
 
 func (container *Container) jsonPath() (string, error) {
-	return container.getRootResourcePath("config.json")
+	return container.GetRootResourcePath("config.json")
 }
 
 // This method must be exported to be used from the lxc template
@@ -899,80 +576,61 @@
 	return nil
 }
 
-// GetSize, return real size, virtual size
-func (container *Container) GetSize() (int64, int64) {
-	var (
-		sizeRw, sizeRootfs int64
-		err                error
-		driver             = container.daemon.driver
-	)
-
-	if err := container.Mount(); err != nil {
-		log.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
-		return sizeRw, sizeRootfs
-	}
-	defer container.Unmount()
-
-	initID := fmt.Sprintf("%s-init", container.ID)
-	sizeRw, err = driver.DiffSize(container.ID, initID)
-	if err != nil {
-		log.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
-		// FIXME: GetSize should return an error. Not changing it now in case
-		// there is a side-effect.
-		sizeRw = -1
-	}
-
-	if _, err = os.Stat(container.basefs); err != nil {
-		if sizeRootfs, err = directory.Size(container.basefs); err != nil {
-			sizeRootfs = -1
-		}
-	}
-	return sizeRw, sizeRootfs
-}
-
 func (container *Container) Copy(resource string) (io.ReadCloser, error) {
+	container.Lock()
+	defer container.Unlock()
+	var err error
 	if err := container.Mount(); err != nil {
 		return nil, err
 	}
-
-	basePath, err := container.getResourcePath(resource)
+	defer func() {
+		if err != nil {
+			// unmount any volumes
+			container.UnmountVolumes(true)
+			// unmount the container's rootfs
+			container.Unmount()
+		}
+	}()
+	mounts, err := container.setupMounts()
 	if err != nil {
-		container.Unmount()
 		return nil, err
 	}
-
-	// Check if this is actually in a volume
-	for _, mnt := range container.VolumeMounts() {
-		if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) {
-			return mnt.Export(resource)
+	for _, m := range mounts {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			return nil, err
+		}
+		if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
+			return nil, err
 		}
 	}
-
+	basePath, err := container.GetResourcePath(resource)
+	if err != nil {
+		return nil, err
+	}
 	stat, err := os.Stat(basePath)
 	if err != nil {
-		container.Unmount()
 		return nil, err
 	}
 	var filter []string
 	if !stat.IsDir() {
-		d, f := path.Split(basePath)
+		d, f := filepath.Split(basePath)
 		basePath = d
 		filter = []string{f}
 	} else {
-		filter = []string{path.Base(basePath)}
-		basePath = path.Dir(basePath)
+		filter = []string{filepath.Base(basePath)}
+		basePath = filepath.Dir(basePath)
 	}
-
 	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
 		Compression:  archive.Uncompressed,
 		IncludeFiles: filter,
 	})
 	if err != nil {
-		container.Unmount()
 		return nil, err
 	}
 	return ioutils.NewReadCloserWrapper(archive, func() error {
 			err := archive.Close()
+			container.UnmountVolumes(true)
 			container.Unmount()
 			return err
 		}),
@@ -985,417 +643,54 @@
 	return exists
 }
 
-func (container *Container) GetPtyMaster() (libcontainer.Console, error) {
-	ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal)
-	if !ok {
-		return nil, ErrNoTTY
-	}
-	return ttyConsole.Master(), nil
-}
-
 func (container *Container) HostConfig() *runconfig.HostConfig {
-	container.Lock()
-	res := container.hostConfig
-	container.Unlock()
-	return res
+	return container.hostConfig
 }
 
 func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
-	container.Lock()
 	container.hostConfig = hostConfig
-	container.Unlock()
 }
 
-func (container *Container) DisableLink(name string) {
-	if container.activeLinks != nil {
-		if link, exists := container.activeLinks[name]; exists {
-			link.Disable()
-		} else {
-			log.Debugf("Could not find active link for %s", name)
-		}
+func (container *Container) getLogConfig() runconfig.LogConfig {
+	cfg := container.hostConfig.LogConfig
+	if cfg.Type != "" { // container has log driver configured
+		return cfg
 	}
+	// Use daemon's default log config for containers
+	return container.daemon.defaultLogConfig
 }
 
-func (container *Container) setupContainerDns() error {
-	if container.ResolvConfPath != "" {
-		// check if this is an existing container that needs DNS update:
-		if container.UpdateDns {
-			// read the host's resolv.conf, get the hash and call updateResolvConf
-			log.Debugf("Check container (%s) for update to resolv.conf - UpdateDns flag was set", container.ID)
-			latestResolvConf, latestHash := resolvconf.GetLastModified()
-
-			// clean container resolv.conf re: localhost nameservers and IPv6 NS (if IPv6 disabled)
-			updatedResolvConf, modified := resolvconf.FilterResolvDns(latestResolvConf, container.daemon.config.EnableIPv6)
-			if modified {
-				// changes have occurred during resolv.conf localhost cleanup: generate an updated hash
-				newHash, err := utils.HashData(bytes.NewReader(updatedResolvConf))
-				if err != nil {
-					return err
-				}
-				latestHash = newHash
-			}
-
-			if err := container.updateResolvConf(updatedResolvConf, latestHash); err != nil {
-				return err
-			}
-			// successful update of the restarting container; set the flag off
-			container.UpdateDns = false
-		}
-		return nil
-	}
-
-	var (
-		config = container.hostConfig
-		daemon = container.daemon
-	)
-
-	resolvConf, err := resolvconf.Get()
+func (container *Container) getLogger() (logger.Logger, error) {
+	cfg := container.getLogConfig()
+	c, err := logger.GetLogDriver(cfg.Type)
 	if err != nil {
-		return err
+		return nil, fmt.Errorf("Failed to get logging factory: %v", err)
 	}
-	container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
-	if err != nil {
-		return err
+	ctx := logger.Context{
+		Config:        cfg.Config,
+		ContainerID:   container.ID,
+		ContainerName: container.Name,
 	}
 
-	if config.NetworkMode != "host" {
-		// check configurations for any container/daemon dns settings
-		if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 {
-			var (
-				dns       = resolvconf.GetNameservers(resolvConf)
-				dnsSearch = resolvconf.GetSearchDomains(resolvConf)
-			)
-			if len(config.Dns) > 0 {
-				dns = config.Dns
-			} else if len(daemon.config.Dns) > 0 {
-				dns = daemon.config.Dns
-			}
-			if len(config.DnsSearch) > 0 {
-				dnsSearch = config.DnsSearch
-			} else if len(daemon.config.DnsSearch) > 0 {
-				dnsSearch = daemon.config.DnsSearch
-			}
-			return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch)
-		}
-
-		// replace any localhost/127.*, and remove IPv6 nameservers if IPv6 disabled in daemon
-		resolvConf, _ = resolvconf.FilterResolvDns(resolvConf, daemon.config.EnableIPv6)
-	}
-	//get a sha256 hash of the resolv conf at this point so we can check
-	//for changes when the host resolv.conf changes (e.g. network update)
-	resolvHash, err := utils.HashData(bytes.NewReader(resolvConf))
-	if err != nil {
-		return err
-	}
-	resolvHashFile := container.ResolvConfPath + ".hash"
-	if err = ioutil.WriteFile(resolvHashFile, []byte(resolvHash), 0644); err != nil {
-		return err
-	}
-	return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644)
-}
-
-// called when the host's resolv.conf changes to check whether container's resolv.conf
-// is unchanged by the container "user" since container start: if unchanged, the
-// container's resolv.conf will be updated to match the host's new resolv.conf
-func (container *Container) updateResolvConf(updatedResolvConf []byte, newResolvHash string) error {
-
-	if container.ResolvConfPath == "" {
-		return nil
-	}
-	if container.Running {
-		//set a marker in the hostConfig to update on next start/restart
-		container.UpdateDns = true
-		return nil
-	}
-
-	resolvHashFile := container.ResolvConfPath + ".hash"
-
-	//read the container's current resolv.conf and compute the hash
-	resolvBytes, err := ioutil.ReadFile(container.ResolvConfPath)
-	if err != nil {
-		return err
-	}
-	curHash, err := utils.HashData(bytes.NewReader(resolvBytes))
-	if err != nil {
-		return err
-	}
-
-	//read the hash from the last time we wrote resolv.conf in the container
-	hashBytes, err := ioutil.ReadFile(resolvHashFile)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		// backwards compat: if no hash file exists, this container pre-existed from
-		// a Docker daemon that didn't contain this update feature. Given we can't know
-		// if the user has modified the resolv.conf since container start time, safer
-		// to just never update the container's resolv.conf during it's lifetime which
-		// we can control by setting hashBytes to an empty string
-		hashBytes = []byte("")
-	}
-
-	//if the user has not modified the resolv.conf of the container since we wrote it last
-	//we will replace it with the updated resolv.conf from the host
-	if string(hashBytes) == curHash {
-		log.Debugf("replacing %q with updated host resolv.conf", container.ResolvConfPath)
-
-		// for atomic updates to these files, use temporary files with os.Rename:
-		dir := path.Dir(container.ResolvConfPath)
-		tmpHashFile, err := ioutil.TempFile(dir, "hash")
+	// Set logging file for "json-logger"
+	if cfg.Type == jsonfilelog.Name {
+		ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
 		if err != nil {
-			return err
-		}
-		tmpResolvFile, err := ioutil.TempFile(dir, "resolv")
-		if err != nil {
-			return err
-		}
-
-		// write the updates to the temp files
-		if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newResolvHash), 0644); err != nil {
-			return err
-		}
-		if err = ioutil.WriteFile(tmpResolvFile.Name(), updatedResolvConf, 0644); err != nil {
-			return err
-		}
-
-		// rename the temp files for atomic replace
-		if err = os.Rename(tmpHashFile.Name(), resolvHashFile); err != nil {
-			return err
-		}
-		return os.Rename(tmpResolvFile.Name(), container.ResolvConfPath)
-	}
-	return nil
-}
-
-func (container *Container) updateParentsHosts() error {
-	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
-	for _, ref := range refs {
-		if ref.ParentID == "0" {
-			continue
-		}
-
-		c, err := container.daemon.Get(ref.ParentID)
-		if err != nil {
-			log.Error(err)
-		}
-
-		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
-			log.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
-			if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil {
-				log.Errorf("Failed to update /etc/hosts in parent container %s for alias %s: %v", c.ID, ref.Name, err)
-			}
+			return nil, err
 		}
 	}
-	return nil
-}
-
-func (container *Container) initializeNetworking() error {
-	var err error
-	if container.hostConfig.NetworkMode.IsHost() {
-		container.Config.Hostname, err = os.Hostname()
-		if err != nil {
-			return err
-		}
-
-		parts := strings.SplitN(container.Config.Hostname, ".", 2)
-		if len(parts) > 1 {
-			container.Config.Hostname = parts[0]
-			container.Config.Domainname = parts[1]
-		}
-
-		content, err := ioutil.ReadFile("/etc/hosts")
-		if os.IsNotExist(err) {
-			return container.buildHostnameAndHostsFiles("")
-		} else if err != nil {
-			return err
-		}
-
-		if err := container.buildHostnameFile(); err != nil {
-			return err
-		}
-
-		hostsPath, err := container.getRootResourcePath("hosts")
-		if err != nil {
-			return err
-		}
-		container.HostsPath = hostsPath
-
-		return ioutil.WriteFile(container.HostsPath, content, 0644)
-	}
-	if container.hostConfig.NetworkMode.IsContainer() {
-		// we need to get the hosts files from the container to join
-		nc, err := container.getNetworkedContainer()
-		if err != nil {
-			return err
-		}
-		container.HostnamePath = nc.HostnamePath
-		container.HostsPath = nc.HostsPath
-		container.ResolvConfPath = nc.ResolvConfPath
-		container.Config.Hostname = nc.Config.Hostname
-		container.Config.Domainname = nc.Config.Domainname
-		return nil
-	}
-	if container.daemon.config.DisableNetwork {
-		container.Config.NetworkDisabled = true
-		return container.buildHostnameAndHostsFiles("127.0.1.1")
-	}
-	if err := container.AllocateNetwork(); err != nil {
-		return err
-	}
-	return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
-}
-
-// Make sure the config is compatible with the current kernel
-func (container *Container) verifyDaemonSettings() {
-	if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
-		log.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
-		container.Config.Memory = 0
-	}
-	if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit {
-		log.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.")
-		container.Config.MemorySwap = -1
-	}
-	if container.daemon.sysInfo.IPv4ForwardingDisabled {
-		log.Warnf("IPv4 forwarding is disabled. Networking will not work")
-	}
-}
-
-func (container *Container) setupLinkedContainers() ([]string, error) {
-	var (
-		env    []string
-		daemon = container.daemon
-	)
-	children, err := daemon.Children(container.Name)
-	if err != nil {
-		return nil, err
-	}
-
-	if len(children) > 0 {
-		container.activeLinks = make(map[string]*links.Link, len(children))
-
-		// If we encounter an error make sure that we rollback any network
-		// config and iptables changes
-		rollback := func() {
-			for _, link := range container.activeLinks {
-				link.Disable()
-			}
-			container.activeLinks = nil
-		}
-
-		for linkAlias, child := range children {
-			if !child.IsRunning() {
-				return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
-			}
-
-			link, err := links.NewLink(
-				container.NetworkSettings.IPAddress,
-				child.NetworkSettings.IPAddress,
-				linkAlias,
-				child.Config.Env,
-				child.Config.ExposedPorts,
-				daemon.eng)
-
-			if err != nil {
-				rollback()
-				return nil, err
-			}
-
-			container.activeLinks[link.Alias()] = link
-			if err := link.Enable(); err != nil {
-				rollback()
-				return nil, err
-			}
-
-			for _, envVar := range link.ToEnv() {
-				env = append(env, envVar)
-			}
-		}
-	}
-	return env, nil
-}
-
-func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
-	// if a domain name was specified, append it to the hostname (see #7851)
-	fullHostname := container.Config.Hostname
-	if container.Config.Domainname != "" {
-		fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
-	}
-	// Setup environment
-	env := []string{
-		"PATH=" + DefaultPathEnv,
-		"HOSTNAME=" + fullHostname,
-		// Note: we don't set HOME here because it'll get autoset intelligently
-		// based on the value of USER inside dockerinit, but only if it isn't
-		// set already (ie, that can be overridden by setting HOME via -e or ENV
-		// in a Dockerfile).
-	}
-	if container.Config.Tty {
-		env = append(env, "TERM=xterm")
-	}
-	env = append(env, linkedEnv...)
-	// because the env on the container can override certain default values
-	// we need to replace the 'env' keys where they match and append anything
-	// else.
-	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
-
-	return env
-}
-
-func (container *Container) setupWorkingDirectory() error {
-	if container.Config.WorkingDir != "" {
-		container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
-
-		pth, err := container.getResourcePath(container.Config.WorkingDir)
-		if err != nil {
-			return err
-		}
-
-		pthInfo, err := os.Stat(pth)
-		if err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
-
-			if err := os.MkdirAll(pth, 0755); err != nil {
-				return err
-			}
-		}
-		if pthInfo != nil && !pthInfo.IsDir() {
-			return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
-		}
-	}
-	return nil
+	return c(ctx)
 }
 
 func (container *Container) startLogging() error {
-	cfg := container.hostConfig.LogConfig
-	if cfg.Type == "" {
-		cfg = container.daemon.defaultLogConfig
+	cfg := container.getLogConfig()
+	if cfg.Type == "none" {
+		return nil // do not start logging routines
 	}
-	var l logger.Logger
-	switch cfg.Type {
-	case "json-file":
-		pth, err := container.logPath("json")
-		if err != nil {
-			return err
-		}
-		container.LogPath = pth
 
-		dl, err := jsonfilelog.New(pth)
-		if err != nil {
-			return err
-		}
-		l = dl
-	case "syslog":
-		dl, err := syslog.New(container.ID[:12])
-		if err != nil {
-			return err
-		}
-		l = dl
-	case "none":
-		return nil
-	default:
-		return fmt.Errorf("Unknown logging driver: %s", cfg.Type)
+	l, err := container.getLogger()
+	if err != nil {
+		return fmt.Errorf("Failed to initialize logging driver: %v", err)
 	}
 
 	copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
@@ -1406,6 +701,11 @@
 	copier.Run()
 	container.logDriver = l
 
+	// set LogPath field only for json-file logdriver
+	if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
+		container.LogPath = jl.LogPath()
+	}
+
 	return nil
 }
 
@@ -1423,37 +723,6 @@
 	return nil
 }
 
-func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bindings nat.PortMap) error {
-	binding := bindings[port]
-	if container.hostConfig.PublishAllPorts && len(binding) == 0 {
-		binding = append(binding, nat.PortBinding{})
-	}
-
-	for i := 0; i < len(binding); i++ {
-		b := binding[i]
-
-		job := eng.Job("allocate_port", container.ID)
-		job.Setenv("HostIP", b.HostIp)
-		job.Setenv("HostPort", b.HostPort)
-		job.Setenv("Proto", port.Proto())
-		job.Setenv("ContainerPort", port.Port())
-
-		portEnv, err := job.Stdout.AddEnv()
-		if err != nil {
-			return err
-		}
-		if err := job.Run(); err != nil {
-			return err
-		}
-		b.HostIp = portEnv.Get("HostIP")
-		b.HostPort = portEnv.Get("HostPort")
-
-		binding[i] = b
-	}
-	bindings[port] = binding
-	return nil
-}
-
 func (container *Container) GetProcessLabel() string {
 	// even if we have a process label return "" if we are running
 	// in privileged mode
@@ -1470,38 +739,6 @@
 	return container.MountLabel
 }
 
-func (container *Container) getIpcContainer() (*Container, error) {
-	containerID := container.hostConfig.IpcMode.Container()
-	c, err := container.daemon.Get(containerID)
-	if err != nil {
-		return nil, err
-	}
-	if !c.IsRunning() {
-		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
-	}
-	return c, nil
-}
-
-func (container *Container) getNetworkedContainer() (*Container, error) {
-	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
-	switch parts[0] {
-	case "container":
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("no container specified to join network")
-		}
-		nc, err := container.daemon.Get(parts[1])
-		if err != nil {
-			return nil, err
-		}
-		if !nc.IsRunning() {
-			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
-		}
-		return nc, nil
-	default:
-		return nil, fmt.Errorf("network mode not set to container")
-	}
-}
-
 func (container *Container) Stats() (*execdriver.ResourceStats, error) {
 	return container.daemon.Stats(container)
 }
@@ -1514,3 +751,382 @@
 	}
 	return c.hostConfig.LogConfig.Type
 }
+
+func (container *Container) GetExecIDs() []string {
+	return container.execCommands.List()
+}
+
+func (container *Container) Exec(execConfig *execConfig) error {
+	container.Lock()
+	defer container.Unlock()
+
+	waitStart := make(chan struct{})
+
+	callback := func(processConfig *execdriver.ProcessConfig, pid int) {
+		if processConfig.Tty {
+			// The callback is called after the process Start()
+			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
+			// which we close here.
+			if c, ok := processConfig.Stdout.(io.Closer); ok {
+				c.Close()
+			}
+		}
+		close(waitStart)
+	}
+
+	// We use a callback here instead of a goroutine and an chan for
+	// syncronization purposes
+	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
+
+	// Exec should not return until the process is actually running
+	select {
+	case <-waitStart:
+	case err := <-cErr:
+		return err
+	}
+
+	return nil
+}
+
+func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
+	var (
+		err      error
+		exitCode int
+	)
+
+	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
+	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
+	if err != nil {
+		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
+	}
+
+	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
+	if execConfig.OpenStdin {
+		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
+			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
+		}
+	}
+	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
+		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
+	}
+	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
+		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
+	}
+	if execConfig.ProcessConfig.Terminal != nil {
+		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
+			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
+		}
+	}
+
+	return err
+}
+
+func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+	return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
+}
+
+func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
+	if logs {
+		logDriver, err := c.getLogger()
+		cLog, err := logDriver.GetReader()
+
+		if err != nil {
+			logrus.Errorf("Error reading logs: %s", err)
+		} else if c.LogDriverType() != jsonfilelog.Name {
+			logrus.Errorf("Reading logs not implemented for driver %s", c.LogDriverType())
+		} else {
+			dec := json.NewDecoder(cLog)
+			for {
+				l := &jsonlog.JSONLog{}
+
+				if err := dec.Decode(l); err == io.EOF {
+					break
+				} else if err != nil {
+					logrus.Errorf("Error streaming logs: %s", err)
+					break
+				}
+				if l.Stream == "stdout" && stdout != nil {
+					io.WriteString(stdout, l.Log)
+				}
+				if l.Stream == "stderr" && stderr != nil {
+					io.WriteString(stderr, l.Log)
+				}
+			}
+		}
+	}
+
+	//stream
+	if stream {
+		var stdinPipe io.ReadCloser
+		if stdin != nil {
+			r, w := io.Pipe()
+			go func() {
+				defer w.Close()
+				defer logrus.Debugf("Closing buffered stdin pipe")
+				io.Copy(w, stdin)
+			}()
+			stdinPipe = r
+		}
+		<-c.Attach(stdinPipe, stdout, stderr)
+		// If we are in stdinonce mode, wait for the process to end
+		// otherwise, simply return
+		if c.Config.StdinOnce && !c.Config.Tty {
+			c.WaitStop(-1 * time.Second)
+		}
+	}
+	return nil
+}
+
+func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
+	var (
+		cStdout, cStderr io.ReadCloser
+		cStdin           io.WriteCloser
+		wg               sync.WaitGroup
+		errors           = make(chan error, 3)
+	)
+
+	if stdin != nil && openStdin {
+		cStdin = streamConfig.StdinPipe()
+		wg.Add(1)
+	}
+
+	if stdout != nil {
+		cStdout = streamConfig.StdoutPipe()
+		wg.Add(1)
+	}
+
+	if stderr != nil {
+		cStderr = streamConfig.StderrPipe()
+		wg.Add(1)
+	}
+
+	// Connect stdin of container to the http conn.
+	go func() {
+		if stdin == nil || !openStdin {
+			return
+		}
+		logrus.Debugf("attach: stdin: begin")
+		defer func() {
+			if stdinOnce && !tty {
+				cStdin.Close()
+			} else {
+				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
+				if cStdout != nil {
+					cStdout.Close()
+				}
+				if cStderr != nil {
+					cStderr.Close()
+				}
+			}
+			wg.Done()
+			logrus.Debugf("attach: stdin: end")
+		}()
+
+		var err error
+		if tty {
+			_, err = copyEscapable(cStdin, stdin)
+		} else {
+			_, err = io.Copy(cStdin, stdin)
+
+		}
+		if err == io.ErrClosedPipe {
+			err = nil
+		}
+		if err != nil {
+			logrus.Errorf("attach: stdin: %s", err)
+			errors <- err
+			return
+		}
+	}()
+
+	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
+		if stream == nil {
+			return
+		}
+		defer func() {
+			// Make sure stdin gets closed
+			if stdin != nil {
+				stdin.Close()
+			}
+			streamPipe.Close()
+			wg.Done()
+			logrus.Debugf("attach: %s: end", name)
+		}()
+
+		logrus.Debugf("attach: %s: begin", name)
+		_, err := io.Copy(stream, streamPipe)
+		if err == io.ErrClosedPipe {
+			err = nil
+		}
+		if err != nil {
+			logrus.Errorf("attach: %s: %v", name, err)
+			errors <- err
+		}
+	}
+
+	go attachStream("stdout", stdout, cStdout)
+	go attachStream("stderr", stderr, cStderr)
+
+	return promise.Go(func() error {
+		wg.Wait()
+		close(errors)
+		for err := range errors {
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
+// Code c/c from io.Copy() modified to handle escape sequence
+func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
+	buf := make([]byte, 32*1024)
+	for {
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			// ---- Docker addition
+			// char 16 is C-p
+			if nr == 1 && buf[0] == 16 {
+				nr, er = src.Read(buf)
+				// char 17 is C-q
+				if nr == 1 && buf[0] == 17 {
+					if err := src.Close(); err != nil {
+						return 0, err
+					}
+					return 0, nil
+				}
+			}
+			// ---- End of docker
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er == io.EOF {
+			break
+		}
+		if er != nil {
+			err = er
+			break
+		}
+	}
+	return written, err
+}
+
+func (container *Container) networkMounts() []execdriver.Mount {
+	var mounts []execdriver.Mount
+	if container.ResolvConfPath != "" {
+		label.SetFileLabel(container.ResolvConfPath, container.MountLabel)
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.ResolvConfPath,
+			Destination: "/etc/resolv.conf",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	if container.HostnamePath != "" {
+		label.SetFileLabel(container.HostnamePath, container.MountLabel)
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.HostnamePath,
+			Destination: "/etc/hostname",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	if container.HostsPath != "" {
+		label.SetFileLabel(container.HostsPath, container.MountLabel)
+		mounts = append(mounts, execdriver.Mount{
+			Source:      container.HostsPath,
+			Destination: "/etc/hosts",
+			Writable:    !container.hostConfig.ReadonlyRootfs,
+			Private:     true,
+		})
+	}
+	return mounts
+}
+
+func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
+	container.MountPoints[destination] = &mountPoint{
+		Name:        name,
+		Driver:      volume.DefaultDriverName,
+		Destination: destination,
+		RW:          rw,
+	}
+}
+
+func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
+	container.MountPoints[destination] = &mountPoint{
+		Name:        vol.Name(),
+		Driver:      vol.DriverName(),
+		Destination: destination,
+		RW:          rw,
+		Volume:      vol,
+	}
+}
+
+func (container *Container) isDestinationMounted(destination string) bool {
+	return container.MountPoints[destination] != nil
+}
+
+func (container *Container) prepareMountPoints() error {
+	for _, config := range container.MountPoints {
+		if len(config.Driver) > 0 {
+			v, err := createVolume(config.Name, config.Driver)
+			if err != nil {
+				return err
+			}
+			config.Volume = v
+		}
+	}
+	return nil
+}
+
+func (container *Container) removeMountPoints() error {
+	for _, m := range container.MountPoints {
+		if m.Volume != nil {
+			if err := removeVolume(m.Volume); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (container *Container) shouldRestart() bool {
+	return container.hostConfig.RestartPolicy.Name == "always" ||
+		(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
+}
+
+func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
+	rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
+	if err != nil {
+		return err
+	}
+
+	if _, err = ioutil.ReadDir(rootfs); err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	path, err := v.Mount()
+	if err != nil {
+		return err
+	}
+
+	if err := copyExistingContents(rootfs, path); err != nil {
+		return err
+	}
+
+	return v.Unmount()
+}
diff --git a/daemon/container_linux.go b/daemon/container_linux.go
new file mode 100644
index 0000000..8dd839e
--- /dev/null
+++ b/daemon/container_linux.go
@@ -0,0 +1,995 @@
+// +build linux
+
+package daemon
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/network"
+	"github.com/docker/docker/links"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/directory"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/ulimit"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer/configs"
+	"github.com/docker/libcontainer/devices"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/options"
+	"github.com/docker/libnetwork/types"
+)
+
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+type Container struct {
+	CommonContainer
+
+	// Fields below here are platform specific.
+
+	AppArmorProfile string
+	activeLinks     map[string]*links.Link
+}
+
+func killProcessDirectly(container *Container) error {
+	if _, err := container.WaitStop(10 * time.Second); err != nil {
+		// Ensure that we don't kill ourselves
+		if pid := container.GetPid(); pid != 0 {
+			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
+			if err := syscall.Kill(pid, 9); err != nil {
+				if err != syscall.ESRCH {
+					return err
+				}
+				logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
+			}
+		}
+	}
+	return nil
+}
+
+func (container *Container) setupLinkedContainers() ([]string, error) {
+	var (
+		env    []string
+		daemon = container.daemon
+	)
+	children, err := daemon.Children(container.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(children) > 0 {
+		container.activeLinks = make(map[string]*links.Link, len(children))
+
+		// If we encounter an error make sure that we rollback any network
+		// config and iptables changes
+		rollback := func() {
+			for _, link := range container.activeLinks {
+				link.Disable()
+			}
+			container.activeLinks = nil
+		}
+
+		for linkAlias, child := range children {
+			if !child.IsRunning() {
+				return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
+			}
+
+			link, err := links.NewLink(
+				container.NetworkSettings.IPAddress,
+				child.NetworkSettings.IPAddress,
+				linkAlias,
+				child.Config.Env,
+				child.Config.ExposedPorts,
+			)
+
+			if err != nil {
+				rollback()
+				return nil, err
+			}
+
+			container.activeLinks[link.Alias()] = link
+			if err := link.Enable(); err != nil {
+				rollback()
+				return nil, err
+			}
+
+			for _, envVar := range link.ToEnv() {
+				env = append(env, envVar)
+			}
+		}
+	}
+	return env, nil
+}
+
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
+	// if a domain name was specified, append it to the hostname (see #7851)
+	fullHostname := container.Config.Hostname
+	if container.Config.Domainname != "" {
+		fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
+	}
+	// Setup environment
+	env := []string{
+		"PATH=" + DefaultPathEnv,
+		"HOSTNAME=" + fullHostname,
+		// Note: we don't set HOME here because it'll get autoset intelligently
+		// based on the value of USER inside dockerinit, but only if it isn't
+		// set already (ie, that can be overridden by setting HOME via -e or ENV
+		// in a Dockerfile).
+	}
+	if container.Config.Tty {
+		env = append(env, "TERM=xterm")
+	}
+	env = append(env, linkedEnv...)
+	// because the env on the container can override certain default values
+	// we need to replace the 'env' keys where they match and append anything
+	// else.
+	env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
+
+	return env
+}
+
+func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
+	device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
+	// if there was no error, return the device
+	if err == nil {
+		device.Path = deviceMapping.PathInContainer
+		return append(devs, device), nil
+	}
+
+	// if the device is not a device node
+	// try to see if it's a directory holding many devices
+	if err == devices.ErrNotADevice {
+
+		// check if it is a directory
+		if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
+
+			// mount the internal devices recursively
+			filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
+				childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
+				if e != nil {
+					// ignore the device
+					return nil
+				}
+
+				// add the device to userSpecified devices
+				childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
+				devs = append(devs, childDevice)
+
+				return nil
+			})
+		}
+	}
+
+	if len(devs) > 0 {
+		return devs, nil
+	}
+
+	return devs, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
+}
+
+func populateCommand(c *Container, env []string) error {
+	var en *execdriver.Network
+	if !c.daemon.config.DisableNetwork {
+		en = &execdriver.Network{
+			NamespacePath: c.NetworkSettings.SandboxKey,
+		}
+
+		parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
+		if parts[0] == "container" {
+			nc, err := c.getNetworkedContainer()
+			if err != nil {
+				return err
+			}
+			en.ContainerID = nc.ID
+		}
+	}
+
+	ipc := &execdriver.Ipc{}
+
+	if c.hostConfig.IpcMode.IsContainer() {
+		ic, err := c.getIpcContainer()
+		if err != nil {
+			return err
+		}
+		ipc.ContainerID = ic.ID
+	} else {
+		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
+	}
+
+	pid := &execdriver.Pid{}
+	pid.HostPid = c.hostConfig.PidMode.IsHost()
+
+	uts := &execdriver.UTS{
+		HostUTS: c.hostConfig.UTSMode.IsHost(),
+	}
+
+	// Build lists of devices allowed and created within the container.
+	var userSpecifiedDevices []*configs.Device
+	for _, deviceMapping := range c.hostConfig.Devices {
+		devs, err := getDevicesFromPath(deviceMapping)
+		if err != nil {
+			return err
+		}
+
+		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
+	}
+	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
+
+	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
+
+	// TODO: this can be removed after lxc-conf is fully deprecated
+	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
+	if err != nil {
+		return err
+	}
+
+	var rlimits []*ulimit.Rlimit
+	ulimits := c.hostConfig.Ulimits
+
+	// Merge ulimits with daemon defaults
+	ulIdx := make(map[string]*ulimit.Ulimit)
+	for _, ul := range ulimits {
+		ulIdx[ul.Name] = ul
+	}
+	for name, ul := range c.daemon.config.Ulimits {
+		if _, exists := ulIdx[name]; !exists {
+			ulimits = append(ulimits, ul)
+		}
+	}
+
+	for _, limit := range ulimits {
+		rl, err := limit.GetRlimit()
+		if err != nil {
+			return err
+		}
+		rlimits = append(rlimits, rl)
+	}
+
+	resources := &execdriver.Resources{
+		Memory:         c.hostConfig.Memory,
+		MemorySwap:     c.hostConfig.MemorySwap,
+		CpuShares:      c.hostConfig.CpuShares,
+		CpusetCpus:     c.hostConfig.CpusetCpus,
+		CpusetMems:     c.hostConfig.CpusetMems,
+		CpuPeriod:      c.hostConfig.CpuPeriod,
+		CpuQuota:       c.hostConfig.CpuQuota,
+		BlkioWeight:    c.hostConfig.BlkioWeight,
+		Rlimits:        rlimits,
+		OomKillDisable: c.hostConfig.OomKillDisable,
+	}
+
+	processConfig := execdriver.ProcessConfig{
+		Privileged: c.hostConfig.Privileged,
+		Entrypoint: c.Path,
+		Arguments:  c.Args,
+		Tty:        c.Config.Tty,
+		User:       c.Config.User,
+	}
+
+	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
+	processConfig.Env = env
+
+	c.command = &execdriver.Command{
+		ID:                 c.ID,
+		Rootfs:             c.RootfsPath(),
+		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
+		InitPath:           "/.dockerinit",
+		WorkingDir:         c.Config.WorkingDir,
+		Network:            en,
+		Ipc:                ipc,
+		Pid:                pid,
+		UTS:                uts,
+		Resources:          resources,
+		AllowedDevices:     allowedDevices,
+		AutoCreatedDevices: autoCreatedDevices,
+		CapAdd:             c.hostConfig.CapAdd,
+		CapDrop:            c.hostConfig.CapDrop,
+		ProcessConfig:      processConfig,
+		ProcessLabel:       c.GetProcessLabel(),
+		MountLabel:         c.GetMountLabel(),
+		LxcConfig:          lxcConfig,
+		AppArmorProfile:    c.AppArmorProfile,
+		CgroupParent:       c.hostConfig.CgroupParent,
+	}
+
+	return nil
+}
+
+// GetSize, return real size, virtual size
+func (container *Container) GetSize() (int64, int64) {
+	var (
+		sizeRw, sizeRootfs int64
+		err                error
+		driver             = container.daemon.driver
+	)
+
+	if err := container.Mount(); err != nil {
+		logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
+		return sizeRw, sizeRootfs
+	}
+	defer container.Unmount()
+
+	initID := fmt.Sprintf("%s-init", container.ID)
+	sizeRw, err = driver.DiffSize(container.ID, initID)
+	if err != nil {
+		logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
+		// FIXME: GetSize should return an error. Not changing it now in case
+		// there is a side-effect.
+		sizeRw = -1
+	}
+
+	if _, err = os.Stat(container.basefs); err == nil {
+		if sizeRootfs, err = directory.Size(container.basefs); err != nil {
+			sizeRootfs = -1
+		}
+	}
+	return sizeRw, sizeRootfs
+}
+
+func (container *Container) buildHostnameFile() error {
+	hostnamePath, err := container.GetRootResourcePath("hostname")
+	if err != nil {
+		return err
+	}
+	container.HostnamePath = hostnamePath
+
+	if container.Config.Domainname != "" {
+		return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
+	}
+	return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+}
+
+func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, error) {
+	var (
+		joinOptions []libnetwork.EndpointOption
+		err         error
+		dns         []string
+		dnsSearch   []string
+	)
+
+	joinOptions = append(joinOptions, libnetwork.JoinOptionHostname(container.Config.Hostname),
+		libnetwork.JoinOptionDomainname(container.Config.Domainname))
+
+	if container.hostConfig.NetworkMode.IsHost() {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox())
+	}
+
+	container.HostsPath, err = container.GetRootResourcePath("hosts")
+	if err != nil {
+		return nil, err
+	}
+	joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath))
+
+	container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
+	if err != nil {
+		return nil, err
+	}
+	joinOptions = append(joinOptions, libnetwork.JoinOptionResolvConfPath(container.ResolvConfPath))
+
+	if len(container.hostConfig.Dns) > 0 {
+		dns = container.hostConfig.Dns
+	} else if len(container.daemon.config.Dns) > 0 {
+		dns = container.daemon.config.Dns
+	}
+
+	for _, d := range dns {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionDNS(d))
+	}
+
+	if len(container.hostConfig.DnsSearch) > 0 {
+		dnsSearch = container.hostConfig.DnsSearch
+	} else if len(container.daemon.config.DnsSearch) > 0 {
+		dnsSearch = container.daemon.config.DnsSearch
+	}
+
+	for _, ds := range dnsSearch {
+		joinOptions = append(joinOptions, libnetwork.JoinOptionDNSSearch(ds))
+	}
+
+	if container.NetworkSettings.SecondaryIPAddresses != nil {
+		name := container.Config.Hostname
+		if container.Config.Domainname != "" {
+			name = name + "." + container.Config.Domainname
+		}
+
+		for _, a := range container.NetworkSettings.SecondaryIPAddresses {
+			joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(name, a.Addr))
+		}
+	}
+
+	var childEndpoints, parentEndpoints []string
+
+	children, err := container.daemon.Children(container.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	for linkAlias, child := range children {
+		_, alias := path.Split(linkAlias)
+		// allow access to the linked container via the alias, real name, and container hostname
+		aliasList := alias + " " + child.Config.Hostname
+		// only add the name if alias isn't equal to the name
+		if alias != child.Name[1:] {
+			aliasList = aliasList + " " + child.Name[1:]
+		}
+		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(aliasList, child.NetworkSettings.IPAddress))
+		if child.NetworkSettings.EndpointID != "" {
+			childEndpoints = append(childEndpoints, child.NetworkSettings.EndpointID)
+		}
+	}
+
+	for _, extraHost := range container.hostConfig.ExtraHosts {
+		// allow IPv6 addresses in extra hosts; only split on first ":"
+		parts := strings.SplitN(extraHost, ":", 2)
+		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1]))
+	}
+
+	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
+	for _, ref := range refs {
+		if ref.ParentID == "0" {
+			continue
+		}
+
+		c, err := container.daemon.Get(ref.ParentID)
+		if err != nil {
+			logrus.Error(err)
+		}
+
+		if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() {
+			logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
+			joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress))
+			if c.NetworkSettings.EndpointID != "" {
+				parentEndpoints = append(parentEndpoints, c.NetworkSettings.EndpointID)
+			}
+		}
+	}
+
+	linkOptions := options.Generic{
+		netlabel.GenericData: options.Generic{
+			"ParentEndpoints": parentEndpoints,
+			"ChildEndpoints":  childEndpoints,
+		},
+	}
+
+	joinOptions = append(joinOptions, libnetwork.JoinOptionGeneric(linkOptions))
+
+	return joinOptions, nil
+}
+
+func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
+	if ep == nil {
+		return nil, fmt.Errorf("invalid endpoint while building port map info")
+	}
+
+	if networkSettings == nil {
+		return nil, fmt.Errorf("invalid networksettings while building port map info")
+	}
+
+	driverInfo, err := ep.DriverInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	if driverInfo == nil {
+		// It is not an error for epInfo to be nil
+		return networkSettings, nil
+	}
+
+	if mac, ok := driverInfo[netlabel.MacAddress]; ok {
+		networkSettings.MacAddress = mac.(net.HardwareAddr).String()
+	}
+
+	mapData, ok := driverInfo[netlabel.PortMap]
+	if !ok {
+		return networkSettings, nil
+	}
+
+	if portMapping, ok := mapData.([]types.PortBinding); ok {
+		networkSettings.Ports = nat.PortMap{}
+		for _, pp := range portMapping {
+			natPort := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
+			natBndg := nat.PortBinding{HostIp: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
+			networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg)
+		}
+	}
+
+	return networkSettings, nil
+}
+
+func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
+	if ep == nil {
+		return nil, fmt.Errorf("invalid endpoint while building port map info")
+	}
+
+	if networkSettings == nil {
+		return nil, fmt.Errorf("invalid networksettings while building port map info")
+	}
+
+	epInfo := ep.Info()
+	if epInfo == nil {
+		// It is not an error to get an empty endpoint info
+		return networkSettings, nil
+	}
+
+	ifaceList := epInfo.InterfaceList()
+	if len(ifaceList) == 0 {
+		return networkSettings, nil
+	}
+
+	iface := ifaceList[0]
+
+	ones, _ := iface.Address().Mask.Size()
+	networkSettings.IPAddress = iface.Address().IP.String()
+	networkSettings.IPPrefixLen = ones
+
+	if iface.AddressIPv6().IP.To16() != nil {
+		onesv6, _ := iface.AddressIPv6().Mask.Size()
+		networkSettings.GlobalIPv6Address = iface.AddressIPv6().IP.String()
+		networkSettings.GlobalIPv6PrefixLen = onesv6
+	}
+
+	if len(ifaceList) == 1 {
+		return networkSettings, nil
+	}
+
+	networkSettings.SecondaryIPAddresses = make([]network.Address, 0, len(ifaceList)-1)
+	networkSettings.SecondaryIPv6Addresses = make([]network.Address, 0, len(ifaceList)-1)
+	for _, iface := range ifaceList[1:] {
+		ones, _ := iface.Address().Mask.Size()
+		addr := network.Address{Addr: iface.Address().IP.String(), PrefixLen: ones}
+		networkSettings.SecondaryIPAddresses = append(networkSettings.SecondaryIPAddresses, addr)
+
+		if iface.AddressIPv6().IP.To16() != nil {
+			onesv6, _ := iface.AddressIPv6().Mask.Size()
+			addrv6 := network.Address{Addr: iface.AddressIPv6().IP.String(), PrefixLen: onesv6}
+			networkSettings.SecondaryIPv6Addresses = append(networkSettings.SecondaryIPv6Addresses, addrv6)
+		}
+	}
+
+	return networkSettings, nil
+}
+
+func (container *Container) updateJoinInfo(ep libnetwork.Endpoint) error {
+	epInfo := ep.Info()
+	if epInfo == nil {
+		// It is not an error to get an empty endpoint info
+		return nil
+	}
+
+	container.NetworkSettings.Gateway = epInfo.Gateway().String()
+	if epInfo.GatewayIPv6().To16() != nil {
+		container.NetworkSettings.IPv6Gateway = epInfo.GatewayIPv6().String()
+	}
+
+	container.NetworkSettings.SandboxKey = epInfo.SandboxKey()
+
+	return nil
+}
+
+func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libnetwork.Endpoint) error {
+	networkSettings := &network.Settings{NetworkID: n.ID(), EndpointID: ep.ID()}
+
+	networkSettings, err := container.buildPortMapInfo(n, ep, networkSettings)
+	if err != nil {
+		return err
+	}
+
+	networkSettings, err = container.buildEndpointInfo(n, ep, networkSettings)
+	if err != nil {
+		return err
+	}
+
+	if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
+		networkSettings.Bridge = container.daemon.config.Bridge.Iface
+	}
+
+	container.NetworkSettings = networkSettings
+	return nil
+}
+
+func (container *Container) UpdateNetwork() error {
+	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
+	if err != nil {
+		return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
+	}
+
+	ep, err := n.EndpointByID(container.NetworkSettings.EndpointID)
+	if err != nil {
+		return fmt.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err)
+	}
+
+	if err := ep.Leave(container.ID); err != nil {
+		return fmt.Errorf("endpoint leave failed: %v", err)
+
+	}
+
+	joinOptions, err := container.buildJoinOptions()
+	if err != nil {
+		return fmt.Errorf("Update network failed: %v", err)
+	}
+
+	if _, err := ep.Join(container.ID, joinOptions...); err != nil {
+		return fmt.Errorf("endpoint join failed: %v", err)
+	}
+
+	if err := container.updateJoinInfo(ep); err != nil {
+		return fmt.Errorf("Updating join info failed: %v", err)
+	}
+
+	return nil
+}
+
+func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointOption, error) {
+	var (
+		portSpecs     = make(nat.PortSet)
+		bindings      = make(nat.PortMap)
+		pbList        []types.PortBinding
+		exposeList    []types.TransportPort
+		createOptions []libnetwork.EndpointOption
+	)
+
+	if container.Config.PortSpecs != nil {
+		if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
+			return nil, err
+		}
+		container.Config.PortSpecs = nil
+		if err := container.WriteHostConfig(); err != nil {
+			return nil, err
+		}
+	}
+
+	if container.Config.ExposedPorts != nil {
+		portSpecs = container.Config.ExposedPorts
+	}
+
+	if container.hostConfig.PortBindings != nil {
+		for p, b := range container.hostConfig.PortBindings {
+			bindings[p] = []nat.PortBinding{}
+			for _, bb := range b {
+				bindings[p] = append(bindings[p], nat.PortBinding{
+					HostIp:   bb.HostIp,
+					HostPort: bb.HostPort,
+				})
+			}
+		}
+	}
+
+	container.NetworkSettings.PortMapping = nil
+
+	ports := make([]nat.Port, len(portSpecs))
+	var i int
+	for p := range portSpecs {
+		ports[i] = p
+		i++
+	}
+	nat.SortPortMap(ports, bindings)
+	for _, port := range ports {
+		expose := types.TransportPort{}
+		expose.Proto = types.ParseProtocol(port.Proto())
+		expose.Port = uint16(port.Int())
+		exposeList = append(exposeList, expose)
+
+		pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
+		binding := bindings[port]
+		for i := 0; i < len(binding); i++ {
+			pbCopy := pb.GetCopy()
+			pbCopy.HostPort = uint16(nat.Port(binding[i].HostPort).Int())
+			pbCopy.HostIP = net.ParseIP(binding[i].HostIp)
+			pbList = append(pbList, pbCopy)
+		}
+
+		if container.hostConfig.PublishAllPorts && len(binding) == 0 {
+			pbList = append(pbList, pb)
+		}
+	}
+
+	createOptions = append(createOptions,
+		libnetwork.CreateOptionPortMapping(pbList),
+		libnetwork.CreateOptionExposedPorts(exposeList))
+
+	if container.Config.MacAddress != "" {
+		mac, err := net.ParseMAC(container.Config.MacAddress)
+		if err != nil {
+			return nil, err
+		}
+
+		genericOption := options.Generic{
+			netlabel.MacAddress: mac,
+		}
+
+		createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
+	}
+
+	return createOptions, nil
+}
+
+func (container *Container) AllocateNetwork() error {
+	mode := container.hostConfig.NetworkMode
+	if container.Config.NetworkDisabled || mode.IsContainer() {
+		return nil
+	}
+
+	var err error
+
+	n, err := container.daemon.netController.NetworkByName(string(mode))
+	if err != nil {
+		return fmt.Errorf("error locating network with name %s: %v", string(mode), err)
+	}
+
+	createOptions, err := container.buildCreateEndpointOptions()
+	if err != nil {
+		return err
+	}
+
+	ep, err := n.CreateEndpoint(container.Name, createOptions...)
+	if err != nil {
+		return err
+	}
+
+	if err := container.updateNetworkSettings(n, ep); err != nil {
+		return err
+	}
+
+	joinOptions, err := container.buildJoinOptions()
+	if err != nil {
+		return err
+	}
+
+	if _, err := ep.Join(container.ID, joinOptions...); err != nil {
+		return err
+	}
+
+	if err := container.updateJoinInfo(ep); err != nil {
+		return fmt.Errorf("Updating join info failed: %v", err)
+	}
+
+	if err := container.WriteHostConfig(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (container *Container) initializeNetworking() error {
+	var err error
+
+	// Make sure NetworkMode has an acceptable value before
+	// initializing networking.
+	if container.hostConfig.NetworkMode == runconfig.NetworkMode("") {
+		container.hostConfig.NetworkMode = runconfig.NetworkMode("bridge")
+	}
+
+	if container.hostConfig.NetworkMode.IsContainer() {
+		// we need to get the hosts files from the container to join
+		nc, err := container.getNetworkedContainer()
+		if err != nil {
+			return err
+		}
+		container.HostnamePath = nc.HostnamePath
+		container.HostsPath = nc.HostsPath
+		container.ResolvConfPath = nc.ResolvConfPath
+		container.Config.Hostname = nc.Config.Hostname
+		container.Config.Domainname = nc.Config.Domainname
+		return nil
+	}
+
+	if container.daemon.config.DisableNetwork {
+		container.Config.NetworkDisabled = true
+	}
+
+	if container.hostConfig.NetworkMode.IsHost() {
+		container.Config.Hostname, err = os.Hostname()
+		if err != nil {
+			return err
+		}
+
+		parts := strings.SplitN(container.Config.Hostname, ".", 2)
+		if len(parts) > 1 {
+			container.Config.Hostname = parts[0]
+			container.Config.Domainname = parts[1]
+		}
+
+	}
+
+	if err := container.AllocateNetwork(); err != nil {
+		return err
+	}
+
+	return container.buildHostnameFile()
+}
+
+// Make sure the config is compatible with the current kernel
+func (container *Container) verifyDaemonSettings() {
+	if container.hostConfig.Memory > 0 && !container.daemon.sysInfo.MemoryLimit {
+		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
+		container.hostConfig.Memory = 0
+	}
+	if container.hostConfig.Memory > 0 && container.hostConfig.MemorySwap != -1 && !container.daemon.sysInfo.SwapLimit {
+		logrus.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.")
+		container.hostConfig.MemorySwap = -1
+	}
+	if container.daemon.sysInfo.IPv4ForwardingDisabled {
+		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
+	}
+}
+
+func (container *Container) ExportRw() (archive.Archive, error) {
+	if err := container.Mount(); err != nil {
+		return nil, err
+	}
+	if container.daemon == nil {
+		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
+	}
+	archive, err := container.daemon.Diff(container)
+	if err != nil {
+		container.Unmount()
+		return nil, err
+	}
+	return ioutils.NewReadCloserWrapper(archive, func() error {
+			err := archive.Close()
+			container.Unmount()
+			return err
+		}),
+		nil
+}
+
+func (container *Container) getIpcContainer() (*Container, error) {
+	containerID := container.hostConfig.IpcMode.Container()
+	c, err := container.daemon.Get(containerID)
+	if err != nil {
+		return nil, err
+	}
+	if !c.IsRunning() {
+		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
+	}
+	return c, nil
+}
+
+func (container *Container) setupWorkingDirectory() error {
+	if container.Config.WorkingDir != "" {
+		container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
+
+		pth, err := container.GetResourcePath(container.Config.WorkingDir)
+		if err != nil {
+			return err
+		}
+
+		pthInfo, err := os.Stat(pth)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return err
+			}
+
+			if err := os.MkdirAll(pth, 0755); err != nil {
+				return err
+			}
+		}
+		if pthInfo != nil && !pthInfo.IsDir() {
+			return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
+		}
+	}
+	return nil
+}
+
+func (container *Container) getNetworkedContainer() (*Container, error) {
+	parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "container":
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("no container specified to join network")
+		}
+		nc, err := container.daemon.Get(parts[1])
+		if err != nil {
+			return nil, err
+		}
+		if container == nc {
+			return nil, fmt.Errorf("cannot join own network")
+		}
+		if !nc.IsRunning() {
+			return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
+		}
+		return nc, nil
+	default:
+		return nil, fmt.Errorf("network mode not set to container")
+	}
+}
+
+func (container *Container) ReleaseNetwork() {
+	if container.hostConfig.NetworkMode.IsContainer() || container.daemon.config.DisableNetwork {
+		return
+	}
+
+	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
+	if err != nil {
+		logrus.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
+		return
+	}
+
+	ep, err := n.EndpointByID(container.NetworkSettings.EndpointID)
+	if err != nil {
+		logrus.Errorf("error locating endpoint id %s: %v", container.NetworkSettings.EndpointID, err)
+		return
+	}
+
+	if err := ep.Leave(container.ID); err != nil {
+		logrus.Errorf("leaving endpoint failed: %v", err)
+	}
+
+	if err := ep.Delete(); err != nil {
+		logrus.Errorf("deleting endpoint failed: %v", err)
+	}
+
+	container.NetworkSettings = &network.Settings{}
+}
+
+func disableAllActiveLinks(container *Container) {
+	if container.activeLinks != nil {
+		for _, link := range container.activeLinks {
+			link.Disable()
+		}
+	}
+}
+
+func (container *Container) DisableLink(name string) {
+	if container.activeLinks != nil {
+		if link, exists := container.activeLinks[name]; exists {
+			link.Disable()
+			delete(container.activeLinks, name)
+			if err := container.UpdateNetwork(); err != nil {
+				logrus.Debugf("Could not update network to remove link: %v", err)
+			}
+		} else {
+			logrus.Debugf("Could not find active link for %s", name)
+		}
+	}
+}
+
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
+	var volumeMounts []mountPoint
+
+	for _, mntPoint := range container.MountPoints {
+		dest, err := container.GetResourcePath(mntPoint.Destination)
+		if err != nil {
+			return err
+		}
+
+		volumeMounts = append(volumeMounts, mountPoint{Destination: dest, Volume: mntPoint.Volume})
+	}
+
+	for _, mnt := range container.networkMounts() {
+		dest, err := container.GetResourcePath(mnt.Destination)
+		if err != nil {
+			return err
+		}
+
+		volumeMounts = append(volumeMounts, mountPoint{Destination: dest})
+	}
+
+	for _, volumeMount := range volumeMounts {
+		if forceSyscall {
+			syscall.Unmount(volumeMount.Destination, 0)
+		}
+
+		if volumeMount.Volume != nil {
+			if err := volumeMount.Volume.Unmount(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/daemon/container_windows.go b/daemon/container_windows.go
new file mode 100644
index 0000000..0807aab
--- /dev/null
+++ b/daemon/container_windows.go
@@ -0,0 +1,171 @@
+// +build windows
+
+package daemon
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/archive"
+)
+
+// TODO Windows. A reasonable default at the moment.
+const DefaultPathEnv = `c:\windows\system32;c:\windows\system32\WindowsPowerShell\v1.0`
+
+type Container struct {
+	CommonContainer
+
+	// Fields below here are platform specific.
+
+	// TODO Windows. Further factoring out of unused fields will be necessary.
+
+	// ---- START OF TEMPORARY DECLARATION ----
+	// TODO Windows. Temporarily keeping fields in to assist in compilation
+	// of the daemon on Windows without affecting many other files in a single
+	// PR, thus making code review significantly harder. These lines will be
+	// removed in subsequent PRs.
+
+	AppArmorProfile string
+	// ---- END OF TEMPORARY DECLARATION ----
+
+}
+
+func killProcessDirectly(container *Container) error {
+	return nil
+}
+
+func (container *Container) setupContainerDns() error {
+	return nil
+}
+
+func (container *Container) updateParentsHosts() error {
+	return nil
+}
+
+func (container *Container) setupLinkedContainers() ([]string, error) {
+	return nil, nil
+}
+
+func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
+	return nil
+}
+
+func (container *Container) initializeNetworking() error {
+	return nil
+}
+
+func (container *Container) setupWorkingDirectory() error {
+	return nil
+}
+
+func (container *Container) verifyDaemonSettings() {
+}
+
+func populateCommand(c *Container, env []string) error {
+	en := &execdriver.Network{
+		Mtu:       c.daemon.config.Mtu,
+		Interface: nil,
+	}
+
+	// TODO Windows. Appropriate network mode (will refactor as part of
+	// libnetwork. For now, even through bridge not used, let it succeed to
+	// allow the Windows daemon to limp during its bring-up
+	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
+	switch parts[0] {
+	case "none":
+	case "bridge", "": // empty string to support existing containers
+		if !c.Config.NetworkDisabled {
+			network := c.NetworkSettings
+			en.Interface = &execdriver.NetworkInterface{
+				Bridge:     network.Bridge,
+				MacAddress: network.MacAddress,
+			}
+		}
+	case "host", "container":
+		return fmt.Errorf("unsupported network mode: %s", c.hostConfig.NetworkMode)
+	default:
+		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
+	}
+
+	pid := &execdriver.Pid{}
+
+	// TODO Windows. This can probably be factored out.
+	pid.HostPid = c.hostConfig.PidMode.IsHost()
+
+	// TODO Windows. Resource controls to be implemented later.
+	resources := &execdriver.Resources{}
+
+	// TODO Windows. Further refactoring required (privileged/user)
+	processConfig := execdriver.ProcessConfig{
+		Privileged: c.hostConfig.Privileged,
+		Entrypoint: c.Path,
+		Arguments:  c.Args,
+		Tty:        c.Config.Tty,
+		User:       c.Config.User,
+	}
+
+	processConfig.Env = env
+
+	// TODO Windows: Factor out remainder of unused fields.
+	c.command = &execdriver.Command{
+		ID:             c.ID,
+		Rootfs:         c.RootfsPath(),
+		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
+		InitPath:       "/.dockerinit",
+		WorkingDir:     c.Config.WorkingDir,
+		Network:        en,
+		Pid:            pid,
+		Resources:      resources,
+		CapAdd:         c.hostConfig.CapAdd,
+		CapDrop:        c.hostConfig.CapDrop,
+		ProcessConfig:  processConfig,
+		ProcessLabel:   c.GetProcessLabel(),
+		MountLabel:     c.GetMountLabel(),
+	}
+
+	return nil
+}
+
+// GetSize, return real size, virtual size
+func (container *Container) GetSize() (int64, int64) {
+	// TODO Windows
+	return 0, 0
+}
+
+func (container *Container) AllocateNetwork() error {
+
+	// TODO Windows. This needs reworking with libnetwork. In the
+	// proof-of-concept for //build conference, the Windows daemon
+	// invoked eng.Job("allocate_interface) passing through
+	// RequestedMac.
+
+	return nil
+}
+
+func (container *Container) ExportRw() (archive.Archive, error) {
+	if container.IsRunning() {
+		return nil, fmt.Errorf("Cannot export a running container.")
+	}
+	// TODO Windows. Implementation (different to Linux)
+	return nil, nil
+}
+
+func (container *Container) ReleaseNetwork() {
+	// TODO Windows. Rework with libnetwork
+}
+
+func (container *Container) RestoreNetwork() error {
+	// TODO Windows. Rework with libnetwork
+	return nil
+}
+
+func disableAllActiveLinks(container *Container) {
+}
+
+func (container *Container) DisableLink(name string) {
+}
+
+func (container *Container) UnmountVolumes(forceSyscall bool) error {
+	return nil
+}
diff --git a/daemon/copy.go b/daemon/copy.go
index d42f450..dec30d8 100644
--- a/daemon/copy.go
+++ b/daemon/copy.go
@@ -1,34 +1,16 @@
 package daemon
 
-import (
-	"io"
+import "io"
 
-	"github.com/docker/docker/engine"
-)
-
-func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
-	if len(job.Args) != 2 {
-		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
-	}
-
-	var (
-		name     = job.Args[0]
-		resource = job.Args[1]
-	)
-
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 
-	data, err := container.Copy(resource)
-	if err != nil {
-		return job.Error(err)
+	if res[0] == '/' {
+		res = res[1:]
 	}
-	defer data.Close()
 
-	if _, err := io.Copy(job.Stdout, data); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+	return container.Copy(res)
 }
diff --git a/daemon/create.go b/daemon/create.go
index 49bc6a7..4a02fc0 100644
--- a/daemon/create.go
+++ b/daemon/create.go
@@ -2,71 +2,47 @@
 
 import (
 	"fmt"
+	"os"
+	"path/filepath"
 	"strings"
 
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/libcontainer/label"
 )
 
-func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
-	var name string
-	if len(job.Args) == 1 {
-		name = job.Args[0]
-	} else if len(job.Args) > 1 {
-		return job.Errorf("Usage: %s", job.Name)
+func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig) (string, []string, error) {
+	warnings, err := daemon.verifyHostConfig(hostConfig)
+	if err != nil {
+		return "", warnings, err
 	}
 
-	config := runconfig.ContainerConfigFromJob(job)
-	hostConfig := runconfig.ContainerHostConfigFromJob(job)
-
-	if len(hostConfig.LxcConf) > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
-		return job.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
-	}
-	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
-		return job.Errorf("Minimum memory limit allowed is 4MB")
-	}
-	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
-		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
-		hostConfig.Memory = 0
-	}
-	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
-		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
-		hostConfig.MemorySwap = -1
-	}
-	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
-		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
-	}
-	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
-		return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
+	// The check for a valid workdir path is made on the server rather than in the
+	// client. This is because we don't know the type of path (Linux or Windows)
+	// to validate on the client.
+	if config.WorkingDir != "" && !filepath.IsAbs(config.WorkingDir) {
+		return "", warnings, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir)
 	}
 
 	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
 	if err != nil {
-		if daemon.Graph().IsNotExist(err) {
+		if daemon.Graph().IsNotExist(err, config.Image) {
 			_, tag := parsers.ParseRepositoryTag(config.Image)
 			if tag == "" {
 				tag = graph.DEFAULTTAG
 			}
-			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+			return "", warnings, fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
 		}
-		return job.Error(err)
+		return "", warnings, err
 	}
-	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
-		job.Errorf("IPv4 forwarding is disabled.\n")
-	}
+
 	container.LogEvent("create")
+	warnings = append(warnings, buildWarnings...)
 
-	job.Printf("%s\n", container.ID)
-
-	for _, warning := range buildWarnings {
-		job.Errorf("%s\n", warning)
-	}
-
-	return engine.StatusOK
+	return container.ID, warnings, nil
 }
 
 // Create creates a new container from the given configuration with a given name.
@@ -93,6 +69,9 @@
 	if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil {
 		return nil, nil, err
 	}
+	if !config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
+		warnings = append(warnings, "IPv4 forwarding is disabled.\n")
+	}
 	if hostConfig == nil {
 		hostConfig = &runconfig.HostConfig{}
 	}
@@ -111,17 +90,54 @@
 	if err := daemon.createRootfs(container); err != nil {
 		return nil, nil, err
 	}
-	if hostConfig != nil {
-		if err := daemon.setHostConfig(container, hostConfig); err != nil {
-			return nil, nil, err
-		}
+	if err := daemon.setHostConfig(container, hostConfig); err != nil {
+		return nil, nil, err
 	}
 	if err := container.Mount(); err != nil {
 		return nil, nil, err
 	}
 	defer container.Unmount()
-	if err := container.prepareVolumes(); err != nil {
-		return nil, nil, err
+
+	for spec := range config.Volumes {
+		var (
+			name, destination string
+			parts             = strings.Split(spec, ":")
+		)
+		switch len(parts) {
+		case 2:
+			name, destination = parts[0], filepath.Clean(parts[1])
+		default:
+			name = stringid.GenerateRandomID()
+			destination = filepath.Clean(parts[0])
+		}
+		// Skip volumes for which we already have something mounted on that
+		// destination because of a --volume-from.
+		if container.isDestinationMounted(destination) {
+			continue
+		}
+		path, err := container.GetResourcePath(destination)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		stat, err := os.Stat(path)
+		if err == nil && !stat.IsDir() {
+			return nil, nil, fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
+		}
+
+		v, err := createVolume(name, config.VolumeDriver)
+		if err != nil {
+			return nil, nil, err
+		}
+		if err := label.Relabel(v.Path(), container.MountLabel, "z"); err != nil {
+			return nil, nil, err
+		}
+
+		if err := container.copyImagePathContent(v, destination); err != nil {
+			return nil, nil, err
+		}
+
+		container.addMountPointWithVolume(destination, v, true)
 	}
 	if err := container.ToDisk(); err != nil {
 		return nil, nil, err
@@ -138,9 +154,6 @@
 		if err != nil {
 			return nil, err
 		}
-		if !c.IsRunning() {
-			return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer)
-		}
 
 		return label.DupSecOpt(c.ProcessLabel), nil
 	}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index b12d217..8ff13f8 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -1,10 +1,10 @@
 package daemon
 
 import (
-	"bytes"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net"
 	"os"
 	"path"
 	"path/filepath"
@@ -15,38 +15,43 @@
 	"time"
 
 	"github.com/docker/libcontainer/label"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/options"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
-	"github.com/docker/docker/daemon/execdriver/lxc"
 	"github.com/docker/docker/daemon/graphdriver"
 	_ "github.com/docker/docker/daemon/graphdriver/vfs"
-	_ "github.com/docker/docker/daemon/networkdriver/bridge"
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/broadcastwriter"
-	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/namesgenerator"
-	"github.com/docker/docker/pkg/networkfs/resolvconf"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/sysinfo"
 	"github.com/docker/docker/pkg/truncindex"
+	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
-	"github.com/docker/docker/volumes"
-
-	"github.com/go-fsnotify/fsnotify"
+	volumedrivers "github.com/docker/docker/volume/drivers"
+	"github.com/docker/docker/volume/local"
 )
 
+const defaultVolumesPathName = "volumes"
+
 var (
 	validContainerNameChars   = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
@@ -97,63 +102,16 @@
 	repositories     *graph.TagStore
 	idIndex          *truncindex.TruncIndex
 	sysInfo          *sysinfo.SysInfo
-	volumes          *volumes.Repository
-	eng              *engine.Engine
 	config           *Config
 	containerGraph   *graphdb.Database
 	driver           graphdriver.Driver
 	execDriver       execdriver.Driver
-	trustStore       *trust.TrustStore
 	statsCollector   *statsCollector
 	defaultLogConfig runconfig.LogConfig
-}
-
-// Install installs daemon capabilities to eng.
-func (daemon *Daemon) Install(eng *engine.Engine) error {
-	// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
-	for name, method := range map[string]engine.Handler{
-		"attach":            daemon.ContainerAttach,
-		"commit":            daemon.ContainerCommit,
-		"container_changes": daemon.ContainerChanges,
-		"container_copy":    daemon.ContainerCopy,
-		"container_rename":  daemon.ContainerRename,
-		"container_inspect": daemon.ContainerInspect,
-		"container_stats":   daemon.ContainerStats,
-		"containers":        daemon.Containers,
-		"create":            daemon.ContainerCreate,
-		"rm":                daemon.ContainerRm,
-		"export":            daemon.ContainerExport,
-		"info":              daemon.CmdInfo,
-		"kill":              daemon.ContainerKill,
-		"logs":              daemon.ContainerLogs,
-		"pause":             daemon.ContainerPause,
-		"resize":            daemon.ContainerResize,
-		"restart":           daemon.ContainerRestart,
-		"start":             daemon.ContainerStart,
-		"stop":              daemon.ContainerStop,
-		"top":               daemon.ContainerTop,
-		"unpause":           daemon.ContainerUnpause,
-		"wait":              daemon.ContainerWait,
-		"image_delete":      daemon.ImageDelete, // FIXME: see above
-		"execCreate":        daemon.ContainerExecCreate,
-		"execStart":         daemon.ContainerExecStart,
-		"execResize":        daemon.ContainerExecResize,
-		"execInspect":       daemon.ContainerExecInspect,
-	} {
-		if err := eng.Register(name, method); err != nil {
-			return err
-		}
-	}
-	if err := daemon.Repositories().Install(eng); err != nil {
-		return err
-	}
-	if err := daemon.trustStore.Install(eng); err != nil {
-		return err
-	}
-	// FIXME: this hack is necessary for legacy integration tests to access
-	// the daemon object.
-	eng.Hack_SetGlobalVar("httpapi.daemon", daemon)
-	return nil
+	RegistryService  *registry.Service
+	EventsService    *events.Events
+	netController    libnetwork.NetworkController
+	root             string
 }
 
 // Get looks for a container using the provided information, which could be
@@ -200,10 +158,14 @@
 // This is typically done at startup.
 func (daemon *Daemon) load(id string) (*Container, error) {
 	container := &Container{
-		root:         daemon.containerRoot(id),
-		State:        NewState(),
-		execCommands: newExecStore(),
+		CommonContainer: CommonContainer{
+			State:        NewState(),
+			root:         daemon.containerRoot(id),
+			MountPoints:  make(map[string]*mountPoint),
+			execCommands: newExecStore(),
+		},
 	}
+
 	if err := container.FromDisk(); err != nil {
 		return nil, err
 	}
@@ -212,8 +174,6 @@
 		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
 	}
 
-	container.readHostConfig()
-
 	return container, nil
 }
 
@@ -253,39 +213,30 @@
 	// we'll waste time if we update it for every container
 	daemon.idIndex.Add(container.ID)
 
-	container.registerVolumes()
+	if err := daemon.verifyOldVolumesInfo(container); err != nil {
+		return err
+	}
 
-	// FIXME: if the container is supposed to be running but is not, auto restart it?
-	//        if so, then we need to restart monitor and init a new lock
-	// If the container is supposed to be running, make sure of it
+	if err := container.prepareMountPoints(); err != nil {
+		return err
+	}
+
 	if container.IsRunning() {
-		log.Debugf("killing old running container %s", container.ID)
+		logrus.Debugf("killing old running container %s", container.ID)
 
-		existingPid := container.Pid
 		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
 
-		// We only have to handle this for lxc because the other drivers will ensure that
-		// no processes are left when docker dies
-		if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
-			lxc.KillLxc(container.ID, 9)
-		} else {
-			// use the current driver and ensure that the container is dead x.x
-			cmd := &execdriver.Command{
-				ID: container.ID,
-			}
-			var err error
-			cmd.ProcessConfig.Process, err = os.FindProcess(existingPid)
-			if err != nil {
-				log.Debugf("cannot find existing process for %d", existingPid)
-			}
-			daemon.execDriver.Terminate(cmd)
+		// use the current driver and ensure that the container is dead x.x
+		cmd := &execdriver.Command{
+			ID: container.ID,
 		}
+		daemon.execDriver.Terminate(cmd)
 
 		if err := container.Unmount(); err != nil {
-			log.Debugf("unmount error %s", err)
+			logrus.Debugf("unmount error %s", err)
 		}
 		if err := container.ToDisk(); err != nil {
-			log.Debugf("saving stopped state to disk %s", err)
+			logrus.Debugf("saving stopped state to disk %s", err)
 		}
 	}
 
@@ -301,30 +252,26 @@
 		container.Name = name
 
 		if err := container.ToDisk(); err != nil {
-			log.Debugf("Error saving container name %s", err)
+			logrus.Debugf("Error saving container name %s", err)
 		}
 	}
 	return nil
 }
 
-func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, stream string) error {
-	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
-	if err != nil {
-		return err
-	}
-	src.AddWriter(log, stream)
-	return nil
-}
-
 func (daemon *Daemon) restore() error {
+	type cr struct {
+		container  *Container
+		registered bool
+	}
+
 	var (
 		debug         = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "")
-		containers    = make(map[string]*Container)
 		currentDriver = daemon.driver.String()
+		containers    = make(map[string]*cr)
 	)
 
 	if !debug {
-		log.Infof("Loading containers: start.")
+		logrus.Info("Loading containers: start.")
 	}
 	dir, err := ioutil.ReadDir(daemon.repository)
 	if err != nil {
@@ -334,145 +281,80 @@
 	for _, v := range dir {
 		id := v.Name()
 		container, err := daemon.load(id)
-		if !debug && log.GetLevel() == log.InfoLevel {
+		if !debug && logrus.GetLevel() == logrus.InfoLevel {
 			fmt.Print(".")
 		}
 		if err != nil {
-			log.Errorf("Failed to load container %v: %v", id, err)
+			logrus.Errorf("Failed to load container %v: %v", id, err)
 			continue
 		}
 
 		// Ignore the container if it does not support the current driver being used by the graph
 		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
-			log.Debugf("Loaded container %v", container.ID)
+			logrus.Debugf("Loaded container %v", container.ID)
 
-			containers[container.ID] = container
+			containers[container.ID] = &cr{container: container}
 		} else {
-			log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
+			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
 	}
 
-	registeredContainers := []*Container{}
-
 	if entities := daemon.containerGraph.List("/", -1); entities != nil {
 		for _, p := range entities.Paths() {
-			if !debug && log.GetLevel() == log.InfoLevel {
+			if !debug && logrus.GetLevel() == logrus.InfoLevel {
 				fmt.Print(".")
 			}
 
 			e := entities[p]
 
-			if container, ok := containers[e.ID()]; ok {
-				if err := daemon.register(container, false); err != nil {
-					log.Debugf("Failed to register container %s: %s", container.ID, err)
-				}
-
-				registeredContainers = append(registeredContainers, container)
-
-				// delete from the map so that a new name is not automatically generated
-				delete(containers, e.ID())
+			if c, ok := containers[e.ID()]; ok {
+				c.registered = true
 			}
 		}
 	}
 
-	// Any containers that are left over do not exist in the graph
-	for _, container := range containers {
-		// Try to set the default name for a container if it exists prior to links
-		container.Name, err = daemon.generateNewName(container.ID)
-		if err != nil {
-			log.Debugf("Setting default id - %s", err)
-		}
+	group := sync.WaitGroup{}
+	for _, c := range containers {
+		group.Add(1)
 
-		if err := daemon.register(container, false); err != nil {
-			log.Debugf("Failed to register container %s: %s", container.ID, err)
-		}
+		go func(container *Container, registered bool) {
+			defer group.Done()
 
-		registeredContainers = append(registeredContainers, container)
-	}
+			if !registered {
+				// Try to set the default name for a container if it exists prior to links
+				container.Name, err = daemon.generateNewName(container.ID)
+				if err != nil {
+					logrus.Debugf("Setting default id - %s", err)
+				}
+			}
 
-	// check the restart policy on the containers and restart any container with
-	// the restart policy of "always"
-	if daemon.config.AutoRestart {
-		log.Debugf("Restarting containers...")
+			if err := daemon.register(container, false); err != nil {
+				logrus.Debugf("Failed to register container %s: %s", container.ID, err)
+			}
 
-		for _, container := range registeredContainers {
-			if container.hostConfig.RestartPolicy.Name == "always" ||
-				(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) {
-				log.Debugf("Starting container %s", container.ID)
+			// check the restart policy on the containers and restart any container with
+			// the restart policy of "always"
+			if daemon.config.AutoRestart && container.shouldRestart() {
+				logrus.Debugf("Starting container %s", container.ID)
 
 				if err := container.Start(); err != nil {
-					log.Debugf("Failed to start container %s: %s", container.ID, err)
+					logrus.Debugf("Failed to start container %s: %s", container.ID, err)
 				}
 			}
-		}
+		}(c.container, c.registered)
 	}
+	group.Wait()
 
 	if !debug {
-		if log.GetLevel() == log.InfoLevel {
+		if logrus.GetLevel() == logrus.InfoLevel {
 			fmt.Println()
 		}
-		log.Infof("Loading containers: done.")
+		logrus.Info("Loading containers: done.")
 	}
 
 	return nil
 }
 
-// set up the watch on the host's /etc/resolv.conf so that we can update container's
-// live resolv.conf when the network changes on the host
-func (daemon *Daemon) setupResolvconfWatcher() error {
-
-	watcher, err := fsnotify.NewWatcher()
-	if err != nil {
-		return err
-	}
-
-	//this goroutine listens for the events on the watch we add
-	//on the resolv.conf file on the host
-	go func() {
-		for {
-			select {
-			case event := <-watcher.Events:
-				if event.Name == "/etc/resolv.conf" &&
-					(event.Op&fsnotify.Write == fsnotify.Write ||
-						event.Op&fsnotify.Create == fsnotify.Create) {
-					// verify a real change happened before we go further--a file write may have happened
-					// without an actual change to the file
-					updatedResolvConf, newResolvConfHash, err := resolvconf.GetIfChanged()
-					if err != nil {
-						log.Debugf("Error retrieving updated host resolv.conf: %v", err)
-					} else if updatedResolvConf != nil {
-						// because the new host resolv.conf might have localhost nameservers..
-						updatedResolvConf, modified := resolvconf.FilterResolvDns(updatedResolvConf, daemon.config.EnableIPv6)
-						if modified {
-							// changes have occurred during localhost cleanup: generate an updated hash
-							newHash, err := utils.HashData(bytes.NewReader(updatedResolvConf))
-							if err != nil {
-								log.Debugf("Error generating hash of new resolv.conf: %v", err)
-							} else {
-								newResolvConfHash = newHash
-							}
-						}
-						log.Debugf("host network resolv.conf changed--walking container list for updates")
-						contList := daemon.containers.List()
-						for _, container := range contList {
-							if err := container.updateResolvConf(updatedResolvConf, newResolvConfHash); err != nil {
-								log.Debugf("Error on resolv.conf update check for container ID: %s: %v", container.ID, err)
-							}
-						}
-					}
-				}
-			case err := <-watcher.Errors:
-				log.Debugf("host resolv.conf notify error: %v", err)
-			}
-		}
-	}()
-
-	if err := watcher.Add("/etc"); err != nil {
-		return err
-	}
-	return nil
-}
-
 func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool {
 	if config != nil {
 		if config.PortSpecs != nil {
@@ -496,7 +378,7 @@
 			return nil, err
 		}
 	}
-	if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 {
+	if config.Entrypoint.Len() == 0 && config.Cmd.Len() == 0 {
 		return nil, fmt.Errorf("No command specified")
 	}
 	return warnings, nil
@@ -505,7 +387,7 @@
 func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
 	var (
 		err error
-		id  = common.GenerateRandomID()
+		id  = stringid.GenerateRandomID()
 	)
 
 	if name == "" {
@@ -550,7 +432,7 @@
 			nameAsKnownByUser := strings.TrimPrefix(name, "/")
 			return "", fmt.Errorf(
 				"Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser,
-				common.TruncateID(conflictingContainer.ID))
+				stringid.TruncateID(conflictingContainer.ID))
 		}
 	}
 	return name, nil
@@ -573,7 +455,7 @@
 		return name, nil
 	}
 
-	name = "/" + common.TruncateID(id)
+	name = "/" + stringid.TruncateID(id)
 	if _, err := daemon.containerGraph.Set(name, id); err != nil {
 		return "", err
 	}
@@ -588,17 +470,20 @@
 	}
 }
 
-func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string) (string, []string) {
+func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *runconfig.Entrypoint, configCmd *runconfig.Command) (string, []string) {
 	var (
 		entrypoint string
 		args       []string
 	)
-	if len(configEntrypoint) != 0 {
-		entrypoint = configEntrypoint[0]
-		args = append(configEntrypoint[1:], configCmd...)
+
+	cmdSlice := configCmd.Slice()
+	if configEntrypoint.Len() != 0 {
+		eSlice := configEntrypoint.Slice()
+		entrypoint = eSlice[0]
+		args = append(eSlice[1:], cmdSlice...)
 	} else {
-		entrypoint = configCmd[0]
-		args = configCmd[1:]
+		entrypoint = cmdSlice[0]
+		args = cmdSlice[1:]
 	}
 	return entrypoint, args
 }
@@ -642,22 +527,25 @@
 	entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
 
 	container := &Container{
-		// FIXME: we should generate the ID here instead of receiving it as an argument
-		ID:              id,
-		Created:         time.Now().UTC(),
-		Path:            entrypoint,
-		Args:            args, //FIXME: de-duplicate from config
-		Config:          config,
-		hostConfig:      &runconfig.HostConfig{},
-		ImageID:         imgID,
-		NetworkSettings: &NetworkSettings{},
-		Name:            name,
-		Driver:          daemon.driver.String(),
-		ExecDriver:      daemon.execDriver.Name(),
-		State:           NewState(),
-		execCommands:    newExecStore(),
+		CommonContainer: CommonContainer{
+			ID:              id, // FIXME: we should generate the ID here instead of receiving it as an argument
+			Created:         time.Now().UTC(),
+			Path:            entrypoint,
+			Args:            args, //FIXME: de-duplicate from config
+			Config:          config,
+			hostConfig:      &runconfig.HostConfig{},
+			ImageID:         imgID,
+			NetworkSettings: &network.Settings{},
+			Name:            name,
+			Driver:          daemon.driver.String(),
+			ExecDriver:      daemon.execDriver.Name(),
+			State:           NewState(),
+			execCommands:    newExecStore(),
+			MountPoints:     map[string]*mountPoint{},
+		},
 	}
 	container.root = daemon.containerRoot(container.ID)
+
 	return container, err
 }
 
@@ -756,14 +644,14 @@
 func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
 	if hostConfig != nil && hostConfig.Links != nil {
 		for _, l := range hostConfig.Links {
-			parts, err := parsers.PartParser("name:alias", l)
+			name, alias, err := parsers.ParseLink(l)
 			if err != nil {
 				return err
 			}
-			child, err := daemon.Get(parts["name"])
+			child, err := daemon.Get(name)
 			if err != nil {
 				//An error from daemon.Get() means this name could not be found
-				return fmt.Errorf("Could not get container for %s", parts["name"])
+				return fmt.Errorf("Could not get container for %s", name)
 			}
 			for child.hostConfig.NetworkMode.IsContainer() {
 				parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
@@ -775,7 +663,7 @@
 			if child.hostConfig.NetworkMode.IsHost() {
 				return runconfig.ErrConflictHostNetworkAndLinks
 			}
-			if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil {
+			if err := daemon.RegisterLink(container, child, alias); err != nil {
 				return err
 			}
 		}
@@ -790,42 +678,18 @@
 	return nil
 }
 
-// FIXME: harmonize with NewGraph()
-func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) {
-	daemon, err := NewDaemonFromDirectory(config, eng)
-	if err != nil {
-		return nil, err
-	}
-	return daemon, nil
-}
-
-func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) {
-	if config.Mtu == 0 {
-		config.Mtu = getDefaultNetworkMtu()
-	}
+func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
 	// Check for mutually incompatible config options
-	if config.BridgeIface != "" && config.BridgeIP != "" {
+	if config.Bridge.Iface != "" && config.Bridge.IP != "" {
 		return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.")
 	}
-	if !config.EnableIptables && !config.InterContainerCommunication {
+	if !config.Bridge.EnableIPTables && !config.Bridge.InterContainerCommunication {
 		return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
 	}
-	if !config.EnableIptables && config.EnableIpMasq {
-		config.EnableIpMasq = false
+	if !config.Bridge.EnableIPTables && config.Bridge.EnableIPMasq {
+		config.Bridge.EnableIPMasq = false
 	}
-	config.DisableNetwork = config.BridgeIface == disableNetworkBridge
-
-	// Claim the pidfile first, to avoid any and all unexpected race conditions.
-	// Some of the init doesn't need a pidfile lock - but let's not try to be smart.
-	if config.Pidfile != "" {
-		if err := utils.CreatePidFile(config.Pidfile); err != nil {
-			return nil, err
-		}
-		eng.OnShutdown(func() {
-			// Always release the pidfile last, just in case
-			utils.RemovePidFile(config.Pidfile)
-		})
-	}
+	config.DisableNetwork = config.Bridge.Iface == disableNetworkBridge
 
 	// Check that the system is supported and we have sufficient privileges
 	if runtime.GOOS != "linux" {
@@ -838,12 +702,15 @@
 		return nil, err
 	}
 
-	// set up the TempDir to use a canonical path
-	tmp, err := utils.TempDir(config.Root)
+	// set up SIGUSR1 handler to dump Go routine stacks
+	setupSigusr1Trap()
+
+	// set up the tmpDir to use a canonical path
+	tmp, err := tempDir(config.Root)
 	if err != nil {
 		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
 	}
-	realTmp, err := utils.ReadSymlinkedDirectory(tmp)
+	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
 	if err != nil {
 		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
 	}
@@ -854,7 +721,7 @@
 	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
 		realRoot = config.Root
 	} else {
-		realRoot, err = utils.ReadSymlinkedDirectory(config.Root)
+		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
 		if err != nil {
 			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
 		}
@@ -871,25 +738,38 @@
 	// Load storage driver
 	driver, err := graphdriver.New(config.Root, config.GraphOptions)
 	if err != nil {
-		return nil, fmt.Errorf("error intializing graphdriver: %v", err)
+		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
 	}
-	log.Debugf("Using graph driver %s", driver)
-	// register cleanup for graph driver
-	eng.OnShutdown(func() {
-		if err := driver.Cleanup(); err != nil {
-			log.Errorf("Error during graph storage driver.Cleanup(): %v", err)
+	logrus.Debugf("Using graph driver %s", driver)
+
+	d := &Daemon{}
+	d.driver = driver
+
+	defer func() {
+		if err != nil {
+			if err := d.Shutdown(); err != nil {
+				logrus.Error(err)
+			}
 		}
-	})
+	}()
+
+	// Verify logging driver type
+	if config.LogConfig.Type != "none" {
+		if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil {
+			return nil, fmt.Errorf("error finding the logging driver: %v", err)
+		}
+	}
+	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
 
 	if config.EnableSelinuxSupport {
 		if selinuxEnabled() {
 			// As Docker on btrfs and SELinux are incompatible at present, error on both being enabled
-			if driver.String() == "btrfs" {
+			if d.driver.String() == "btrfs" {
 				return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver")
 			}
-			log.Debug("SELinux enabled successfully")
+			logrus.Debug("SELinux enabled successfully")
 		} else {
-			log.Warn("Docker could not enable SELinux on the host system")
+			logrus.Warn("Docker could not enable SELinux on the host system")
 		}
 	} else {
 		selinuxSetDisabled()
@@ -902,62 +782,54 @@
 	}
 
 	// Migrate the container if it is aufs and aufs is enabled
-	if err = migrateIfAufs(driver, config.Root); err != nil {
+	if err := migrateIfAufs(d.driver, config.Root); err != nil {
 		return nil, err
 	}
 
-	log.Debugf("Creating images graph")
-	g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
+	logrus.Debug("Creating images graph")
+	g, err := graph.NewGraph(path.Join(config.Root, "graph"), d.driver)
 	if err != nil {
 		return nil, err
 	}
 
-	volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions)
+	volumesDriver, err := local.New(filepath.Join(config.Root, defaultVolumesPathName))
 	if err != nil {
 		return nil, err
 	}
-
-	volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver)
-	if err != nil {
-		return nil, err
-	}
+	volumedrivers.Register(volumesDriver, volumesDriver.Name())
 
 	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
 	if err != nil {
 		return nil, err
 	}
 
-	log.Debugf("Creating repository list")
-	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, trustKey)
-	if err != nil {
-		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
-	}
-
 	trustDir := path.Join(config.Root, "trust")
 	if err := os.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) {
 		return nil, err
 	}
-	t, err := trust.NewTrustStore(trustDir)
+	trustService, err := trust.NewTrustStore(trustDir)
 	if err != nil {
 		return nil, fmt.Errorf("could not create trust store: %s", err)
 	}
 
+	eventsService := events.New()
+	logrus.Debug("Creating repository list")
+	tagCfg := &graph.TagStoreConfig{
+		Graph:    g,
+		Key:      trustKey,
+		Registry: registryService,
+		Events:   eventsService,
+		Trust:    trustService,
+	}
+	repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+d.driver.String()), tagCfg)
+	if err != nil {
+		return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
+	}
+
 	if !config.DisableNetwork {
-		job := eng.Job("init_networkdriver")
-
-		job.SetenvBool("EnableIptables", config.EnableIptables)
-		job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication)
-		job.SetenvBool("EnableIpForward", config.EnableIpForward)
-		job.SetenvBool("EnableIpMasq", config.EnableIpMasq)
-		job.SetenvBool("EnableIPv6", config.EnableIPv6)
-		job.Setenv("BridgeIface", config.BridgeIface)
-		job.Setenv("BridgeIP", config.BridgeIP)
-		job.Setenv("FixedCIDR", config.FixedCIDR)
-		job.Setenv("FixedCIDRv6", config.FixedCIDRv6)
-		job.Setenv("DefaultBindingIP", config.DefaultIp.String())
-
-		if err := job.Run(); err != nil {
-			return nil, err
+		d.netController, err = initNetworkController(config)
+		if err != nil {
+			return nil, fmt.Errorf("Error initializing network controller: %v", err)
 		}
 	}
 
@@ -966,17 +838,13 @@
 	if err != nil {
 		return nil, err
 	}
-	// register graph close on shutdown
-	eng.OnShutdown(func() {
-		if err := graph.Close(); err != nil {
-			log.Errorf("Error during container graph.Close(): %v", err)
-		}
-	})
+
+	d.containerGraph = graph
 
 	localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
 	sysInitPath := utils.DockerInitPath(localCopy)
 	if sysInitPath == "" {
-		return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.")
+		return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See https://docs.docker.com/contributing/devenvironment for official build instructions.")
 	}
 
 	if sysInitPath != localCopy {
@@ -984,7 +852,7 @@
 		if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) {
 			return nil, err
 		}
-		if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil {
+		if _, err := fileutils.CopyFile(sysInitPath, localCopy); err != nil {
 			return nil, err
 		}
 		if err := os.Chmod(localCopy, 0700); err != nil {
@@ -994,71 +862,157 @@
 	}
 
 	sysInfo := sysinfo.New(false)
-	const runDir = "/var/run/docker"
-	ed, err := execdrivers.NewDriver(config.ExecDriver, runDir, config.Root, sysInitPath, sysInfo)
+	ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, config.ExecRoot, config.Root, sysInitPath, sysInfo)
 	if err != nil {
 		return nil, err
 	}
 
-	daemon := &Daemon{
-		ID:               trustKey.PublicKey().KeyID(),
-		repository:       daemonRepo,
-		containers:       &contStore{s: make(map[string]*Container)},
-		execCommands:     newExecStore(),
-		graph:            g,
-		repositories:     repositories,
-		idIndex:          truncindex.NewTruncIndex([]string{}),
-		sysInfo:          sysInfo,
-		volumes:          volumes,
-		config:           config,
-		containerGraph:   graph,
-		driver:           driver,
-		sysInitPath:      sysInitPath,
-		execDriver:       ed,
-		eng:              eng,
-		trustStore:       t,
-		statsCollector:   newStatsCollector(1 * time.Second),
-		defaultLogConfig: config.LogConfig,
-	}
+	d.ID = trustKey.PublicKey().KeyID()
+	d.repository = daemonRepo
+	d.containers = &contStore{s: make(map[string]*Container)}
+	d.execCommands = newExecStore()
+	d.graph = g
+	d.repositories = repositories
+	d.idIndex = truncindex.NewTruncIndex([]string{})
+	d.sysInfo = sysInfo
+	d.config = config
+	d.sysInitPath = sysInitPath
+	d.execDriver = ed
+	d.statsCollector = newStatsCollector(1 * time.Second)
+	d.defaultLogConfig = config.LogConfig
+	d.RegistryService = registryService
+	d.EventsService = eventsService
+	d.root = config.Root
 
-	eng.OnShutdown(func() {
-		if err := daemon.shutdown(); err != nil {
-			log.Errorf("Error during daemon.shutdown(): %v", err)
-		}
-	})
-
-	if err := daemon.restore(); err != nil {
+	if err := d.restore(); err != nil {
 		return nil, err
 	}
 
-	// set up filesystem watch on resolv.conf for network changes
-	if err := daemon.setupResolvconfWatcher(); err != nil {
-		return nil, err
-	}
-
-	return daemon, nil
+	return d, nil
 }
 
-func (daemon *Daemon) shutdown() error {
-	group := sync.WaitGroup{}
-	log.Debugf("starting clean shutdown of all containers...")
-	for _, container := range daemon.List() {
-		c := container
-		if c.IsRunning() {
-			log.Debugf("stopping %s", c.ID)
-			group.Add(1)
+func initNetworkController(config *Config) (libnetwork.NetworkController, error) {
+	controller, err := libnetwork.New()
+	if err != nil {
+		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
+	}
 
-			go func() {
-				defer group.Done()
-				if err := c.KillSig(15); err != nil {
-					log.Debugf("kill 15 error for %s - %s", c.ID, err)
-				}
-				c.WaitStop(-1 * time.Second)
-				log.Debugf("container stopped %s", c.ID)
-			}()
+	// Initialize default driver "null"
+
+	if err := controller.ConfigureNetworkDriver("null", options.Generic{}); err != nil {
+		return nil, fmt.Errorf("Error initializing null driver: %v", err)
+	}
+
+	// Initialize default network on "null"
+	if _, err := controller.NewNetwork("null", "none"); err != nil {
+		return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
+	}
+
+	// Initialize default driver "host"
+	if err := controller.ConfigureNetworkDriver("host", options.Generic{}); err != nil {
+		return nil, fmt.Errorf("Error initializing host driver: %v", err)
+	}
+
+	// Initialize default network on "host"
+	if _, err := controller.NewNetwork("host", "host"); err != nil {
+		return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
+	}
+
+	// Initialize default driver "bridge"
+	option := options.Generic{
+		"EnableIPForwarding": config.Bridge.EnableIPForward}
+
+	if err := controller.ConfigureNetworkDriver("bridge", options.Generic{netlabel.GenericData: option}); err != nil {
+		return nil, fmt.Errorf("Error initializing bridge driver: %v", err)
+	}
+
+	netOption := options.Generic{
+		"BridgeName":          config.Bridge.Iface,
+		"Mtu":                 config.Mtu,
+		"EnableIPTables":      config.Bridge.EnableIPTables,
+		"EnableIPMasquerade":  config.Bridge.EnableIPMasq,
+		"EnableICC":           config.Bridge.InterContainerCommunication,
+		"EnableUserlandProxy": config.Bridge.EnableUserlandProxy,
+	}
+
+	if config.Bridge.IP != "" {
+		ip, bipNet, err := net.ParseCIDR(config.Bridge.IP)
+		if err != nil {
+			return nil, err
+		}
+
+		bipNet.IP = ip
+		netOption["AddressIPv4"] = bipNet
+	}
+
+	if config.Bridge.FixedCIDR != "" {
+		_, fCIDR, err := net.ParseCIDR(config.Bridge.FixedCIDR)
+		if err != nil {
+			return nil, err
+		}
+
+		netOption["FixedCIDR"] = fCIDR
+	}
+
+	if config.Bridge.FixedCIDRv6 != "" {
+		_, fCIDRv6, err := net.ParseCIDR(config.Bridge.FixedCIDRv6)
+		if err != nil {
+			return nil, err
+		}
+
+		netOption["FixedCIDRv6"] = fCIDRv6
+	}
+
+	// --ip processing
+	if config.Bridge.DefaultIP != nil {
+		netOption["DefaultBindingIP"] = config.Bridge.DefaultIP
+	}
+
+	// Initialize default network on "bridge" with the same name
+	_, err = controller.NewNetwork("bridge", "bridge",
+		libnetwork.NetworkOptionGeneric(options.Generic{
+			netlabel.GenericData: netOption,
+			netlabel.EnableIPv6:  config.Bridge.EnableIPv6,
+		}))
+	if err != nil {
+		return nil, fmt.Errorf("Error creating default \"bridge\" network: %v", err)
+	}
+
+	return controller, nil
+}
+
+func (daemon *Daemon) Shutdown() error {
+	if daemon.containerGraph != nil {
+		if err := daemon.containerGraph.Close(); err != nil {
+			logrus.Errorf("Error during container graph.Close(): %v", err)
 		}
 	}
-	group.Wait()
+	if daemon.driver != nil {
+		if err := daemon.driver.Cleanup(); err != nil {
+			logrus.Errorf("Error during graph storage driver.Cleanup(): %v", err)
+		}
+	}
+	if daemon.containers != nil {
+		group := sync.WaitGroup{}
+		logrus.Debug("starting clean shutdown of all containers...")
+		for _, container := range daemon.List() {
+			c := container
+			if c.IsRunning() {
+				logrus.Debugf("stopping %s", c.ID)
+				group.Add(1)
+
+				go func() {
+					defer group.Done()
+					if err := c.KillSig(15); err != nil {
+						logrus.Debugf("kill 15 error for %s - %s", c.ID, err)
+					}
+					c.WaitStop(-1 * time.Second)
+					logrus.Debugf("container stopped %s", c.ID)
+				}()
+			}
+		}
+		group.Wait()
+	}
 
 	return nil
 }
@@ -1097,22 +1051,6 @@
 	return daemon.execDriver.Run(c.command, pipes, startCallback)
 }
 
-func (daemon *Daemon) Pause(c *Container) error {
-	if err := daemon.execDriver.Pause(c.command); err != nil {
-		return err
-	}
-	c.SetPaused()
-	return nil
-}
-
-func (daemon *Daemon) Unpause(c *Container) error {
-	if err := daemon.execDriver.Unpause(c.command); err != nil {
-		return err
-	}
-	c.SetUnpaused()
-	return nil
-}
-
 func (daemon *Daemon) Kill(c *Container, sig int) error {
 	return daemon.execDriver.Kill(c.command, sig)
 }
@@ -1139,26 +1077,6 @@
 	return nil
 }
 
-// Nuke kills all containers then removes all content
-// from the content root, including images, volumes and
-// container filesystems.
-// Again: this will remove your entire docker daemon!
-// FIXME: this is deprecated, and only used in legacy
-// tests. Please remove.
-func (daemon *Daemon) Nuke() error {
-	var wg sync.WaitGroup
-	for _, container := range daemon.List() {
-		wg.Add(1)
-		go func(c *Container) {
-			c.Kill()
-			wg.Done()
-		}(container)
-	}
-	wg.Wait()
-
-	return os.RemoveAll(daemon.config.Root)
-}
-
 // FIXME: this is a convenience function for integration tests
 // which need direct access to daemon.graph.
 // Once the tests switch to using engine and jobs, this method
@@ -1227,22 +1145,103 @@
 	return match, nil
 }
 
+// tempDir returns the default directory to use for temporary files.
+func tempDir(rootDir string) (string, error) {
+	var tmpDir string
+	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
+		tmpDir = filepath.Join(rootDir, "tmp")
+	}
+	return tmpDir, os.MkdirAll(tmpDir, 0700)
+}
+
 func checkKernel() error {
 	// Check for unsupported kernel versions
 	// FIXME: it would be cleaner to not test for specific versions, but rather
 	// test for specific functionalities.
 	// Unfortunately we can't test for the feature "does not cause a kernel panic"
 	// without actually causing a kernel panic, so we need this workaround until
-	// the circumstances of pre-3.8 crashes are clearer.
-	// For details see http://github.com/docker/docker/issues/407
+	// the circumstances of pre-3.10 crashes are clearer.
+	// For details see https://github.com/docker/docker/issues/407
 	if k, err := kernel.GetKernelVersion(); err != nil {
-		log.Warnf("%s", err)
+		logrus.Warnf("%s", err)
 	} else {
-		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+		if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 10, Minor: 0}) < 0 {
 			if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
-				log.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
+				logrus.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.10.0.", k.String())
 			}
 		}
 	}
 	return nil
 }
+
+func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]string, error) {
+	var warnings []string
+
+	if hostConfig == nil {
+		return warnings, nil
+	}
+
+	if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
+		return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
+	}
+	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
+		return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
+	}
+	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
+		warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
+		hostConfig.Memory = 0
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
+		warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
+		hostConfig.MemorySwap = -1
+	}
+	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
+		return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
+	}
+	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
+		return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
+	}
+	if hostConfig.CpuPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod {
+		warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
+		hostConfig.CpuPeriod = 0
+	}
+	if hostConfig.CpuQuota > 0 && !daemon.SystemConfig().CpuCfsQuota {
+		warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
+		hostConfig.CpuQuota = 0
+	}
+	if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) {
+		return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
+	}
+	if hostConfig.OomKillDisable && !daemon.SystemConfig().OomKillDisable {
+		hostConfig.OomKillDisable = false
+		return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
+	}
+
+	return warnings, nil
+}
+
+func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
+	container.Lock()
+	if err := parseSecurityOpt(container, hostConfig); err != nil {
+		container.Unlock()
+		return err
+	}
+	container.Unlock()
+
+	// Do not lock while creating volumes since this could be calling out to external plugins
+	// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
+	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
+		return err
+	}
+
+	container.Lock()
+	defer container.Unlock()
+	// Register any links from the host config before starting the container
+	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
+		return err
+	}
+
+	container.hostConfig = hostConfig
+	container.toDisk()
+	return nil
+}
diff --git a/daemon/daemon_aufs.go b/daemon/daemon_aufs.go
index 7d4d3c3..377e829 100644
--- a/daemon/daemon_aufs.go
+++ b/daemon/daemon_aufs.go
@@ -3,7 +3,7 @@
 package daemon
 
 import (
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver/aufs"
 	"github.com/docker/docker/graph"
@@ -13,7 +13,7 @@
 // If aufs driver is not built, this func is a noop.
 func migrateIfAufs(driver graphdriver.Driver, root string) error {
 	if ad, ok := driver.(*aufs.Driver); ok {
-		log.Debugf("Migrating existing containers")
+		logrus.Debugf("Migrating existing containers")
 		if err := ad.Migrate(root, graph.SetupInitLayer); err != nil {
 			return err
 		}
diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go
index 43030b6..d4c4be3 100644
--- a/daemon/daemon_test.go
+++ b/daemon/daemon_test.go
@@ -1,11 +1,17 @@
 package daemon
 
 import (
-	"github.com/docker/docker/pkg/graphdb"
-	"github.com/docker/docker/pkg/truncindex"
+	"fmt"
+	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"testing"
+
+	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/truncindex"
+	"github.com/docker/docker/volume"
 )
 
 //
@@ -14,24 +20,38 @@
 
 func TestGet(t *testing.T) {
 	c1 := &Container{
-		ID:   "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
-		Name: "tender_bardeen",
+		CommonContainer: CommonContainer{
+			ID:   "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+			Name: "tender_bardeen",
+		},
 	}
+
 	c2 := &Container{
-		ID:   "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
-		Name: "drunk_hawking",
+		CommonContainer: CommonContainer{
+			ID:   "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
+			Name: "drunk_hawking",
+		},
 	}
+
 	c3 := &Container{
-		ID:   "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
-		Name: "3cdbd1aa",
+		CommonContainer: CommonContainer{
+			ID:   "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
+			Name: "3cdbd1aa",
+		},
 	}
+
 	c4 := &Container{
-		ID:   "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
-		Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+		CommonContainer: CommonContainer{
+			ID:   "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
+			Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
+		},
 	}
+
 	c5 := &Container{
-		ID:   "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
-		Name: "d22d69a2b896",
+		CommonContainer: CommonContainer{
+			ID:   "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
+			Name: "d22d69a2b896",
+		},
 	}
 
 	store := &contStore{
@@ -99,3 +119,89 @@
 
 	os.Remove(daemonTestDbPath)
 }
+
+func TestLoadWithVolume(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "docker-daemon-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
+	containerPath := filepath.Join(tmp, containerId)
+	if err = os.MkdirAll(containerPath, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	hostVolumeId := stringid.GenerateRandomID()
+	volumePath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
+
+	config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
+"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
+"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
+"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
+"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
+"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
+"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
+"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
+"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}},
+"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
+"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname",
+"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts",
+"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log",
+"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
+"UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}`
+
+	cfg := fmt.Sprintf(config, volumePath)
+	if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(cfg), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	hostConfig := `{"Binds":[],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
+"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
+"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
+"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
+	if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = os.MkdirAll(volumePath, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	daemon := &Daemon{
+		repository: tmp,
+		root:       tmp,
+	}
+
+	c, err := daemon.load(containerId)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = daemon.verifyOldVolumesInfo(c)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(c.MountPoints) != 1 {
+		t.Fatalf("Expected 1 volume mounted, was 0\n")
+	}
+
+	m := c.MountPoints["/vol1"]
+	if m.Name != hostVolumeId {
+		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name)
+	}
+
+	if m.Destination != "/vol1" {
+		t.Fatalf("Expected mount destination /vol1, was %s\n", m.Destination)
+	}
+
+	if !m.RW {
+		t.Fatalf("Expected mount point to be RW but it was not\n")
+	}
+
+	if m.Driver != volume.DefaultDriverName {
+		t.Fatalf("Expected mount driver local, was %s\n", m.Driver)
+	}
+}
diff --git a/daemon/daemon_zfs.go b/daemon/daemon_zfs.go
new file mode 100644
index 0000000..2fc1d87
--- /dev/null
+++ b/daemon/daemon_zfs.go
@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_zfs,linux
+
+package daemon
+
+import (
+	_ "github.com/docker/docker/daemon/graphdriver/zfs"
+)
diff --git a/daemon/debugtrap.go b/daemon/debugtrap.go
new file mode 100644
index 0000000..949bf3d
--- /dev/null
+++ b/daemon/debugtrap.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"os/signal"
+	"syscall"
+
+	psignal "github.com/docker/docker/pkg/signal"
+)
+
+func setupSigusr1Trap() {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGUSR1)
+	go func() {
+		for range c {
+			psignal.DumpStacks()
+		}
+	}()
+}
diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go
new file mode 100644
index 0000000..1600e8b
--- /dev/null
+++ b/daemon/debugtrap_unsupported.go
@@ -0,0 +1,7 @@
+// +build !linux,!darwin,!freebsd
+
+package daemon
+
+func setupSigusr1Trap() {
+	return
+}
diff --git a/daemon/delete.go b/daemon/delete.go
index 40f86df..39d5005 100644
--- a/daemon/delete.go
+++ b/daemon/delete.go
@@ -5,103 +5,72 @@
 	"os"
 	"path"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"github.com/Sirupsen/logrus"
 )
 
-func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
-	}
-	name := job.Args[0]
-	removeVolume := job.GetenvBool("removeVolume")
-	removeLink := job.GetenvBool("removeLink")
-	forceRemove := job.GetenvBool("forceRemove")
+type ContainerRmConfig struct {
+	ForceRemove, RemoveVolume, RemoveLink bool
+}
 
+func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
-	if removeLink {
+	if config.RemoveLink {
 		name, err := GetFullContainerName(name)
 		if err != nil {
-			job.Error(err)
+			return err
 		}
 		parent, n := path.Split(name)
 		if parent == "/" {
-			return job.Errorf("Conflict, cannot remove the default name of the container")
+			return fmt.Errorf("Conflict, cannot remove the default name of the container")
 		}
 		pe := daemon.ContainerGraph().Get(parent)
 		if pe == nil {
-			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+			return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
 		}
 		parentContainer, _ := daemon.Get(pe.ID())
 
+		if err := daemon.ContainerGraph().Delete(name); err != nil {
+			return err
+		}
+
 		if parentContainer != nil {
 			parentContainer.DisableLink(n)
 		}
 
-		if err := daemon.ContainerGraph().Delete(name); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
+		return nil
 	}
 
-	if container != nil {
-		// stop collection of stats for the container regardless
-		// if stats are currently getting collected.
-		daemon.statsCollector.stopCollection(container)
-		if container.IsRunning() {
-			if forceRemove {
-				if err := container.Kill(); err != nil {
-					return job.Errorf("Could not kill running container, cannot remove - %v", err)
-				}
-			} else {
-				return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
-			}
-		}
-
-		if forceRemove {
-			if err := daemon.ForceRm(container); err != nil {
-				log.Errorf("Cannot destroy container %s: %v", name, err)
-			}
-		} else {
-			if err := daemon.Rm(container); err != nil {
-				return job.Errorf("Cannot destroy container %s: %v", name, err)
-			}
-		}
-		container.LogEvent("destroy")
-		if removeVolume {
-			daemon.DeleteVolumes(container.VolumePaths())
-		}
+	if err := daemon.rm(container, config.ForceRemove); err != nil {
+		return fmt.Errorf("Cannot destroy container %s: %v", name, err)
 	}
-	return engine.StatusOK
-}
 
-func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
-	for id := range volumeIDs {
-		if err := daemon.volumes.Delete(id); err != nil {
-			log.Infof("%s", err)
-			continue
-		}
+	container.LogEvent("destroy")
+
+	if config.RemoveVolume {
+		container.removeMountPoints()
 	}
-}
-
-func (daemon *Daemon) Rm(container *Container) (err error) {
-	return daemon.commonRm(container, false)
-}
-
-func (daemon *Daemon) ForceRm(container *Container) (err error) {
-	return daemon.commonRm(container, true)
+	return nil
 }
 
 // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem.
-func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err error) {
-	if container == nil {
-		return fmt.Errorf("The given container is <nil>")
+func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
+	if container.IsRunning() {
+		if !forceRemove {
+			return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
+		}
+		if err := container.Kill(); err != nil {
+			return fmt.Errorf("Could not kill running container, cannot remove - %v", err)
+		}
 	}
 
+	// stop collection of stats for the container regardless
+	// if stats are currently getting collected.
+	daemon.statsCollector.stopCollection(container)
+
 	element := daemon.containers.Get(container.ID)
 	if element == nil {
 		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
@@ -132,12 +101,12 @@
 		if err != nil && forceRemove {
 			daemon.idIndex.Delete(container.ID)
 			daemon.containers.Delete(container.ID)
+			os.RemoveAll(container.root)
 		}
 	}()
 
-	container.derefVolumes()
 	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
-		log.Debugf("Unable to remove container from link graph: %s", err)
+		logrus.Debugf("Unable to remove container from link graph: %s", err)
 	}
 
 	if err = daemon.driver.Remove(container.ID); err != nil {
@@ -163,3 +132,7 @@
 
 	return nil
 }
+
+func (daemon *Daemon) DeleteVolumes(c *Container) error {
+	return c.removeMountPoints()
+}
diff --git a/daemon/events/events.go b/daemon/events/events.go
new file mode 100644
index 0000000..07ee29a
--- /dev/null
+++ b/daemon/events/events.go
@@ -0,0 +1,66 @@
+package events
+
+import (
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/docker/docker/pkg/pubsub"
+)
+
+const eventsLimit = 64
+
+// Events is pubsub channel for *jsonmessage.JSONMessage
+type Events struct {
+	mu     sync.Mutex
+	events []*jsonmessage.JSONMessage
+	pub    *pubsub.Publisher
+}
+
+// New returns new *Events instance
+func New() *Events {
+	return &Events{
+		events: make([]*jsonmessage.JSONMessage, 0, eventsLimit),
+		pub:    pubsub.NewPublisher(100*time.Millisecond, 1024),
+	}
+}
+
+// Subscribe adds new listener to events, returns slice of 64 stored last events
+// channel in which you can expect new events in form of interface{}, so you
+// need type assertion.
+func (e *Events) Subscribe() ([]*jsonmessage.JSONMessage, chan interface{}) {
+	e.mu.Lock()
+	current := make([]*jsonmessage.JSONMessage, len(e.events))
+	copy(current, e.events)
+	l := e.pub.Subscribe()
+	e.mu.Unlock()
+	return current, l
+}
+
+// Evict evicts listener from pubsub
+func (e *Events) Evict(l chan interface{}) {
+	e.pub.Evict(l)
+}
+
+// Log broadcasts event to listeners. Each listener has 100 millisecond for
+// receiving event or it will be skipped.
+func (e *Events) Log(action, id, from string) {
+	go func() {
+		e.mu.Lock()
+		jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: time.Now().UTC().Unix()}
+		if len(e.events) == cap(e.events) {
+			// discard oldest event
+			copy(e.events, e.events[1:])
+			e.events[len(e.events)-1] = jm
+		} else {
+			e.events = append(e.events, jm)
+		}
+		e.mu.Unlock()
+		e.pub.Publish(jm)
+	}()
+}
+
+// SubscribersCount returns number of event listeners
+func (e *Events) SubscribersCount() int {
+	return e.pub.Len()
+}
diff --git a/daemon/events/events_test.go b/daemon/events/events_test.go
new file mode 100644
index 0000000..7aa8d9f
--- /dev/null
+++ b/daemon/events/events_test.go
@@ -0,0 +1,135 @@
+package events
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+)
+
+func TestEventsLog(t *testing.T) {
+	e := New()
+	_, l1 := e.Subscribe()
+	_, l2 := e.Subscribe()
+	defer e.Evict(l1)
+	defer e.Evict(l2)
+	count := e.SubscribersCount()
+	if count != 2 {
+		t.Fatalf("Must be 2 subscribers, got %d", count)
+	}
+	e.Log("test", "cont", "image")
+	select {
+	case msg := <-l1:
+		jmsg, ok := msg.(*jsonmessage.JSONMessage)
+		if !ok {
+			t.Fatalf("Unexpected type %T", msg)
+		}
+		if len(e.events) != 1 {
+			t.Fatalf("Must be only one event, got %d", len(e.events))
+		}
+		if jmsg.Status != "test" {
+			t.Fatalf("Status should be test, got %s", jmsg.Status)
+		}
+		if jmsg.ID != "cont" {
+			t.Fatalf("ID should be cont, got %s", jmsg.ID)
+		}
+		if jmsg.From != "image" {
+			t.Fatalf("From should be image, got %s", jmsg.From)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Timeout waiting for broadcasted message")
+	}
+	select {
+	case msg := <-l2:
+		jmsg, ok := msg.(*jsonmessage.JSONMessage)
+		if !ok {
+			t.Fatalf("Unexpected type %T", msg)
+		}
+		if len(e.events) != 1 {
+			t.Fatalf("Must be only one event, got %d", len(e.events))
+		}
+		if jmsg.Status != "test" {
+			t.Fatalf("Status should be test, got %s", jmsg.Status)
+		}
+		if jmsg.ID != "cont" {
+			t.Fatalf("ID should be cont, got %s", jmsg.ID)
+		}
+		if jmsg.From != "image" {
+			t.Fatalf("From should be image, got %s", jmsg.From)
+		}
+	case <-time.After(1 * time.Second):
+		t.Fatal("Timeout waiting for broadcasted message")
+	}
+}
+
+func TestEventsLogTimeout(t *testing.T) {
+	e := New()
+	_, l := e.Subscribe()
+	defer e.Evict(l)
+
+	c := make(chan struct{})
+	go func() {
+		e.Log("test", "cont", "image")
+		close(c)
+	}()
+
+	select {
+	case <-c:
+	case <-time.After(time.Second):
+		t.Fatal("Timeout publishing message")
+	}
+}
+
+func TestLogEvents(t *testing.T) {
+	e := New()
+
+	for i := 0; i < eventsLimit+16; i++ {
+		action := fmt.Sprintf("action_%d", i)
+		id := fmt.Sprintf("cont_%d", i)
+		from := fmt.Sprintf("image_%d", i)
+		e.Log(action, id, from)
+	}
+	time.Sleep(50 * time.Millisecond)
+	current, l := e.Subscribe()
+	for i := 0; i < 10; i++ {
+		num := i + eventsLimit + 16
+		action := fmt.Sprintf("action_%d", num)
+		id := fmt.Sprintf("cont_%d", num)
+		from := fmt.Sprintf("image_%d", num)
+		e.Log(action, id, from)
+	}
+	if len(e.events) != eventsLimit {
+		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
+	}
+
+	var msgs []*jsonmessage.JSONMessage
+	for len(msgs) < 10 {
+		m := <-l
+		jm, ok := (m).(*jsonmessage.JSONMessage)
+		if !ok {
+			t.Fatalf("Unexpected type %T", m)
+		}
+		msgs = append(msgs, jm)
+	}
+	if len(current) != eventsLimit {
+		t.Fatalf("Must be %d events, got %d", eventsLimit, len(current))
+	}
+	first := current[0]
+	if first.Status != "action_16" {
+		t.Fatalf("First action is %s, must be action_16", first.Status)
+	}
+	last := current[len(current)-1]
+	if last.Status != "action_79" {
+		t.Fatalf("Last action is %s, must be action_79", last.Status)
+	}
+
+	firstC := msgs[0]
+	if firstC.Status != "action_80" {
+		t.Fatalf("First action is %s, must be action_80", firstC.Status)
+	}
+	lastC := msgs[len(msgs)-1]
+	if lastC.Status != "action_89" {
+		t.Fatalf("Last action is %s, must be action_89", lastC.Status)
+	}
+}
diff --git a/daemon/exec.go b/daemon/exec.go
index 3af40ac..043afe0 100644
--- a/daemon/exec.go
+++ b/daemon/exec.go
@@ -7,14 +7,11 @@
 	"strings"
 	"sync"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/daemon/execdriver/lxc"
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/broadcastwriter"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
 )
 
@@ -111,37 +108,30 @@
 	return container, nil
 }
 
-func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s [options] container command [args]", job.Name)
+func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
+
+	// Not all drivers support Exec (LXC for example)
+	if err := checkExecSupport(d.execDriver.Name()); err != nil {
+		return "", err
 	}
 
-	if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) {
-		return job.Error(lxc.ErrExec)
-	}
-
-	var name = job.Args[0]
-
-	container, err := d.getActiveContainer(name)
+	container, err := d.getActiveContainer(config.Container)
 	if err != nil {
-		return job.Error(err)
+		return "", err
 	}
 
-	config, err := runconfig.ExecConfigFromJob(job)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd)
+	cmd := runconfig.NewCommand(config.Cmd...)
+	entrypoint, args := d.getEntrypointAndArgs(runconfig.NewEntrypoint(), cmd)
 
 	processConfig := execdriver.ProcessConfig{
 		Tty:        config.Tty,
 		Entrypoint: entrypoint,
 		Arguments:  args,
+		User:       config.User,
 	}
 
 	execConfig := &execConfig{
-		ID:            common.GenerateRandomID(),
+		ID:            stringid.GenerateRandomID(),
 		OpenStdin:     config.AttachStdin,
 		OpenStdout:    config.AttachStdout,
 		OpenStderr:    config.AttachStderr,
@@ -155,25 +145,20 @@
 
 	d.registerExecCommand(execConfig)
 
-	job.Printf("%s\n", execConfig.ID)
+	return execConfig.ID, nil
 
-	return engine.StatusOK
 }
 
-func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s [options] exec", job.Name)
-	}
+func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
 
 	var (
 		cStdin           io.ReadCloser
 		cStdout, cStderr io.Writer
-		execName         = job.Args[0]
 	)
 
 	execConfig, err := d.getExecConfig(execName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	func() {
@@ -185,10 +170,10 @@
 		execConfig.Running = true
 	}()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
-	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
+	logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
 	container := execConfig.Container
 
 	container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))
@@ -197,16 +182,16 @@
 		r, w := io.Pipe()
 		go func() {
 			defer w.Close()
-			defer log.Debugf("Closing buffered stdin pipe")
-			io.Copy(w, job.Stdin)
+			defer logrus.Debugf("Closing buffered stdin pipe")
+			io.Copy(w, stdin)
 		}()
 		cStdin = r
 	}
 	if execConfig.OpenStdout {
-		cStdout = job.Stdout
+		cStdout = stdout
 	}
 	if execConfig.OpenStderr {
-		cStderr = job.Stderr
+		cStderr = stderr
 	}
 
 	execConfig.StreamConfig.stderr = broadcastwriter.New()
@@ -218,7 +203,7 @@
 		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 
-	attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
+	attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
 
 	execErr := make(chan error)
 
@@ -227,8 +212,7 @@
 	// the exitStatus) even after the cmd is done running.
 
 	go func() {
-		err := container.Exec(execConfig)
-		if err != nil {
+		if err := container.Exec(execConfig); err != nil {
 			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
 		}
 	}()
@@ -236,14 +220,14 @@
 	select {
 	case err := <-attachErr:
 		if err != nil {
-			return job.Errorf("attach failed with error: %s", err)
+			return fmt.Errorf("attach failed with error: %s", err)
 		}
 		break
 	case err := <-execErr:
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
@@ -259,72 +243,3 @@
 
 	return exitStatus, err
 }
-
-func (container *Container) GetExecIDs() []string {
-	return container.execCommands.List()
-}
-
-func (container *Container) Exec(execConfig *execConfig) error {
-	container.Lock()
-	defer container.Unlock()
-
-	waitStart := make(chan struct{})
-
-	callback := func(processConfig *execdriver.ProcessConfig, pid int) {
-		if processConfig.Tty {
-			// The callback is called after the process Start()
-			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
-			// which we close here.
-			if c, ok := processConfig.Stdout.(io.Closer); ok {
-				c.Close()
-			}
-		}
-		close(waitStart)
-	}
-
-	// We use a callback here instead of a goroutine and an chan for
-	// syncronization purposes
-	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
-
-	// Exec should not return until the process is actually running
-	select {
-	case <-waitStart:
-	case err := <-cErr:
-		return err
-	}
-
-	return nil
-}
-
-func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
-	var (
-		err      error
-		exitCode int
-	)
-
-	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
-	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
-	if err != nil {
-		log.Errorf("Error running command in existing container %s: %s", container.ID, err)
-	}
-
-	log.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
-	if execConfig.OpenStdin {
-		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
-			log.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
-		}
-	}
-	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
-		log.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
-	}
-	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
-		log.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
-	}
-	if execConfig.ProcessConfig.Terminal != nil {
-		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
-			log.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
-		}
-	}
-
-	return err
-}
diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go
new file mode 100644
index 0000000..a360326
--- /dev/null
+++ b/daemon/exec_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+
+package daemon
+
+import (
+	"strings"
+
+	"github.com/docker/docker/daemon/execdriver/lxc"
+)
+
+// checkExecSupport returns an error if the exec driver does not support exec,
+// or nil if it is supported.
+func checkExecSupport(drivername string) error {
+	if strings.HasPrefix(drivername, lxc.DriverName) {
+		return lxc.ErrExec
+	}
+	return nil
+}
diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go
new file mode 100644
index 0000000..d6f244e
--- /dev/null
+++ b/daemon/exec_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package daemon
+
+// checkExecSupport returns an error if the exec driver does not support exec,
+// or nil if it is supported.
+func checkExecSupport(DriverName string) error {
+	return nil
+}
diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go
index e937de3..eca77e9 100644
--- a/daemon/execdriver/driver.go
+++ b/daemon/execdriver/driver.go
@@ -1,21 +1,14 @@
 package execdriver
 
 import (
-	"encoding/json"
 	"errors"
 	"io"
-	"io/ioutil"
-	"os"
 	"os/exec"
-	"path/filepath"
-	"strconv"
-	"strings"
 	"time"
 
-	"github.com/docker/docker/daemon/execdriver/native/template"
+	// TODO Windows: Factor out ulimit
 	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/cgroups/fs"
 	"github.com/docker/libcontainer/configs"
 )
 
@@ -79,6 +72,7 @@
 	Interface      *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled
 	Mtu            int               `json:"mtu"`
 	ContainerID    string            `json:"container_id"` // id of the container to join network.
+	NamespacePath  string            `json:"namespace_path"`
 	HostNetworking bool              `json:"host_networking"`
 }
 
@@ -93,6 +87,11 @@
 	HostPid bool `json:"host_pid"`
 }
 
+// UTS settings of the container
+type UTS struct {
+	HostUTS bool `json:"host_uts"`
+}
+
 type NetworkInterface struct {
 	Gateway              string `json:"gateway"`
 	IPAddress            string `json:"ip"`
@@ -103,14 +102,21 @@
 	LinkLocalIPv6Address string `json:"link_local_ipv6"`
 	GlobalIPv6PrefixLen  int    `json:"global_ipv6_prefix_len"`
 	IPv6Gateway          string `json:"ipv6_gateway"`
+	HairpinMode          bool   `json:"hairpin_mode"`
 }
 
+// TODO Windows: Factor out ulimit.Rlimit
 type Resources struct {
-	Memory     int64            `json:"memory"`
-	MemorySwap int64            `json:"memory_swap"`
-	CpuShares  int64            `json:"cpu_shares"`
-	CpusetCpus string           `json:"cpuset_cpus"`
-	Rlimits    []*ulimit.Rlimit `json:"rlimits"`
+	Memory         int64            `json:"memory"`
+	MemorySwap     int64            `json:"memory_swap"`
+	CpuShares      int64            `json:"cpu_shares"`
+	CpusetCpus     string           `json:"cpuset_cpus"`
+	CpusetMems     string           `json:"cpuset_mems"`
+	CpuPeriod      int64            `json:"cpu_period"`
+	CpuQuota       int64            `json:"cpu_quota"`
+	BlkioWeight    int64            `json:"blkio_weight"`
+	Rlimits        []*ulimit.Rlimit `json:"rlimits"`
+	OomKillDisable bool             `json:"oom_kill_disable"`
 }
 
 type ResourceStats struct {
@@ -141,6 +147,9 @@
 	Console    string   `json:"-"` // dev/console path
 }
 
+// TODO Windows: Factor out unused fields such as LxcConfig, AppArmorProfile,
+// and CgroupParent.
+//
 // Process wrapps an os/exec.Cmd to add more metadata
 type Command struct {
 	ID                 string            `json:"id"`
@@ -152,6 +161,7 @@
 	Network            *Network          `json:"network"`
 	Ipc                *Ipc              `json:"ipc"`
 	Pid                *Pid              `json:"pid"`
+	UTS                *UTS              `json:"uts"`
 	Resources          *Resources        `json:"resources"`
 	Mounts             []Mount           `json:"mounts"`
 	AllowedDevices     []*configs.Device `json:"allowed_devices"`
@@ -166,142 +176,3 @@
 	AppArmorProfile    string            `json:"apparmor_profile"`
 	CgroupParent       string            `json:"cgroup_parent"` // The parent cgroup for this command.
 }
-
-func InitContainer(c *Command) *configs.Config {
-	container := template.New()
-
-	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
-	container.Cgroups.Name = c.ID
-	container.Cgroups.AllowedDevices = c.AllowedDevices
-	container.Readonlyfs = c.ReadonlyRootfs
-	container.Devices = c.AutoCreatedDevices
-	container.Rootfs = c.Rootfs
-	container.Readonlyfs = c.ReadonlyRootfs
-
-	// check to see if we are running in ramdisk to disable pivot root
-	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
-
-	// Default parent cgroup is "docker". Override if required.
-	if c.CgroupParent != "" {
-		container.Cgroups.Parent = c.CgroupParent
-	}
-	return container
-}
-
-func getEnv(key string, env []string) string {
-	for _, pair := range env {
-		parts := strings.Split(pair, "=")
-		if parts[0] == key {
-			return parts[1]
-		}
-	}
-	return ""
-}
-
-func SetupCgroups(container *configs.Config, c *Command) error {
-	if c.Resources != nil {
-		container.Cgroups.CpuShares = c.Resources.CpuShares
-		container.Cgroups.Memory = c.Resources.Memory
-		container.Cgroups.MemoryReservation = c.Resources.Memory
-		container.Cgroups.MemorySwap = c.Resources.MemorySwap
-		container.Cgroups.CpusetCpus = c.Resources.CpusetCpus
-	}
-
-	return nil
-}
-
-// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
-func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
-	out := &libcontainer.NetworkInterface{Name: interfaceName}
-	// This can happen if the network runtime information is missing - possible if the
-	// container was created by an old version of libcontainer.
-	if interfaceName == "" {
-		return out, nil
-	}
-	type netStatsPair struct {
-		// Where to write the output.
-		Out *uint64
-		// The network stats file to read.
-		File string
-	}
-	// Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
-	netStats := []netStatsPair{
-		{Out: &out.RxBytes, File: "tx_bytes"},
-		{Out: &out.RxPackets, File: "tx_packets"},
-		{Out: &out.RxErrors, File: "tx_errors"},
-		{Out: &out.RxDropped, File: "tx_dropped"},
-
-		{Out: &out.TxBytes, File: "rx_bytes"},
-		{Out: &out.TxPackets, File: "rx_packets"},
-		{Out: &out.TxErrors, File: "rx_errors"},
-		{Out: &out.TxDropped, File: "rx_dropped"},
-	}
-	for _, netStat := range netStats {
-		data, err := readSysfsNetworkStats(interfaceName, netStat.File)
-		if err != nil {
-			return nil, err
-		}
-		*(netStat.Out) = data
-	}
-	return out, nil
-}
-
-// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
-func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
-	data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
-	if err != nil {
-		return 0, err
-	}
-	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
-}
-
-func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
-	f, err := os.Open(filepath.Join(containerDir, "state.json"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	type network struct {
-		Type              string
-		HostInterfaceName string
-	}
-
-	state := struct {
-		CgroupPaths map[string]string `json:"cgroup_paths"`
-		Networks    []network
-	}{}
-
-	if err := json.NewDecoder(f).Decode(&state); err != nil {
-		return nil, err
-	}
-	now := time.Now()
-
-	mgr := fs.Manager{Paths: state.CgroupPaths}
-	cstats, err := mgr.GetStats()
-	if err != nil {
-		return nil, err
-	}
-	stats := &libcontainer.Stats{CgroupStats: cstats}
-	// if the container does not have any memory limit specified set the
-	// limit to the machines memory
-	memoryLimit := containerMemoryLimit
-	if memoryLimit == 0 {
-		memoryLimit = machineMemory
-	}
-	for _, iface := range state.Networks {
-		switch iface.Type {
-		case "veth":
-			istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
-			if err != nil {
-				return nil, err
-			}
-			stats.Interfaces = append(stats.Interfaces, istats)
-		}
-	}
-	return &ResourceStats{
-		Stats:       stats,
-		Read:        now,
-		MemoryLimit: memoryLimit,
-	}, nil
-}
diff --git a/daemon/execdriver/driver_linux.go b/daemon/execdriver/driver_linux.go
new file mode 100644
index 0000000..63d043e
--- /dev/null
+++ b/daemon/execdriver/driver_linux.go
@@ -0,0 +1,159 @@
+package execdriver
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/daemon/execdriver/native/template"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/cgroups/fs"
+	"github.com/docker/libcontainer/configs"
+)
+
+func InitContainer(c *Command) *configs.Config {
+	container := template.New()
+
+	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
+	container.Cgroups.Name = c.ID
+	container.Cgroups.AllowedDevices = c.AllowedDevices
+	container.Devices = c.AutoCreatedDevices
+	container.Rootfs = c.Rootfs
+	container.Readonlyfs = c.ReadonlyRootfs
+
+	// check to see if we are running in ramdisk to disable pivot root
+	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+
+	// Default parent cgroup is "docker". Override if required.
+	if c.CgroupParent != "" {
+		container.Cgroups.Parent = c.CgroupParent
+	}
+	return container
+}
+
+func getEnv(key string, env []string) string {
+	for _, pair := range env {
+		parts := strings.Split(pair, "=")
+		if parts[0] == key {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
+func SetupCgroups(container *configs.Config, c *Command) error {
+	if c.Resources != nil {
+		container.Cgroups.CpuShares = c.Resources.CpuShares
+		container.Cgroups.Memory = c.Resources.Memory
+		container.Cgroups.MemoryReservation = c.Resources.Memory
+		container.Cgroups.MemorySwap = c.Resources.MemorySwap
+		container.Cgroups.CpusetCpus = c.Resources.CpusetCpus
+		container.Cgroups.CpusetMems = c.Resources.CpusetMems
+		container.Cgroups.CpuPeriod = c.Resources.CpuPeriod
+		container.Cgroups.CpuQuota = c.Resources.CpuQuota
+		container.Cgroups.BlkioWeight = c.Resources.BlkioWeight
+		container.Cgroups.OomKillDisable = c.Resources.OomKillDisable
+	}
+
+	return nil
+}
+
+// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
+func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) {
+	out := &libcontainer.NetworkInterface{Name: interfaceName}
+	// This can happen if the network runtime information is missing - possible if the
+	// container was created by an old version of libcontainer.
+	if interfaceName == "" {
+		return out, nil
+	}
+	type netStatsPair struct {
+		// Where to write the output.
+		Out *uint64
+		// The network stats file to read.
+		File string
+	}
+	// Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
+	netStats := []netStatsPair{
+		{Out: &out.RxBytes, File: "tx_bytes"},
+		{Out: &out.RxPackets, File: "tx_packets"},
+		{Out: &out.RxErrors, File: "tx_errors"},
+		{Out: &out.RxDropped, File: "tx_dropped"},
+
+		{Out: &out.TxBytes, File: "rx_bytes"},
+		{Out: &out.TxPackets, File: "rx_packets"},
+		{Out: &out.TxErrors, File: "rx_errors"},
+		{Out: &out.TxDropped, File: "rx_dropped"},
+	}
+	for _, netStat := range netStats {
+		data, err := readSysfsNetworkStats(interfaceName, netStat.File)
+		if err != nil {
+			return nil, err
+		}
+		*(netStat.Out) = data
+	}
+	return out, nil
+}
+
+// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
+func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
+	data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
+	f, err := os.Open(filepath.Join(containerDir, "state.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	type network struct {
+		Type              string
+		HostInterfaceName string
+	}
+
+	state := struct {
+		CgroupPaths map[string]string `json:"cgroup_paths"`
+		Networks    []network
+	}{}
+
+	if err := json.NewDecoder(f).Decode(&state); err != nil {
+		return nil, err
+	}
+	now := time.Now()
+
+	mgr := fs.Manager{Paths: state.CgroupPaths}
+	cstats, err := mgr.GetStats()
+	if err != nil {
+		return nil, err
+	}
+	stats := &libcontainer.Stats{CgroupStats: cstats}
+	// if the container does not have any memory limit specified set the
+	// limit to the machines memory
+	memoryLimit := containerMemoryLimit
+	if memoryLimit == 0 {
+		memoryLimit = machineMemory
+	}
+	for _, iface := range state.Networks {
+		switch iface.Type {
+		case "veth":
+			istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
+			if err != nil {
+				return nil, err
+			}
+			stats.Interfaces = append(stats.Interfaces, istats)
+		}
+	}
+	return &ResourceStats{
+		Stats:       stats,
+		Read:        now,
+		MemoryLimit: memoryLimit,
+	}, nil
+}
diff --git a/daemon/execdriver/execdrivers/execdrivers.go b/daemon/execdriver/execdrivers/execdrivers.go
deleted file mode 100644
index f6f97c9..0000000
--- a/daemon/execdriver/execdrivers/execdrivers.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package execdrivers
-
-import (
-	"fmt"
-	"path"
-
-	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/daemon/execdriver/lxc"
-	"github.com/docker/docker/daemon/execdriver/native"
-	"github.com/docker/docker/pkg/sysinfo"
-)
-
-func NewDriver(name, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
-	switch name {
-	case "lxc":
-		// we want to give the lxc driver the full docker root because it needs
-		// to access and write config and template files in /var/lib/docker/containers/*
-		// to be backwards compatible
-		return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor)
-	case "native":
-		return native.NewDriver(path.Join(root, "execdriver", "native"), initPath)
-	}
-	return nil, fmt.Errorf("unknown exec driver %s", name)
-}
diff --git a/daemon/execdriver/execdrivers/execdrivers_linux.go b/daemon/execdriver/execdrivers/execdrivers_linux.go
new file mode 100644
index 0000000..89dedc7
--- /dev/null
+++ b/daemon/execdriver/execdrivers/execdrivers_linux.go
@@ -0,0 +1,26 @@
+// +build linux
+
+package execdrivers
+
+import (
+	"fmt"
+	"path"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/lxc"
+	"github.com/docker/docker/daemon/execdriver/native"
+	"github.com/docker/docker/pkg/sysinfo"
+)
+
+func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
+	switch name {
+	case "lxc":
+		// we want to give the lxc driver the full docker root because it needs
+		// to access and write config and template files in /var/lib/docker/containers/*
+		// to be backwards compatible
+		return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor)
+	case "native":
+		return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, options)
+	}
+	return nil, fmt.Errorf("unknown exec driver %s", name)
+}
diff --git a/daemon/execdriver/execdrivers/execdrivers_windows.go b/daemon/execdriver/execdrivers/execdrivers_windows.go
new file mode 100644
index 0000000..aca21ea
--- /dev/null
+++ b/daemon/execdriver/execdrivers/execdrivers_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+
+package execdrivers
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/daemon/execdriver/windows"
+	"github.com/docker/docker/pkg/sysinfo"
+)
+
+func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
+	switch name {
+	case "windows":
+		return windows.NewDriver(root, initPath)
+	}
+	return nil, fmt.Errorf("unknown exec driver %s", name)
+}
diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go
index 55c4ac4..4b5730a 100644
--- a/daemon/execdriver/lxc/driver.go
+++ b/daemon/execdriver/lxc/driver.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package lxc
 
 import (
@@ -10,24 +12,26 @@
 	"os/exec"
 	"path"
 	"path/filepath"
+	"runtime"
 	"strconv"
 	"strings"
 	"sync"
 	"syscall"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/stringutils"
 	sysinfo "github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/pkg/version"
-	"github.com/docker/docker/utils"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/configs"
 	"github.com/docker/libcontainer/system"
 	"github.com/docker/libcontainer/user"
 	"github.com/kr/pty"
+	"github.com/vishvananda/netns"
 )
 
 const DriverName = "lxc"
@@ -78,6 +82,41 @@
 	return fmt.Sprintf("%s-%s", DriverName, version)
 }
 
+func setupNetNs(nsPath string) (*os.Process, error) {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	origns, err := netns.Get()
+	if err != nil {
+		return nil, err
+	}
+	defer origns.Close()
+
+	f, err := os.OpenFile(nsPath, os.O_RDONLY, 0)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get network namespace %q: %v", nsPath, err)
+	}
+	defer f.Close()
+
+	nsFD := f.Fd()
+	if err := netns.Set(netns.NsHandle(nsFD)); err != nil {
+		return nil, fmt.Errorf("failed to set network namespace %q: %v", nsPath, err)
+	}
+	defer netns.Set(origns)
+
+	cmd := exec.Command("/bin/sh", "-c", "while true; do sleep 1; done")
+	if err := cmd.Start(); err != nil {
+		return nil, fmt.Errorf("failed to start netns process: %v", err)
+	}
+
+	return cmd.Process, nil
+}
+
+func killNetNsProc(proc *os.Process) {
+	proc.Kill()
+	proc.Wait()
+}
+
 func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 	var (
 		term     execdriver.Terminal
@@ -85,16 +124,25 @@
 		dataPath = d.containerDir(c.ID)
 	)
 
+	if c.Network.NamespacePath == "" && c.Network.ContainerID == "" {
+		return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("empty namespace path for non-container network")
+	}
+
+	container, err := d.createContainer(c)
+	if err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
 	if c.ProcessConfig.Tty {
 		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
 	} else {
 		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
-	c.ProcessConfig.Terminal = term
-	container, err := d.createContainer(c)
 	if err != nil {
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
+	c.ProcessConfig.Terminal = term
+
 	d.Lock()
 	d.activeContainers[c.ID] = &activeContainer{
 		container: container,
@@ -120,6 +168,7 @@
 		"lxc-start",
 		"-n", c.ID,
 		"-f", configPath,
+		"-q",
 	}
 
 	// From lxc>=1.1 the default behavior is to daemonize containers after start
@@ -128,10 +177,20 @@
 		params = append(params, "-F")
 	}
 
+	proc := &os.Process{}
 	if c.Network.ContainerID != "" {
 		params = append(params,
 			"--share-net", c.Network.ContainerID,
 		)
+	} else {
+		proc, err = setupNetNs(c.Network.NamespacePath)
+		if err != nil {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+
+		pidStr := fmt.Sprintf("%d", proc.Pid)
+		params = append(params,
+			"--share-net", pidStr)
 	}
 	if c.Ipc != nil {
 		if c.Ipc.ContainerID != "" {
@@ -149,15 +208,6 @@
 		"--",
 		c.InitPath,
 	)
-	if c.Network.Interface != nil {
-		params = append(params,
-			"-g", c.Network.Interface.Gateway,
-			"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
-		)
-	}
-	params = append(params,
-		"-mtu", strconv.Itoa(c.Network.Mtu),
-	)
 
 	if c.ProcessConfig.User != "" {
 		params = append(params, "-u", c.ProcessConfig.User)
@@ -187,13 +237,13 @@
 		// without exec in go we have to do this horrible shell hack...
 		shellString :=
 			"mount --make-rslave /; exec " +
-				utils.ShellQuoteArguments(params)
+				stringutils.ShellQuoteArguments(params)
 
 		params = []string{
 			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
 		}
 	}
-	log.Debugf("lxc params %s", params)
+	logrus.Debugf("lxc params %s", params)
 	var (
 		name = params[0]
 		arg  = params[1:]
@@ -206,10 +256,12 @@
 	c.ProcessConfig.Args = append([]string{name}, arg...)
 
 	if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
+		killNetNsProc(proc)
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
 	if err := c.ProcessConfig.Start(); err != nil {
+		killNetNsProc(proc)
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
@@ -237,8 +289,10 @@
 	// Poll lxc for RUNNING status
 	pid, err := d.waitForStart(c, waitLock)
 	if err != nil {
+		killNetNsProc(proc)
 		return terminate(err)
 	}
+	killNetNsProc(proc)
 
 	cgroupPaths, err := cgroupPaths(c.ID)
 	if err != nil {
@@ -263,7 +317,7 @@
 	c.ContainerPid = pid
 
 	if startCallback != nil {
-		log.Debugf("Invoking startCallback")
+		logrus.Debugf("Invoking startCallback")
 		startCallback(&c.ProcessConfig, pid)
 	}
 
@@ -271,19 +325,20 @@
 	oomKillNotification, err := notifyOnOOM(cgroupPaths)
 
 	<-waitLock
+	exitCode := getExitCode(c)
 
 	if err == nil {
 		_, oomKill = <-oomKillNotification
-		log.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
+		logrus.Debugf("oomKill error: %v, waitErr: %v", oomKill, waitErr)
 	} else {
-		log.Warnf("Your kernel does not support OOM notifications: %s", err)
+		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
 	}
 
 	// check oom error
-	exitCode := getExitCode(c)
 	if oomKill {
 		exitCode = 137
 	}
+
 	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
 }
 
@@ -351,11 +406,11 @@
 	if err != nil {
 		return nil, err
 	}
-	log.Debugf("subsystems: %s", subsystems)
+	logrus.Debugf("subsystems: %s", subsystems)
 	paths := make(map[string]string)
 	for _, subsystem := range subsystems {
 		cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
-		log.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir)
+		logrus.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir)
 		if err != nil {
 			//unsupported subystem
 			continue
@@ -461,7 +516,11 @@
 }
 
 func (d *driver) Kill(c *execdriver.Command, sig int) error {
-	return KillLxc(c.ID, sig)
+	if sig == 9 || c.ProcessConfig.Process == nil {
+		return KillLxc(c.ID, sig)
+	}
+
+	return c.ProcessConfig.Process.Signal(syscall.Signal(sig))
 }
 
 func (d *driver) Pause(c *execdriver.Command) error {
@@ -521,7 +580,8 @@
 	if err == nil {
 		output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput()
 	} else {
-		output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput()
+		// lxc-stop does not take arbitrary signals like lxc-kill does
+		output, err = exec.Command("lxc-stop", "-k", "-n", id).CombinedOutput()
 	}
 	if err != nil {
 		return fmt.Errorf("Err: %s Output: %s", err, output)
@@ -576,7 +636,7 @@
 
 	output, err := i.driver.getInfo(i.ID)
 	if err != nil {
-		log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
+		logrus.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
 		return false
 	}
 	if strings.Contains(string(output), "RUNNING") {
diff --git a/daemon/execdriver/lxc/info.go b/daemon/execdriver/lxc/info.go
index 27b4c58..279211f 100644
--- a/daemon/execdriver/lxc/info.go
+++ b/daemon/execdriver/lxc/info.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package lxc
 
 import (
diff --git a/daemon/execdriver/lxc/info_test.go b/daemon/execdriver/lxc/info_test.go
index edafc02..996d56b 100644
--- a/daemon/execdriver/lxc/info_test.go
+++ b/daemon/execdriver/lxc/info_test.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package lxc
 
 import (
diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go
index e995026..a47ece9 100644
--- a/daemon/execdriver/lxc/init.go
+++ b/daemon/execdriver/lxc/init.go
@@ -1,10 +1,11 @@
+// +build linux
+
 package lxc
 
 import (
 	"encoding/json"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"os"
 	"os/exec"
@@ -107,12 +108,13 @@
 func setupEnv(args *InitArgs) error {
 	// Get env
 	var env []string
-	content, err := ioutil.ReadFile(".dockerenv")
+	dockerenv, err := os.Open(".dockerenv")
 	if err != nil {
 		return fmt.Errorf("Unable to load environment variables: %v", err)
 	}
-	if err := json.Unmarshal(content, &env); err != nil {
-		return fmt.Errorf("Unable to unmarshal environment variables: %v", err)
+	defer dockerenv.Close()
+	if err := json.NewDecoder(dockerenv).Decode(&env); err != nil {
+		return fmt.Errorf("Unable to decode environment variables: %v", err)
 	}
 	// Propagate the plugin-specific container env variable
 	env = append(env, "container="+os.Getenv("container"))
@@ -141,13 +143,3 @@
 	}
 	return nil
 }
-
-func getEnv(args *InitArgs, key string) string {
-	for _, kv := range args.Env {
-		parts := strings.SplitN(kv, "=", 2)
-		if parts[0] == key && len(parts) == 2 {
-			return parts[1]
-		}
-	}
-	return ""
-}
diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go
index e7bc2b5..fb89ac6 100644
--- a/daemon/execdriver/lxc/lxc_init_linux.go
+++ b/daemon/execdriver/lxc/lxc_init_linux.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package lxc
 
 import (
diff --git a/daemon/execdriver/lxc/lxc_init_unsupported.go b/daemon/execdriver/lxc/lxc_init_unsupported.go
index 97bc8a9..3b7be13 100644
--- a/daemon/execdriver/lxc/lxc_init_unsupported.go
+++ b/daemon/execdriver/lxc/lxc_init_unsupported.go
@@ -3,5 +3,5 @@
 package lxc
 
 func finalizeNamespace(args *InitArgs) error {
-	panic("Not supported on darwin")
+	panic("Not supported on this platform")
 }
diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go
index e4a8ed6..6bb50e6 100644
--- a/daemon/execdriver/lxc/lxc_template.go
+++ b/daemon/execdriver/lxc/lxc_template.go
@@ -1,3 +1,5 @@
+// +build linux
+
 package lxc
 
 import (
@@ -6,30 +8,15 @@
 	"strings"
 	"text/template"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template"
-	"github.com/docker/docker/utils"
+	"github.com/docker/docker/pkg/stringutils"
 	"github.com/docker/libcontainer/label"
 )
 
 const LxcTemplate = `
-{{if .Network.Interface}}
-# network configuration
-lxc.network.type = veth
-lxc.network.link = {{.Network.Interface.Bridge}}
-lxc.network.name = eth0
-lxc.network.mtu = {{.Network.Mtu}}
-lxc.network.flags = up
-{{else if .Network.HostNetworking}}
 lxc.network.type = none
-{{else}}
-# network is disabled (-n=false)
-lxc.network.type = empty
-lxc.network.flags = up
-lxc.network.mtu = {{.Network.Mtu}}
-{{end}}
-
 # root filesystem
 {{$ROOTFS := .Rootfs}}
 lxc.rootfs = {{$ROOTFS}}
@@ -62,7 +49,7 @@
 # NOTICE: These mounts must be applied within the namespace
 {{if .ProcessConfig.Privileged}}
 # WARNING: mounting procfs and/or sysfs read-write is a known attack vector.
-# See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ
+# See e.g. http://blog.zx2c4.com/749 and https://bit.ly/T9CkqJ
 # We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only.
 # We cannot mount them directly read-only, because that would prevent loading AppArmor profiles.
 lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
@@ -107,9 +94,24 @@
 {{if .Resources.CpuShares}}
 lxc.cgroup.cpu.shares = {{.Resources.CpuShares}}
 {{end}}
+{{if .Resources.CpuPeriod}}
+lxc.cgroup.cpu.cfs_period_us = {{.Resources.CpuPeriod}}
+{{end}}
 {{if .Resources.CpusetCpus}}
 lxc.cgroup.cpuset.cpus = {{.Resources.CpusetCpus}}
 {{end}}
+{{if .Resources.CpusetMems}}
+lxc.cgroup.cpuset.mems = {{.Resources.CpusetMems}}
+{{end}}
+{{if .Resources.CpuQuota}}
+lxc.cgroup.cpu.cfs_quota_us = {{.Resources.CpuQuota}}
+{{end}}
+{{if .Resources.BlkioWeight}}
+lxc.cgroup.blkio.weight = {{.Resources.BlkioWeight}}
+{{end}}
+{{if .Resources.OomKillDisable}}
+lxc.cgroup.memory.oom_control = {{.Resources.OomKillDisable}}
+{{end}}
 {{end}}
 
 {{if .LxcConfig}}
@@ -128,6 +130,7 @@
 {{if .Network.Interface.MacAddress}}
 lxc.network.hwaddr = {{.Network.Interface.MacAddress}}
 {{end}}
+{{end}}
 {{if .ProcessConfig.Env}}
 lxc.utsname = {{getHostname .ProcessConfig.Env}}
 {{end}}
@@ -147,7 +150,6 @@
 		{{end}}
 	{{end}}
 {{end}}
-{{end}}
 `
 
 var LxcTemplateCompiled *template.Template
@@ -160,14 +162,14 @@
 
 func keepCapabilities(adds []string, drops []string) ([]string, error) {
 	container := nativeTemplate.New()
-	log.Debugf("adds %s drops %s\n", adds, drops)
+	logrus.Debugf("adds %s drops %s\n", adds, drops)
 	caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops)
 	if err != nil {
 		return nil, err
 	}
 	var newCaps []string
 	for _, cap := range caps {
-		log.Debugf("cap %s\n", cap)
+		logrus.Debugf("cap %s\n", cap)
 		realCap := execdriver.GetCapability(cap)
 		numCap := fmt.Sprintf("%d", realCap.Value)
 		newCaps = append(newCaps, numCap)
@@ -177,11 +179,11 @@
 }
 
 func dropList(drops []string) ([]string, error) {
-	if utils.StringsContainsNoCase(drops, "all") {
+	if stringutils.InSlice(drops, "all") {
 		var newCaps []string
 		for _, capName := range execdriver.GetAllCapabilities() {
 			cap := execdriver.GetCapability(capName)
-			log.Debugf("drop cap %s\n", cap.Key)
+			logrus.Debugf("drop cap %s\n", cap.Key)
 			numCap := fmt.Sprintf("%d", cap.Value)
 			newCaps = append(newCaps, numCap)
 		}
@@ -192,7 +194,7 @@
 
 func isDirectory(source string) string {
 	f, err := os.Stat(source)
-	log.Debugf("dir: %s\n", source)
+	logrus.Debugf("dir: %s\n", source)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return "dir"
diff --git a/daemon/execdriver/lxc/lxc_template_unit_test.go b/daemon/execdriver/lxc/lxc_template_unit_test.go
index 78760f6..904fa12 100644
--- a/daemon/execdriver/lxc/lxc_template_unit_test.go
+++ b/daemon/execdriver/lxc/lxc_template_unit_test.go
@@ -29,14 +29,14 @@
 	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
 
 	// Memory is allocated randomly for testing
-	rand.Seed(time.Now().UTC().UnixNano())
+	r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
 	var (
 		memMin = 33554432
 		memMax = 536870912
-		mem    = memMin + rand.Intn(memMax-memMin)
+		mem    = memMin + r.Intn(memMax-memMin)
 		cpuMin = 100
 		cpuMax = 10000
-		cpu    = cpuMin + rand.Intn(cpuMax-cpuMin)
+		cpu    = cpuMin + r.Intn(cpuMax-cpuMin)
 	)
 
 	driver, err := NewDriver(root, root, "", false)
@@ -264,13 +264,8 @@
 			"lxc.cgroup.cpuset.cpus = 0,1",
 		},
 		Network: &execdriver.Network{
-			Mtu: 1500,
-			Interface: &execdriver.NetworkInterface{
-				Gateway:     "10.10.10.1",
-				IPAddress:   "10.10.10.10",
-				IPPrefixLen: 24,
-				Bridge:      "docker0",
-			},
+			Mtu:       1500,
+			Interface: nil,
 		},
 		ProcessConfig:   processConfig,
 		CapAdd:          []string{"net_admin", "syslog"},
@@ -282,13 +277,6 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	// network
-	grepFile(t, p, "lxc.network.type = veth")
-	grepFile(t, p, "lxc.network.link = docker0")
-	grepFile(t, p, "lxc.network.name = eth0")
-	grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24")
-	grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
-	grepFile(t, p, "lxc.network.flags = up")
 	grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting")
 	// hostname
 	grepFile(t, p, "lxc.utsname = testhost")
@@ -329,13 +317,8 @@
 			"lxc.network.ipv4 = 172.0.0.1",
 		},
 		Network: &execdriver.Network{
-			Mtu: 1500,
-			Interface: &execdriver.NetworkInterface{
-				Gateway:     "10.10.10.1",
-				IPAddress:   "10.10.10.10",
-				IPPrefixLen: 24,
-				Bridge:      "docker0",
-			},
+			Mtu:       1500,
+			Interface: nil,
 		},
 		ProcessConfig: processConfig,
 		CapAdd:        []string{"NET_ADMIN", "SYSLOG"},
@@ -346,13 +329,6 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	// network
-	grepFile(t, p, "lxc.network.type = veth")
-	grepFile(t, p, "lxc.network.link = docker0")
-	grepFile(t, p, "lxc.network.name = eth0")
-	grepFile(t, p, "lxc.network.ipv4 = 172.0.0.1")
-	grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
-	grepFile(t, p, "lxc.network.flags = up")
 
 	// hostname
 	grepFile(t, p, "lxc.utsname = testhost")
diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go
index d278249..1b2d723 100644
--- a/daemon/execdriver/native/create.go
+++ b/daemon/execdriver/native/create.go
@@ -29,6 +29,10 @@
 		return nil, err
 	}
 
+	if err := d.createUTS(container, c); err != nil {
+		return nil, err
+	}
+
 	if err := d.createNetwork(container, c); err != nil {
 		return nil, err
 	}
@@ -63,9 +67,7 @@
 		return nil, err
 	}
 
-	if err := d.setupLabels(container, c); err != nil {
-		return nil, err
-	}
+	d.setupLabels(container, c)
 	d.setupRlimits(container, c)
 	return container, nil
 }
@@ -87,39 +89,9 @@
 }
 
 func (d *driver) createNetwork(container *configs.Config, c *execdriver.Command) error {
-	if c.Network.HostNetworking {
-		container.Namespaces.Remove(configs.NEWNET)
+	if c.Network == nil {
 		return nil
 	}
-
-	container.Networks = []*configs.Network{
-		{
-			Type: "loopback",
-		},
-	}
-
-	iName, err := generateIfaceName()
-	if err != nil {
-		return err
-	}
-	if c.Network.Interface != nil {
-		vethNetwork := configs.Network{
-			Name:              "eth0",
-			HostInterfaceName: iName,
-			Mtu:               c.Network.Mtu,
-			Address:           fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
-			MacAddress:        c.Network.Interface.MacAddress,
-			Gateway:           c.Network.Interface.Gateway,
-			Type:              "veth",
-			Bridge:            c.Network.Interface.Bridge,
-		}
-		if c.Network.Interface.GlobalIPv6Address != "" {
-			vethNetwork.IPv6Address = fmt.Sprintf("%s/%d", c.Network.Interface.GlobalIPv6Address, c.Network.Interface.GlobalIPv6PrefixLen)
-			vethNetwork.IPv6Gateway = c.Network.Interface.IPv6Gateway
-		}
-		container.Networks = append(container.Networks, &vethNetwork)
-	}
-
 	if c.Network.ContainerID != "" {
 		d.Lock()
 		active := d.activeContainers[c.Network.ContainerID]
@@ -135,8 +107,14 @@
 		}
 
 		container.Namespaces.Add(configs.NEWNET, state.NamespacePaths[configs.NEWNET])
+		return nil
 	}
 
+	if c.Network.NamespacePath == "" {
+		return fmt.Errorf("network namespace path is empty")
+	}
+
+	container.Namespaces.Add(configs.NEWNET, c.Network.NamespacePath)
 	return nil
 }
 
@@ -174,6 +152,16 @@
 	return nil
 }
 
+func (d *driver) createUTS(container *configs.Config, c *execdriver.Command) error {
+	if c.UTS.HostUTS {
+		container.Namespaces.Remove(configs.NEWUTS)
+		container.Hostname = ""
+		return nil
+	}
+
+	return nil
+}
+
 func (d *driver) setPrivileged(container *configs.Config) (err error) {
 	container.Capabilities = execdriver.GetAllCapabilities()
 	container.Cgroups.AllowAllDevices = true
@@ -218,8 +206,12 @@
 
 	// Filter out mounts that are overriden by user supplied mounts
 	var defaultMounts []*configs.Mount
+	_, mountDev := userMounts["/dev"]
 	for _, m := range container.Mounts {
 		if _, ok := userMounts[m.Destination]; !ok {
+			if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
+				continue
+			}
 			defaultMounts = append(defaultMounts, m)
 		}
 	}
@@ -243,9 +235,7 @@
 	return nil
 }
 
-func (d *driver) setupLabels(container *configs.Config, c *execdriver.Command) error {
+func (d *driver) setupLabels(container *configs.Config, c *execdriver.Command) {
 	container.ProcessLabel = c.ProcessLabel
 	container.MountLabel = c.MountLabel
-
-	return nil
 }
diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go
index 3177f2b..4da3e34 100644
--- a/daemon/execdriver/native/driver.go
+++ b/daemon/execdriver/native/driver.go
@@ -3,10 +3,8 @@
 package native
 
 import (
-	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -15,8 +13,9 @@
 	"syscall"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/reexec"
 	sysinfo "github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/term"
@@ -42,24 +41,58 @@
 	sync.Mutex
 }
 
-func NewDriver(root, initPath string) (*driver, error) {
+func NewDriver(root, initPath string, options []string) (*driver, error) {
 	meminfo, err := sysinfo.ReadMemInfo()
 	if err != nil {
 		return nil, err
 	}
 
-	if err := os.MkdirAll(root, 0700); err != nil {
+	if err := sysinfo.MkdirAll(root, 0700); err != nil {
 		return nil, err
 	}
 	// native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
 	if err := apparmor.InstallDefaultProfile(); err != nil {
 		return nil, err
 	}
+
+	// choose cgroup manager
+	// this makes sure there are no breaking changes to people
+	// who upgrade from versions without native.cgroupdriver opt
 	cgm := libcontainer.Cgroupfs
 	if systemd.UseSystemd() {
 		cgm = libcontainer.SystemdCgroups
 	}
 
+	// parse the options
+	for _, option := range options {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return nil, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "native.cgroupdriver":
+			// override the default if they set options
+			switch val {
+			case "systemd":
+				if systemd.UseSystemd() {
+					cgm = libcontainer.SystemdCgroups
+				} else {
+					// warn them that they chose the wrong driver
+					logrus.Warn("You cannot use systemd as native.cgroupdriver, using cgroupfs instead")
+				}
+			case "cgroupfs":
+				cgm = libcontainer.Cgroupfs
+			default:
+				return nil, fmt.Errorf("Unknown native.cgroupdriver given %q. try cgroupfs or systemd", val)
+			}
+		default:
+			return nil, fmt.Errorf("Unknown option %s\n", key)
+		}
+	}
+
+	logrus.Debugf("Using %v as native.cgroupdriver", cgm)
+
 	f, err := libcontainer.New(
 		root,
 		cgm,
@@ -90,8 +123,6 @@
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
 
-	var term execdriver.Terminal
-
 	p := &libcontainer.Process{
 		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
 		Env:  c.ProcessConfig.Env,
@@ -99,36 +130,9 @@
 		User: c.ProcessConfig.User,
 	}
 
-	if c.ProcessConfig.Tty {
-		rootuid, err := container.HostUID()
-		if err != nil {
-			return execdriver.ExitStatus{ExitCode: -1}, err
-		}
-		cons, err := p.NewConsole(rootuid)
-		if err != nil {
-			return execdriver.ExitStatus{ExitCode: -1}, err
-		}
-		term, err = NewTtyConsole(cons, pipes, rootuid)
-	} else {
-		p.Stdout = pipes.Stdout
-		p.Stderr = pipes.Stderr
-		r, w, err := os.Pipe()
-		if err != nil {
-			return execdriver.ExitStatus{ExitCode: -1}, err
-		}
-		if pipes.Stdin != nil {
-			go func() {
-				io.Copy(w, pipes.Stdin)
-				w.Close()
-			}()
-			p.Stdin = r
-		}
-		term = &execdriver.StdConsole{}
-	}
-	if err != nil {
+	if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil {
 		return execdriver.ExitStatus{ExitCode: -1}, err
 	}
-	c.ProcessConfig.Terminal = term
 
 	cont, err := d.factory.Create(c.ID, container)
 	if err != nil {
@@ -159,7 +163,7 @@
 	oom := notifyOnOOM(cont)
 	waitF := p.Wait
 	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
-		// we need such hack for tracking processes with inerited fds,
+		// we need such hack for tracking processes with inherited fds,
 		// because cmd.Wait() waiting for all streams to be copied
 		waitF = waitInPIDHost(p, cont)
 	}
@@ -182,7 +186,7 @@
 func notifyOnOOM(container libcontainer.Container) <-chan struct{} {
 	oom, err := container.NotifyOOM()
 	if err != nil {
-		log.Warnf("Your kernel does not support OOM notifications: %s", err)
+		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
 		c := make(chan struct{})
 		close(c)
 		return c
@@ -193,27 +197,27 @@
 func killCgroupProcs(c libcontainer.Container) {
 	var procs []*os.Process
 	if err := c.Pause(); err != nil {
-		log.Warn(err)
+		logrus.Warn(err)
 	}
 	pids, err := c.Processes()
 	if err != nil {
 		// don't care about childs if we can't get them, this is mostly because cgroup already deleted
-		log.Warnf("Failed to get processes from container %s: %v", c.ID(), err)
+		logrus.Warnf("Failed to get processes from container %s: %v", c.ID(), err)
 	}
 	for _, pid := range pids {
 		if p, err := os.FindProcess(pid); err == nil {
 			procs = append(procs, p)
 			if err := p.Kill(); err != nil {
-				log.Warn(err)
+				logrus.Warn(err)
 			}
 		}
 	}
 	if err := c.Resume(); err != nil {
-		log.Warn(err)
+		logrus.Warn(err)
 	}
 	for _, p := range procs {
 		if _, err := p.Wait(); err != nil {
-			log.Warn(err)
+			logrus.Warn(err)
 		}
 	}
 }
@@ -241,7 +245,9 @@
 }
 
 func (d *driver) Kill(c *execdriver.Command, sig int) error {
+	d.Lock()
 	active := d.activeContainers[c.ID]
+	d.Unlock()
 	if active == nil {
 		return fmt.Errorf("active container for %s does not exist", c.ID)
 	}
@@ -253,7 +259,9 @@
 }
 
 func (d *driver) Pause(c *execdriver.Command) error {
+	d.Lock()
 	active := d.activeContainers[c.ID]
+	d.Unlock()
 	if active == nil {
 		return fmt.Errorf("active container for %s does not exist", c.ID)
 	}
@@ -261,7 +269,9 @@
 }
 
 func (d *driver) Unpause(c *execdriver.Command) error {
+	d.Lock()
 	active := d.activeContainers[c.ID]
+	d.Unlock()
 	if active == nil {
 		return fmt.Errorf("active container for %s does not exist", c.ID)
 	}
@@ -313,14 +323,6 @@
 	return active.Processes()
 }
 
-func (d *driver) writeContainerFile(container *configs.Config, id string) error {
-	data, err := json.Marshal(container)
-	if err != nil {
-		return err
-	}
-	return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655)
-}
-
 func (d *driver) cleanContainer(id string) error {
 	d.Lock()
 	delete(d.activeContainers, id)
@@ -337,7 +339,9 @@
 }
 
 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+	d.Lock()
 	c := d.activeContainers[id]
+	d.Unlock()
 	if c == nil {
 		return nil, execdriver.ErrNotRunning
 	}
@@ -359,16 +363,6 @@
 	}, nil
 }
 
-func getEnv(key string, env []string) string {
-	for _, pair := range env {
-		parts := strings.Split(pair, "=")
-		if parts[0] == key {
-			return parts[1]
-		}
-	}
-	return ""
-}
-
 type TtyConsole struct {
 	console libcontainer.Console
 }
@@ -419,3 +413,40 @@
 func (t *TtyConsole) Close() error {
 	return t.console.Close()
 }
+
+func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error {
+	var term execdriver.Terminal
+	var err error
+
+	if processConfig.Tty {
+		rootuid, err := container.HostUID()
+		if err != nil {
+			return err
+		}
+		cons, err := p.NewConsole(rootuid)
+		if err != nil {
+			return err
+		}
+		term, err = NewTtyConsole(cons, pipes, rootuid)
+	} else {
+		p.Stdout = pipes.Stdout
+		p.Stderr = pipes.Stderr
+		r, w, err := os.Pipe()
+		if err != nil {
+			return err
+		}
+		if pipes.Stdin != nil {
+			go func() {
+				io.Copy(w, pipes.Stdin)
+				w.Close()
+			}()
+			p.Stdin = r
+		}
+		term = &execdriver.StdConsole{}
+	}
+	if err != nil {
+		return err
+	}
+	processConfig.Terminal = term
+	return nil
+}
diff --git a/daemon/execdriver/native/exec.go b/daemon/execdriver/native/exec.go
index af6dcd2..a9b0e79 100644
--- a/daemon/execdriver/native/exec.go
+++ b/daemon/execdriver/native/exec.go
@@ -14,46 +14,25 @@
 	"github.com/docker/libcontainer/utils"
 )
 
-// TODO(vishh): Add support for running in priviledged mode and running as a different user.
+// TODO(vishh): Add support for running in privileged mode.
 func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
 	active := d.activeContainers[c.ID]
 	if active == nil {
 		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
 	}
 
-	var term execdriver.Terminal
-	var err error
-
 	p := &libcontainer.Process{
 		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
 		Env:  c.ProcessConfig.Env,
 		Cwd:  c.WorkingDir,
-		User: c.ProcessConfig.User,
+		User: processConfig.User,
 	}
 
-	if processConfig.Tty {
-		config := active.Config()
-		rootuid, err := config.HostUID()
-		if err != nil {
-			return -1, err
-		}
-		cons, err := p.NewConsole(rootuid)
-		if err != nil {
-			return -1, err
-		}
-		term, err = NewTtyConsole(cons, pipes, rootuid)
-	} else {
-		p.Stdout = pipes.Stdout
-		p.Stderr = pipes.Stderr
-		p.Stdin = pipes.Stdin
-		term = &execdriver.StdConsole{}
-	}
-	if err != nil {
+	config := active.Config()
+	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
 		return -1, err
 	}
 
-	processConfig.Terminal = term
-
 	if err := active.Start(p); err != nil {
 		return -1, err
 	}
diff --git a/daemon/execdriver/native/init.go b/daemon/execdriver/native/init.go
index f57d6cd..2a6cd26 100644
--- a/daemon/execdriver/native/init.go
+++ b/daemon/execdriver/native/init.go
@@ -32,7 +32,7 @@
 	if err != nil {
 		fatal(err)
 	}
-	if err := factory.StartInitialization(3); err != nil {
+	if err := factory.StartInitialization(); err != nil {
 		fatal(err)
 	}
 
diff --git a/daemon/execdriver/native/utils.go b/daemon/execdriver/native/utils.go
deleted file mode 100644
index a703926..0000000
--- a/daemon/execdriver/native/utils.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build linux
-
-package native
-
-//func findUserArgs() []string {
-//for i, a := range os.Args {
-//if a == "--" {
-//return os.Args[i+1:]
-//}
-//}
-//return []string{}
-//}
-
-//// loadConfigFromFd loads a container's config from the sync pipe that is provided by
-//// fd 3 when running a process
-//func loadConfigFromFd() (*configs.Config, error) {
-//var config *libcontainer.Config
-//if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil {
-//return nil, err
-//}
-//return config, nil
-//}
diff --git a/daemon/execdriver/utils.go b/daemon/execdriver/utils.go
index e1fc9b9..fd5a270 100644
--- a/daemon/execdriver/utils.go
+++ b/daemon/execdriver/utils.go
@@ -4,48 +4,29 @@
 	"fmt"
 	"strings"
 
-	"github.com/docker/docker/utils"
+	"github.com/docker/docker/pkg/stringutils"
 	"github.com/syndtr/gocapability/capability"
 )
 
-var capabilityList = Capabilities{
-	{Key: "SETPCAP", Value: capability.CAP_SETPCAP},
-	{Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
-	{Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
-	{Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
-	{Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
-	{Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
-	{Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
-	{Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
-	{Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
-	{Key: "MKNOD", Value: capability.CAP_MKNOD},
-	{Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
-	{Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
-	{Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
-	{Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
-	{Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
-	{Key: "SYSLOG", Value: capability.CAP_SYSLOG},
-	{Key: "CHOWN", Value: capability.CAP_CHOWN},
-	{Key: "NET_RAW", Value: capability.CAP_NET_RAW},
-	{Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
-	{Key: "FOWNER", Value: capability.CAP_FOWNER},
-	{Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH},
-	{Key: "FSETID", Value: capability.CAP_FSETID},
-	{Key: "KILL", Value: capability.CAP_KILL},
-	{Key: "SETGID", Value: capability.CAP_SETGID},
-	{Key: "SETUID", Value: capability.CAP_SETUID},
-	{Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE},
-	{Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE},
-	{Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST},
-	{Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK},
-	{Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER},
-	{Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT},
-	{Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE},
-	{Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT},
-	{Key: "LEASE", Value: capability.CAP_LEASE},
-	{Key: "SETFCAP", Value: capability.CAP_SETFCAP},
-	{Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM},
-	{Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND},
+var capabilityList Capabilities
+
+func init() {
+	last := capability.CAP_LAST_CAP
+	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+	if last == capability.Cap(63) {
+		last = capability.CAP_BLOCK_SUSPEND
+	}
+	for _, cap := range capability.List() {
+		if cap > last {
+			continue
+		}
+		capabilityList = append(capabilityList,
+			&CapabilityMapping{
+				Key:   strings.ToUpper(cap.String()),
+				Value: cap,
+			},
+		)
+	}
 }
 
 type (
@@ -89,17 +70,17 @@
 		if strings.ToLower(cap) == "all" {
 			continue
 		}
-		if !utils.StringsContainsNoCase(allCaps, cap) {
+		if !stringutils.InSlice(allCaps, cap) {
 			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
 		}
 	}
 
 	// handle --cap-add=all
-	if utils.StringsContainsNoCase(adds, "all") {
+	if stringutils.InSlice(adds, "all") {
 		basics = allCaps
 	}
 
-	if !utils.StringsContainsNoCase(drops, "all") {
+	if !stringutils.InSlice(drops, "all") {
 		for _, cap := range basics {
 			// skip `all` aready handled above
 			if strings.ToLower(cap) == "all" {
@@ -107,7 +88,7 @@
 			}
 
 			// if we don't drop `all`, add back all the non-dropped caps
-			if !utils.StringsContainsNoCase(drops, cap) {
+			if !stringutils.InSlice(drops, cap) {
 				newCaps = append(newCaps, strings.ToUpper(cap))
 			}
 		}
@@ -119,12 +100,12 @@
 			continue
 		}
 
-		if !utils.StringsContainsNoCase(allCaps, cap) {
+		if !stringutils.InSlice(allCaps, cap) {
 			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
 		}
 
 		// add cap if not already in the list
-		if !utils.StringsContainsNoCase(newCaps, cap) {
+		if !stringutils.InSlice(newCaps, cap) {
 			newCaps = append(newCaps, strings.ToUpper(cap))
 		}
 	}
diff --git a/daemon/execdriver/windows/unsupported.go b/daemon/execdriver/windows/unsupported.go
new file mode 100644
index 0000000..0a492e1
--- /dev/null
+++ b/daemon/execdriver/windows/unsupported.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package windows
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+func NewDriver(root, initPath string) (execdriver.Driver, error) {
+	return nil, fmt.Errorf("Windows driver not supported on non-Windows")
+}
diff --git a/daemon/execdriver/windows/windows.go b/daemon/execdriver/windows/windows.go
new file mode 100644
index 0000000..9837270
--- /dev/null
+++ b/daemon/execdriver/windows/windows.go
@@ -0,0 +1,97 @@
+// +build windows
+
+/*
+ This is the Windows driver for containers.
+
+ TODO Windows: It is currently a placeholder to allow compilation of the
+ daemon. Future PRs will have an implementation of this driver.
+*/
+
+package windows
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/daemon/execdriver"
+)
+
+const (
+	DriverName = "Windows"
+	Version    = "Placeholder"
+)
+
+type activeContainer struct {
+	command *execdriver.Command
+}
+
+type driver struct {
+	root     string
+	initPath string
+}
+
+type info struct {
+	ID     string
+	driver *driver
+}
+
+func NewDriver(root, initPath string) (*driver, error) {
+	return &driver{
+		root:     root,
+		initPath: initPath,
+	}, nil
+}
+
+func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
+	return execdriver.ExitStatus{ExitCode: 0}, nil
+}
+
+func (d *driver) Terminate(p *execdriver.Command) error {
+	return nil
+}
+
+func (d *driver) Kill(p *execdriver.Command, sig int) error {
+	return nil
+}
+
+func kill(ID string, PID int) error {
+	return nil
+}
+
+func (d *driver) Pause(c *execdriver.Command) error {
+	return fmt.Errorf("Windows: Containers cannot be paused")
+}
+
+func (d *driver) Unpause(c *execdriver.Command) error {
+	return fmt.Errorf("Windows: Containers cannot be paused")
+}
+
+func (i *info) IsRunning() bool {
+	return false
+}
+
+func (d *driver) Info(id string) execdriver.Info {
+	return &info{
+		ID:     id,
+		driver: d,
+	}
+}
+
+func (d *driver) Name() string {
+	return fmt.Sprintf("%s Date %s", DriverName, Version)
+}
+
+func (d *driver) GetPidsForContainer(id string) ([]int, error) {
+	return nil, fmt.Errorf("GetPidsForContainer: GetPidsForContainer() not implemented")
+}
+
+func (d *driver) Clean(id string) error {
+	return nil
+}
+
+func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
+	return nil, fmt.Errorf("Windows: Stats not implemented")
+}
+
+func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+	return 0, nil
+}
diff --git a/daemon/export.go b/daemon/export.go
index 859c80f..b94b610 100644
--- a/daemon/export.go
+++ b/daemon/export.go
@@ -1,33 +1,27 @@
 package daemon
 
 import (
+	"fmt"
 	"io"
-
-	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
-	}
-	name := job.Args[0]
-
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	data, err := container.Export()
 	if err != nil {
-		return job.Errorf("%s: %s", name, err)
+		return fmt.Errorf("%s: %s", name, err)
 	}
 	defer data.Close()
 
 	// Stream the entire contents of the container (basically a volatile snapshot)
-	if _, err := io.Copy(job.Stdout, data); err != nil {
-		return job.Errorf("%s: %s", name, err)
+	if _, err := io.Copy(out, data); err != nil {
+		return fmt.Errorf("%s: %s", name, err)
 	}
 	// FIXME: factor job-specific LogEvent to engine.Job.Run()
 	container.LogEvent("export")
-	return engine.StatusOK
+	return nil
 }
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index a986fe2..b319d24 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -31,13 +31,13 @@
 	"sync"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/directory"
 	mountpk "github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/libcontainer/label"
 )
 
@@ -221,7 +221,7 @@
 	defer a.Unlock()
 
 	if a.active[id] != 0 {
-		log.Errorf("Removing active id %s", id)
+		logrus.Errorf("Removing active id %s", id)
 	}
 
 	// Make sure the dir is umounted first
@@ -410,7 +410,7 @@
 
 	for _, id := range ids {
 		if err := a.unmount(id); err != nil {
-			log.Errorf("Unmounting %s: %s", common.TruncateID(id), err)
+			logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err)
 		}
 	}
 
@@ -480,14 +480,14 @@
 	enableDirpermLock.Do(func() {
 		base, err := ioutil.TempDir("", "docker-aufs-base")
 		if err != nil {
-			log.Errorf("error checking dirperm1: %v", err)
+			logrus.Errorf("error checking dirperm1: %v", err)
 			return
 		}
 		defer os.RemoveAll(base)
 
 		union, err := ioutil.TempDir("", "docker-aufs-union")
 		if err != nil {
-			log.Errorf("error checking dirperm1: %v", err)
+			logrus.Errorf("error checking dirperm1: %v", err)
 			return
 		}
 		defer os.RemoveAll(union)
@@ -498,7 +498,7 @@
 		}
 		enableDirperm = true
 		if err := Unmount(union); err != nil {
-			log.Errorf("error checking dirperm1: failed to unmount %v", err)
+			logrus.Errorf("error checking dirperm1: failed to unmount %v", err)
 		}
 	})
 	return enableDirperm
diff --git a/daemon/graphdriver/aufs/migrate.go b/daemon/graphdriver/aufs/migrate.go
index dda7cb7..dd61098 100644
--- a/daemon/graphdriver/aufs/migrate.go
+++ b/daemon/graphdriver/aufs/migrate.go
@@ -162,7 +162,7 @@
 	}
 	// If the destination is a symlink then we already tried to relocate once before
 	// and it failed so we delete it and try to remove
-	if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink {
+	if s != nil && s.Mode()&os.ModeSymlink != 0 {
 		if err := os.RemoveAll(newPath); err != nil {
 			return err
 		}
diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go
index a3a5a86..0a3d9d1 100644
--- a/daemon/graphdriver/aufs/mount.go
+++ b/daemon/graphdriver/aufs/mount.go
@@ -4,12 +4,12 @@
 	"os/exec"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 func Unmount(target string) error {
 	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
-		log.Errorf("Couldn't run auplink before unmount: %s", err)
+		logrus.Errorf("Couldn't run auplink before unmount: %s", err)
 	}
 	if err := syscall.Unmount(target, 0); err != nil {
 		return err
diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md
index 1dc9180..2b6e5e2 100644
--- a/daemon/graphdriver/devmapper/README.md
+++ b/daemon/graphdriver/devmapper/README.md
@@ -186,7 +186,7 @@
     can be achieved by zeroing the first 4k to indicate empty
     metadata, like this:
 
-    ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1```
+    ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1``
 
     Example use:
 
@@ -216,3 +216,59 @@
     Example use:
 
     ``docker -d --storage-opt dm.blkdiscard=false``
+
+ *  `dm.override_udev_sync_check`
+
+    Overrides the `udev` synchronization checks between `devicemapper` and `udev`.
+    `udev` is the device manager for the Linux kernel.
+
+    To view the `udev` sync support of a Docker daemon that is using the
+    `devicemapper` driver, run:
+
+        $ docker info
+	[...]
+	 Udev Sync Supported: true
+	[...]
+
+    When `udev` sync support is `true`, then `devicemapper` and udev can
+    coordinate the activation and deactivation of devices for containers.
+
+    When `udev` sync support is `false`, a race condition occurs between
+    the`devicemapper` and `udev` during create and cleanup. The race condition
+    results in errors and failures. (For information on these failures, see
+    [docker#4036](https://github.com/docker/docker/issues/4036))
+
+    To allow the `docker` daemon to start, regardless of `udev` sync not being
+    supported, set `dm.override_udev_sync_check` to true:
+
+        $ docker -d --storage-opt dm.override_udev_sync_check=true
+
+    When this value is `true`, the  `devicemapper` continues and simply warns
+    you the errors are happening.
+
+    > **Note**: The ideal is to pursue a `docker` daemon and environment that
+    > does support synchronizing with `udev`. For further discussion on this
+    > topic, see [docker#4036](https://github.com/docker/docker/issues/4036).
+    > Otherwise, set this flag for migrating existing Docker daemons to a
+    > daemon with a supported environment.
+
+ *  `dm.use_deferred_removal`
+
+    Enables use of deferred device removal if libdm and kernel driver
+    support the mechanism.
+
+    Deferred device removal means that if device is busy when devices is
+    being removed/deactivated, then a deferred removal is scheduled on
+    device. And devices automatically goes away when last user of device
+    exits.
+
+    For example, when contianer exits, its associated thin device is
+    removed. If that devices has leaked into some other mount namespace
+    can can't be removed now, container exit will still be successful
+    and this option will just schedule device for deferred removal and
+    will not wait in a loop trying to remove a busy device.
+
+    Example use:
+
+    ``docker -d --storage-opt dm.use_deferred_removal=true``
+
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index 686d72b..2480517 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -18,7 +18,7 @@
 	"syscall"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/devicemapper"
 	"github.com/docker/docker/pkg/parsers"
@@ -30,9 +30,16 @@
 	DefaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
 	DefaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
 	DefaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
-	DefaultThinpBlockSize       uint32 = 128      // 64K = 128 512b sectors
+	DefaultThinpBlockSize       uint32 = 128 // 64K = 128 512b sectors
+	DefaultUdevSyncOverride     bool   = false
 	MaxDeviceId                 int    = 0xffffff // 24 bit, pool limit
 	DeviceIdMapSz               int    = (MaxDeviceId + 1) / 8
+	// We retry device removal so many a times that even error messages
+	// will fill up console during normal operation. So only log Fatal
+	// messages by default.
+	DMLogLevel                   int  = devicemapper.LogLevelFatal
+	DriverDeferredRemovalSupport bool = false
+	EnableDeferredRemoval        bool = false
 )
 
 const deviceSetMetaFile string = "deviceset-metadata"
@@ -83,20 +90,22 @@
 	deviceIdMap   []byte
 
 	// Options
-	dataLoopbackSize     int64
-	metaDataLoopbackSize int64
-	baseFsSize           uint64
-	filesystem           string
-	mountOptions         string
-	mkfsArgs             []string
-	dataDevice           string // block or loop dev
-	dataLoopFile         string // loopback file, if used
-	metadataDevice       string // block or loop dev
-	metadataLoopFile     string // loopback file, if used
-	doBlkDiscard         bool
-	thinpBlockSize       uint32
-	thinPoolDevice       string
-	Transaction          `json:"-"`
+	dataLoopbackSize      int64
+	metaDataLoopbackSize  int64
+	baseFsSize            uint64
+	filesystem            string
+	mountOptions          string
+	mkfsArgs              []string
+	dataDevice            string // block or loop dev
+	dataLoopFile          string // loopback file, if used
+	metadataDevice        string // block or loop dev
+	metadataLoopFile      string // loopback file, if used
+	doBlkDiscard          bool
+	thinpBlockSize        uint32
+	thinPoolDevice        string
+	Transaction           `json:"-"`
+	overrideUdevSyncCheck bool
+	deferredRemove        bool // use deferred removal
 }
 
 type DiskUsage struct {
@@ -106,15 +115,16 @@
 }
 
 type Status struct {
-	PoolName          string
-	DataFile          string // actual block device for data
-	DataLoopback      string // loopback file, if used
-	MetadataFile      string // actual block device for metadata
-	MetadataLoopback  string // loopback file, if used
-	Data              DiskUsage
-	Metadata          DiskUsage
-	SectorSize        uint64
-	UdevSyncSupported bool
+	PoolName              string
+	DataFile              string // actual block device for data
+	DataLoopback          string // loopback file, if used
+	MetadataFile          string // actual block device for metadata
+	MetadataLoopback      string // loopback file, if used
+	Data                  DiskUsage
+	Metadata              DiskUsage
+	SectorSize            uint64
+	UdevSyncSupported     bool
+	DeferredRemoveEnabled bool
 }
 
 type DevStatus struct {
@@ -205,14 +215,14 @@
 		if !os.IsNotExist(err) {
 			return "", err
 		}
-		log.Debugf("Creating loopback file %s for device-manage use", filename)
+		logrus.Debugf("Creating loopback file %s for device-manage use", filename)
 		file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
 		if err != nil {
 			return "", err
 		}
 		defer file.Close()
 
-		if err = file.Truncate(size); err != nil {
+		if err := file.Truncate(size); err != nil {
 			return "", err
 		}
 	}
@@ -320,21 +330,21 @@
 
 	// Skip some of the meta files which are not device files.
 	if strings.HasSuffix(finfo.Name(), ".migrated") {
-		log.Debugf("Skipping file %s", path)
+		logrus.Debugf("Skipping file %s", path)
 		return nil
 	}
 
 	if strings.HasPrefix(finfo.Name(), ".") {
-		log.Debugf("Skipping file %s", path)
+		logrus.Debugf("Skipping file %s", path)
 		return nil
 	}
 
 	if finfo.Name() == deviceSetMetaFile {
-		log.Debugf("Skipping file %s", path)
+		logrus.Debugf("Skipping file %s", path)
 		return nil
 	}
 
-	log.Debugf("Loading data for file %s", path)
+	logrus.Debugf("Loading data for file %s", path)
 
 	hash := finfo.Name()
 	if hash == "base" {
@@ -347,7 +357,7 @@
 	}
 
 	if dinfo.DeviceId > MaxDeviceId {
-		log.Errorf("Ignoring Invalid DeviceId=%d", dinfo.DeviceId)
+		logrus.Errorf("Ignoring Invalid DeviceId=%d", dinfo.DeviceId)
 		return nil
 	}
 
@@ -355,17 +365,17 @@
 	devices.markDeviceIdUsed(dinfo.DeviceId)
 	devices.Unlock()
 
-	log.Debugf("Added deviceId=%d to DeviceIdMap", dinfo.DeviceId)
+	logrus.Debugf("Added deviceId=%d to DeviceIdMap", dinfo.DeviceId)
 	return nil
 }
 
 func (devices *DeviceSet) constructDeviceIdMap() error {
-	log.Debugf("[deviceset] constructDeviceIdMap()")
-	defer log.Debugf("[deviceset] constructDeviceIdMap() END")
+	logrus.Debugf("[deviceset] constructDeviceIdMap()")
+	defer logrus.Debugf("[deviceset] constructDeviceIdMap() END")
 
 	var scan = func(path string, info os.FileInfo, err error) error {
 		if err != nil {
-			log.Debugf("Can't walk the file %s", path)
+			logrus.Debugf("Can't walk the file %s", path)
 			return nil
 		}
 
@@ -381,7 +391,7 @@
 }
 
 func (devices *DeviceSet) unregisterDevice(id int, hash string) error {
-	log.Debugf("unregisterDevice(%v, %v)", id, hash)
+	logrus.Debugf("unregisterDevice(%v, %v)", id, hash)
 	info := &DevInfo{
 		Hash:     hash,
 		DeviceId: id,
@@ -392,7 +402,7 @@
 	devices.devicesLock.Unlock()
 
 	if err := devices.removeMetadata(info); err != nil {
-		log.Debugf("Error removing metadata: %s", err)
+		logrus.Debugf("Error removing metadata: %s", err)
 		return err
 	}
 
@@ -400,7 +410,7 @@
 }
 
 func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionId uint64) (*DevInfo, error) {
-	log.Debugf("registerDevice(%v, %v)", id, hash)
+	logrus.Debugf("registerDevice(%v, %v)", id, hash)
 	info := &DevInfo{
 		Hash:          hash,
 		DeviceId:      id,
@@ -426,7 +436,13 @@
 }
 
 func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
-	log.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
+	logrus.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
+
+	// Make sure deferred removal on device is canceled, if one was
+	// scheduled.
+	if err := devices.cancelDeferredRemoval(info); err != nil {
+		return fmt.Errorf("Deivce Deferred Removal Cancellation Failed: %s", err)
+	}
 
 	if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
 		return nil
@@ -542,7 +558,7 @@
 	}
 
 	if err := devices.openTransaction(hash, deviceId); err != nil {
-		log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId)
+		logrus.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId)
 		devices.markDeviceIdFree(deviceId)
 		return nil, err
 	}
@@ -554,7 +570,7 @@
 				// happen. Now we have a mechianism to find
 				// a free device Id. So something is not right.
 				// Give a warning and continue.
-				log.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId)
+				logrus.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId)
 				deviceId, err = devices.getNextFreeDeviceId()
 				if err != nil {
 					return nil, err
@@ -563,14 +579,14 @@
 				devices.refreshTransaction(deviceId)
 				continue
 			}
-			log.Debugf("Error creating device: %s", err)
+			logrus.Debugf("Error creating device: %s", err)
 			devices.markDeviceIdFree(deviceId)
 			return nil, err
 		}
 		break
 	}
 
-	log.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize)
+	logrus.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize)
 	info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, devices.OpenTransactionId)
 	if err != nil {
 		_ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId)
@@ -594,7 +610,7 @@
 	}
 
 	if err := devices.openTransaction(hash, deviceId); err != nil {
-		log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId)
+		logrus.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId)
 		devices.markDeviceIdFree(deviceId)
 		return err
 	}
@@ -606,7 +622,7 @@
 				// happen. Now we have a mechianism to find
 				// a free device Id. So something is not right.
 				// Give a warning and continue.
-				log.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId)
+				logrus.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId)
 				deviceId, err = devices.getNextFreeDeviceId()
 				if err != nil {
 					return err
@@ -615,7 +631,7 @@
 				devices.refreshTransaction(deviceId)
 				continue
 			}
-			log.Debugf("Error creating snap device: %s", err)
+			logrus.Debugf("Error creating snap device: %s", err)
 			devices.markDeviceIdFree(deviceId)
 			return err
 		}
@@ -625,7 +641,7 @@
 	if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, devices.OpenTransactionId); err != nil {
 		devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId)
 		devices.markDeviceIdFree(deviceId)
-		log.Debugf("Error registering device: %s", err)
+		logrus.Debugf("Error registering device: %s", err)
 		return err
 	}
 
@@ -660,7 +676,7 @@
 	}
 
 	if oldInfo != nil && !oldInfo.Initialized {
-		log.Debugf("Removing uninitialized base image")
+		logrus.Debugf("Removing uninitialized base image")
 		if err := devices.DeleteDevice(""); err != nil {
 			return err
 		}
@@ -681,7 +697,7 @@
 		}
 	}
 
-	log.Debugf("Initializing base device-mapper thin volume")
+	logrus.Debugf("Initializing base device-mapper thin volume")
 
 	// Create initial device
 	info, err := devices.createRegisterDevice("")
@@ -689,9 +705,9 @@
 		return err
 	}
 
-	log.Debugf("Creating filesystem on base device-mapper thin volume")
+	logrus.Debugf("Creating filesystem on base device-mapper thin volume")
 
-	if err = devices.activateDeviceIfNeeded(info); err != nil {
+	if err := devices.activateDeviceIfNeeded(info); err != nil {
 		return err
 	}
 
@@ -700,7 +716,7 @@
 	}
 
 	info.Initialized = true
-	if err = devices.saveMetadata(info); err != nil {
+	if err := devices.saveMetadata(info); err != nil {
 		info.Initialized = false
 		return err
 	}
@@ -723,14 +739,22 @@
 }
 
 func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
-	if level >= devicemapper.LogLevelDebug {
-		// (vbatts) libdm debug is very verbose. If you're debugging libdm, you can
-		// comment out this check yourself
-		level = devicemapper.LogLevelInfo
+	// By default libdm sends us all the messages including debug ones.
+	// We need to filter out messages here and figure out which one
+	// should be printed.
+	if level > DMLogLevel {
+		return
 	}
 
 	// FIXME(vbatts) push this back into ./pkg/devicemapper/
-	log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+	if level <= devicemapper.LogLevelErr {
+		logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+	} else if level <= devicemapper.LogLevelInfo {
+		logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+	} else {
+		// FIXME(vbatts) push this back into ./pkg/devicemapper/
+		logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
+	}
 }
 
 func major(device uint64) uint64 {
@@ -846,24 +870,24 @@
 }
 
 func (devices *DeviceSet) rollbackTransaction() error {
-	log.Debugf("Rolling back open transaction: TransactionId=%d hash=%s device_id=%d", devices.OpenTransactionId, devices.DeviceIdHash, devices.DeviceId)
+	logrus.Debugf("Rolling back open transaction: TransactionId=%d hash=%s device_id=%d", devices.OpenTransactionId, devices.DeviceIdHash, devices.DeviceId)
 
 	// A device id might have already been deleted before transaction
 	// closed. In that case this call will fail. Just leave a message
 	// in case of failure.
 	if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceId); err != nil {
-		log.Errorf("Unable to delete device: %s", err)
+		logrus.Errorf("Unable to delete device: %s", err)
 	}
 
 	dinfo := &DevInfo{Hash: devices.DeviceIdHash}
 	if err := devices.removeMetadata(dinfo); err != nil {
-		log.Errorf("Unable to remove metadata: %s", err)
+		logrus.Errorf("Unable to remove metadata: %s", err)
 	} else {
 		devices.markDeviceIdFree(devices.DeviceId)
 	}
 
 	if err := devices.removeTransactionMetaData(); err != nil {
-		log.Errorf("Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
+		logrus.Errorf("Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
 	}
 
 	return nil
@@ -883,7 +907,7 @@
 	// If open transaction Id is less than pool transaction Id, something
 	// is wrong. Bail out.
 	if devices.OpenTransactionId < devices.TransactionId {
-		log.Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId)
+		logrus.Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId)
 		return nil
 	}
 
@@ -940,32 +964,80 @@
 
 func (devices *DeviceSet) closeTransaction() error {
 	if err := devices.updatePoolTransactionId(); err != nil {
-		log.Debugf("Failed to close Transaction")
+		logrus.Debugf("Failed to close Transaction")
 		return err
 	}
 	return nil
 }
 
-func (devices *DeviceSet) initDevmapper(doInit bool) error {
-	if os.Getenv("DEBUG") != "" {
-		devicemapper.LogInitVerbose(devicemapper.LogLevelDebug)
-	} else {
-		devicemapper.LogInitVerbose(devicemapper.LogLevelWarn)
+func determineDriverCapabilities(version string) error {
+	/*
+	 * Driver version 4.27.0 and greater support deferred activation
+	 * feature.
+	 */
+
+	logrus.Debugf("devicemapper: driver version is %s", version)
+
+	versionSplit := strings.Split(version, ".")
+	major, err := strconv.Atoi(versionSplit[0])
+	if err != nil {
+		return graphdriver.ErrNotSupported
 	}
+
+	if major > 4 {
+		DriverDeferredRemovalSupport = true
+		return nil
+	}
+
+	if major < 4 {
+		return nil
+	}
+
+	minor, err := strconv.Atoi(versionSplit[1])
+	if err != nil {
+		return graphdriver.ErrNotSupported
+	}
+
+	/*
+	 * If major is 4 and minor is 27, then there is no need to
+	 * check for patch level as it can not be less than 0.
+	 */
+	if minor >= 27 {
+		DriverDeferredRemovalSupport = true
+		return nil
+	}
+
+	return nil
+}
+
+func (devices *DeviceSet) initDevmapper(doInit bool) error {
 	// give ourselves to libdm as a log handler
 	devicemapper.LogInit(devices)
 
-	_, err := devicemapper.GetDriverVersion()
+	version, err := devicemapper.GetDriverVersion()
 	if err != nil {
 		// Can't even get driver version, assume not supported
 		return graphdriver.ErrNotSupported
 	}
 
+	if err := determineDriverCapabilities(version); err != nil {
+		return graphdriver.ErrNotSupported
+	}
+
+	// If user asked for deferred removal and both library and driver
+	// supports deferred removal use it.
+	if EnableDeferredRemoval && DriverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport == true {
+		logrus.Debugf("devmapper: Deferred removal support enabled.")
+		devices.deferredRemove = true
+	}
+
 	// https://github.com/docker/docker/issues/4036
 	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
-		log.Warnf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors")
+		logrus.Errorf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. For more information, see https://docs.docker.com/reference/commandline/cli/#daemon-storage-driver-option")
+		if !devices.overrideUdevSyncCheck {
+			return graphdriver.ErrNotSupported
+		}
 	}
-	log.Debugf("devicemapper: udev sync support: %v", devicemapper.UdevSyncSupported())
 
 	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
 		return err
@@ -985,13 +1057,13 @@
 	//	- The target of this device is at major <maj> and minor <min>
 	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
 	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
-	log.Debugf("Generated prefix: %s", devices.devicePrefix)
+	logrus.Debugf("Generated prefix: %s", devices.devicePrefix)
 
 	// Check for the existence of the thin-pool device
-	log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
+	logrus.Debugf("Checking for existence of the pool '%s'", devices.getPoolName())
 	info, err := devicemapper.GetInfo(devices.getPoolName())
 	if info == nil {
-		log.Debugf("Error device devicemapper.GetInfo: %s", err)
+		logrus.Debugf("Error device devicemapper.GetInfo: %s", err)
 		return err
 	}
 
@@ -1007,7 +1079,7 @@
 
 	// If the pool doesn't exist, create it
 	if info.Exists == 0 && devices.thinPoolDevice == "" {
-		log.Debugf("Pool doesn't exist. Creating it.")
+		logrus.Debugf("Pool doesn't exist. Creating it.")
 
 		var (
 			dataFile     *os.File
@@ -1029,7 +1101,7 @@
 
 			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
 			if err != nil {
-				log.Debugf("Error device ensureImage (data): %s", err)
+				logrus.Debugf("Error device ensureImage (data): %s", err)
 				return err
 			}
 
@@ -1062,7 +1134,7 @@
 
 			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
 			if err != nil {
-				log.Debugf("Error device ensureImage (metadata): %s", err)
+				logrus.Debugf("Error device ensureImage (metadata): %s", err)
 				return err
 			}
 
@@ -1088,21 +1160,21 @@
 	// If we didn't just create the data or metadata image, we need to
 	// load the transaction id and migrate old metadata
 	if !createdLoopback {
-		if err = devices.initMetaData(); err != nil {
+		if err := devices.initMetaData(); err != nil {
 			return err
 		}
 	}
 
 	// Right now this loads only NextDeviceId. If there is more metadata
 	// down the line, we might have to move it earlier.
-	if err = devices.loadDeviceSetMetaData(); err != nil {
+	if err := devices.loadDeviceSetMetaData(); err != nil {
 		return err
 	}
 
 	// Setup the base image
 	if doInit {
 		if err := devices.setupBaseImage(); err != nil {
-			log.Debugf("Error device setupBaseImage: %s", err)
+			logrus.Debugf("Error device setupBaseImage: %s", err)
 			return err
 		}
 	}
@@ -1111,8 +1183,8 @@
 }
 
 func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
-	log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s)", hash, baseHash)
-	defer log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)
+	logrus.Debugf("[deviceset] AddDevice(hash=%s basehash=%s)", hash, baseHash)
+	defer logrus.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash)
 
 	baseInfo, err := devices.lookupDevice(baseHash)
 	if err != nil {
@@ -1143,26 +1215,26 @@
 		// manually
 		if err := devices.activateDeviceIfNeeded(info); err == nil {
 			if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
-				log.Debugf("Error discarding block on device: %s (ignoring)", err)
+				logrus.Debugf("Error discarding block on device: %s (ignoring)", err)
 			}
 		}
 	}
 
 	devinfo, _ := devicemapper.GetInfo(info.Name())
 	if devinfo != nil && devinfo.Exists != 0 {
-		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
-			log.Debugf("Error removing device: %s", err)
+		if err := devices.removeDevice(info.Name()); err != nil {
+			logrus.Debugf("Error removing device: %s", err)
 			return err
 		}
 	}
 
 	if err := devices.openTransaction(info.Hash, info.DeviceId); err != nil {
-		log.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceId)
+		logrus.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceId)
 		return err
 	}
 
 	if err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil {
-		log.Debugf("Error deleting device: %s", err)
+		logrus.Debugf("Error deleting device: %s", err)
 		return err
 	}
 
@@ -1195,8 +1267,8 @@
 }
 
 func (devices *DeviceSet) deactivatePool() error {
-	log.Debugf("[devmapper] deactivatePool()")
-	defer log.Debugf("[devmapper] deactivatePool END")
+	logrus.Debugf("[devmapper] deactivatePool()")
+	defer logrus.Debugf("[devmapper] deactivatePool END")
 	devname := devices.getPoolDevName()
 
 	devinfo, err := devicemapper.GetInfo(devname)
@@ -1205,7 +1277,7 @@
 	}
 	if d, err := devicemapper.GetDeps(devname); err == nil {
 		// Access to more Debug output
-		log.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d)
+		logrus.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d)
 	}
 	if devinfo.Exists != 0 {
 		return devicemapper.RemoveDevice(devname)
@@ -1215,34 +1287,38 @@
 }
 
 func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
-	log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
-	defer log.Debugf("[devmapper] deactivateDevice END(%s)", info.Hash)
-
-	// Wait for the unmount to be effective,
-	// by watching the value of Info.OpenCount for the device
-	if err := devices.waitClose(info); err != nil {
-		log.Errorf("Error waiting for device %s to close: %s", info.Hash, err)
-	}
+	logrus.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
+	defer logrus.Debugf("[devmapper] deactivateDevice END(%s)", info.Hash)
 
 	devinfo, err := devicemapper.GetInfo(info.Name())
 	if err != nil {
 		return err
 	}
-	if devinfo.Exists != 0 {
-		if err := devices.removeDeviceAndWait(info.Name()); err != nil {
+
+	if devinfo.Exists == 0 {
+		return nil
+	}
+
+	if devices.deferredRemove {
+		if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
+			return err
+		}
+	} else {
+		if err := devices.removeDevice(info.Name()); err != nil {
 			return err
 		}
 	}
-
 	return nil
 }
 
-// Issues the underlying dm remove operation and then waits
-// for it to finish.
-func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
+// Issues the underlying dm remove operation.
+func (devices *DeviceSet) removeDevice(devname string) error {
 	var err error
 
-	for i := 0; i < 1000; i++ {
+	logrus.Debugf("[devmapper] removeDevice START(%s)", devname)
+	defer logrus.Debugf("[devmapper] removeDevice END(%s)", devname)
+
+	for i := 0; i < 200; i++ {
 		err = devicemapper.RemoveDevice(devname)
 		if err == nil {
 			break
@@ -1254,80 +1330,56 @@
 		// If we see EBUSY it may be a transient error,
 		// sleep a bit a retry a few times.
 		devices.Unlock()
-		time.Sleep(10 * time.Millisecond)
+		time.Sleep(100 * time.Millisecond)
 		devices.Lock()
 	}
-	if err != nil {
-		return err
-	}
 
-	if err := devices.waitRemove(devname); err != nil {
-		return err
-	}
-	return nil
+	return err
 }
 
-// waitRemove blocks until either:
-// a) the device registered at <device_set_prefix>-<hash> is removed,
-// or b) the 10 second timeout expires.
-func (devices *DeviceSet) waitRemove(devname string) error {
-	log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
-	defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
-	i := 0
-	for ; i < 1000; i++ {
-		devinfo, err := devicemapper.GetInfo(devname)
-		if err != nil {
-			// If there is an error we assume the device doesn't exist.
-			// The error might actually be something else, but we can't differentiate.
+func (devices *DeviceSet) cancelDeferredRemoval(info *DevInfo) error {
+	if !devices.deferredRemove {
+		return nil
+	}
+
+	logrus.Debugf("[devmapper] cancelDeferredRemoval START(%s)", info.Name())
+	defer logrus.Debugf("[devmapper] cancelDeferredRemoval END(%s)", info.Name)
+
+	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
+
+	if devinfo != nil && devinfo.DeferredRemove == 0 {
+		return nil
+	}
+
+	// Cancel deferred remove
+	for i := 0; i < 100; i++ {
+		err = devicemapper.CancelDeferredRemove(info.Name())
+		if err == nil {
+			break
+		}
+
+		if err == devicemapper.ErrEnxio {
+			// Device is probably already gone. Return success.
 			return nil
 		}
-		if i%100 == 0 {
-			log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
-		}
-		if devinfo.Exists == 0 {
-			break
-		}
 
-		devices.Unlock()
-		time.Sleep(10 * time.Millisecond)
-		devices.Lock()
-	}
-	if i == 1000 {
-		return fmt.Errorf("Timeout while waiting for device %s to be removed", devname)
-	}
-	return nil
-}
-
-// waitClose blocks until either:
-// a) the device registered at <device_set_prefix>-<hash> is closed,
-// or b) the 10 second timeout expires.
-func (devices *DeviceSet) waitClose(info *DevInfo) error {
-	i := 0
-	for ; i < 1000; i++ {
-		devinfo, err := devicemapper.GetInfo(info.Name())
-		if err != nil {
+		if err != devicemapper.ErrBusy {
 			return err
 		}
-		if i%100 == 0 {
-			log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
-		}
-		if devinfo.OpenCount == 0 {
-			break
-		}
+
+		// If we see EBUSY it may be a transient error,
+		// sleep a bit a retry a few times.
 		devices.Unlock()
-		time.Sleep(10 * time.Millisecond)
+		time.Sleep(100 * time.Millisecond)
 		devices.Lock()
 	}
-	if i == 1000 {
-		return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash)
-	}
-	return nil
+	return err
 }
 
 func (devices *DeviceSet) Shutdown() error {
-	log.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix)
-	log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
-	defer log.Debugf("[deviceset %s] Shutdown() END", devices.devicePrefix)
+	logrus.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix)
+	logrus.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
+	defer logrus.Debugf("[deviceset %s] Shutdown() END", devices.devicePrefix)
 
 	var devs []*DevInfo
 
@@ -1344,12 +1396,12 @@
 			// container. This means it'll go away from the global scope directly,
 			// and the device will be released when that container dies.
 			if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
-				log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err)
+				logrus.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err)
 			}
 
 			devices.Lock()
 			if err := devices.deactivateDevice(info); err != nil {
-				log.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err)
+				logrus.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err)
 			}
 			devices.Unlock()
 		}
@@ -1361,7 +1413,7 @@
 		info.lock.Lock()
 		devices.Lock()
 		if err := devices.deactivateDevice(info); err != nil {
-			log.Debugf("Shutdown deactivate base , error: %s", err)
+			logrus.Debugf("Shutdown deactivate base , error: %s", err)
 		}
 		devices.Unlock()
 		info.lock.Unlock()
@@ -1370,7 +1422,7 @@
 	devices.Lock()
 	if devices.thinPoolDevice == "" {
 		if err := devices.deactivatePool(); err != nil {
-			log.Debugf("Shutdown deactivate pool , error: %s", err)
+			logrus.Debugf("Shutdown deactivate pool , error: %s", err)
 		}
 	}
 
@@ -1422,11 +1474,7 @@
 	options = joinMountOptions(options, devices.mountOptions)
 	options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
 
-	err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options))
-	if err != nil && err == syscall.EINVAL {
-		err = syscall.Mount(info.DevName(), path, fstype, flags, options)
-	}
-	if err != nil {
+	if err := syscall.Mount(info.DevName(), path, fstype, flags, options); err != nil {
 		return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
 	}
 
@@ -1437,8 +1485,8 @@
 }
 
 func (devices *DeviceSet) UnmountDevice(hash string) error {
-	log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
-	defer log.Debugf("[devmapper] UnmountDevice(hash=%s) END", hash)
+	logrus.Debugf("[devmapper] UnmountDevice(hash=%s)", hash)
+	defer logrus.Debugf("[devmapper] UnmountDevice(hash=%s) END", hash)
 
 	info, err := devices.lookupDevice(hash)
 	if err != nil {
@@ -1460,11 +1508,11 @@
 		return nil
 	}
 
-	log.Debugf("[devmapper] Unmount(%s)", info.mountPath)
+	logrus.Debugf("[devmapper] Unmount(%s)", info.mountPath)
 	if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil {
 		return err
 	}
-	log.Debugf("[devmapper] Unmount done")
+	logrus.Debugf("[devmapper] Unmount done")
 
 	if err := devices.deactivateDevice(info); err != nil {
 		return err
@@ -1549,14 +1597,16 @@
 		return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
 	}
 
-	if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil {
+	sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName())
+
+	if err != nil {
 		return nil, err
-	} else {
-		status.SizeInSectors = sizeInSectors
-		status.MappedSectors = mappedSectors
-		status.HighestMappedSector = highestMappedSector
 	}
 
+	status.SizeInSectors = sizeInSectors
+	status.MappedSectors = mappedSectors
+	status.HighestMappedSector = highestMappedSector
+
 	return status, nil
 }
 
@@ -1582,9 +1632,8 @@
 
 func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
 	buf := new(syscall.Statfs_t)
-	err := syscall.Statfs(loopFile, buf)
-	if err != nil {
-		log.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err)
+	if err := syscall.Statfs(loopFile, buf); err != nil {
+		logrus.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err)
 		return 0, err
 	}
 	return buf.Bfree * uint64(buf.Bsize), nil
@@ -1594,7 +1643,7 @@
 	if loopFile != "" {
 		fi, err := os.Stat(loopFile)
 		if err != nil {
-			log.Warnf("Couldn't stat loopfile %v: %v", loopFile, err)
+			logrus.Warnf("Couldn't stat loopfile %v: %v", loopFile, err)
 			return false, err
 		}
 		return fi.Mode().IsRegular(), nil
@@ -1615,6 +1664,7 @@
 	status.MetadataFile = devices.MetadataDevicePath()
 	status.MetadataLoopback = devices.metadataLoopFile
 	status.UdevSyncSupported = devicemapper.UdevSyncSupported()
+	status.DeferredRemoveEnabled = devices.deferredRemove
 
 	totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
 	if err == nil {
@@ -1654,15 +1704,16 @@
 	devicemapper.SetDevDir("/dev")
 
 	devices := &DeviceSet{
-		root:                 root,
-		MetaData:             MetaData{Devices: make(map[string]*DevInfo)},
-		dataLoopbackSize:     DefaultDataLoopbackSize,
-		metaDataLoopbackSize: DefaultMetaDataLoopbackSize,
-		baseFsSize:           DefaultBaseFsSize,
-		filesystem:           "ext4",
-		doBlkDiscard:         true,
-		thinpBlockSize:       DefaultThinpBlockSize,
-		deviceIdMap:          make([]byte, DeviceIdMapSz),
+		root:                  root,
+		MetaData:              MetaData{Devices: make(map[string]*DevInfo)},
+		dataLoopbackSize:      DefaultDataLoopbackSize,
+		metaDataLoopbackSize:  DefaultMetaDataLoopbackSize,
+		baseFsSize:            DefaultBaseFsSize,
+		overrideUdevSyncCheck: DefaultUdevSyncOverride,
+		filesystem:            "ext4",
+		doBlkDiscard:          true,
+		thinpBlockSize:        DefaultThinpBlockSize,
+		deviceIdMap:           make([]byte, DeviceIdMapSz),
 	}
 
 	foundBlkDiscard := false
@@ -1719,6 +1770,18 @@
 			}
 			// convert to 512b sectors
 			devices.thinpBlockSize = uint32(size) >> 9
+		case "dm.override_udev_sync_check":
+			devices.overrideUdevSyncCheck, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+
+		case "dm.use_deferred_removal":
+			EnableDeferredRemoval, err = strconv.ParseBool(val)
+			if err != nil {
+				return nil, err
+			}
+
 		default:
 			return nil, fmt.Errorf("Unknown option %s\n", key)
 		}
diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go
index 6cb7572..60006af 100644
--- a/daemon/graphdriver/devmapper/devmapper_test.go
+++ b/daemon/graphdriver/devmapper/devmapper_test.go
@@ -13,6 +13,7 @@
 	DefaultDataLoopbackSize = 300 * 1024 * 1024
 	DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
 	DefaultBaseFsSize = 300 * 1024 * 1024
+	DefaultUdevSyncOverride = true
 	if err := graphtest.InitLoopbacks(); err != nil {
 		panic(err)
 	}
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
index 6dd05ca..bdf7f87 100644
--- a/daemon/graphdriver/devmapper/driver.go
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -8,7 +8,7 @@
 	"os"
 	"path"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/devicemapper"
 	"github.com/docker/docker/pkg/mount"
@@ -77,6 +77,7 @@
 		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
 		{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
 		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
+		{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
 	}
 	if len(s.DataLoopback) > 0 {
 		status = append(status, [2]string{"Data loop file", s.DataLoopback})
@@ -164,7 +165,7 @@
 func (d *Driver) Put(id string) error {
 	err := d.DeviceSet.UnmountDevice(id)
 	if err != nil {
-		log.Errorf("Error unmounting device %s: %s", id, err)
+		logrus.Errorf("Error unmounting device %s: %s", id, err)
 	}
 	return err
 }
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
index 0488345..963acdf 100644
--- a/daemon/graphdriver/driver.go
+++ b/daemon/graphdriver/driver.go
@@ -4,29 +4,16 @@
 	"errors"
 	"fmt"
 	"os"
-	"path"
+	"path/filepath"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/archive"
 )
 
 type FsMagic uint32
 
 const (
-	FsMagicBtrfs       = FsMagic(0x9123683E)
-	FsMagicAufs        = FsMagic(0x61756673)
-	FsMagicExtfs       = FsMagic(0x0000EF53)
-	FsMagicCramfs      = FsMagic(0x28cd3d45)
-	FsMagicRamFs       = FsMagic(0x858458f6)
-	FsMagicTmpFs       = FsMagic(0x01021994)
-	FsMagicSquashFs    = FsMagic(0x73717368)
-	FsMagicNfsFs       = FsMagic(0x00006969)
-	FsMagicReiserFs    = FsMagic(0x52654973)
-	FsMagicSmbFs       = FsMagic(0x0000517B)
-	FsMagicJffs2Fs     = FsMagic(0x000072b6)
-	FsMagicZfs         = FsMagic(0x2fc12fc1)
-	FsMagicXfs         = FsMagic(0x58465342)
 	FsMagicUnsupported = FsMagic(0x00000000)
 )
 
@@ -34,35 +21,10 @@
 	DefaultDriver string
 	// All registred drivers
 	drivers map[string]InitFunc
-	// Slice of drivers that should be used in an order
-	priority = []string{
-		"aufs",
-		"btrfs",
-		"devicemapper",
-		"overlay",
-		"vfs",
-	}
 
 	ErrNotSupported   = errors.New("driver not supported")
 	ErrPrerequisites  = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
 	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
-
-	FsNames = map[FsMagic]string{
-		FsMagicAufs:        "aufs",
-		FsMagicBtrfs:       "btrfs",
-		FsMagicExtfs:       "extfs",
-		FsMagicCramfs:      "cramfs",
-		FsMagicRamFs:       "ramfs",
-		FsMagicTmpFs:       "tmpfs",
-		FsMagicSquashFs:    "squashfs",
-		FsMagicNfsFs:       "nfs",
-		FsMagicReiserFs:    "reiserfs",
-		FsMagicSmbFs:       "smb",
-		FsMagicJffs2Fs:     "jffs2",
-		FsMagicZfs:         "zfs",
-		FsMagicXfs:         "xfs",
-		FsMagicUnsupported: "unsupported",
-	}
 )
 
 type InitFunc func(root string, options []string) (Driver, error)
@@ -134,7 +96,7 @@
 
 func GetDriver(name, home string, options []string) (Driver, error) {
 	if initFunc, exists := drivers[name]; exists {
-		return initFunc(path.Join(home, name), options)
+		return initFunc(filepath.Join(home, name), options)
 	}
 	return nil, ErrNotSupported
 }
@@ -142,10 +104,40 @@
 func New(root string, options []string) (driver Driver, err error) {
 	for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} {
 		if name != "" {
+			logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
 			return GetDriver(name, root, options)
 		}
 	}
 
+	// Guess for prior driver
+	priorDrivers := scanPriorDrivers(root)
+	for _, name := range priority {
+		if name == "vfs" {
+			// don't use vfs even if there is state present.
+			continue
+		}
+		for _, prior := range priorDrivers {
+			// of the state found from prior drivers, check in order of our priority
+			// which we would prefer
+			if prior == name {
+				driver, err = GetDriver(name, root, options)
+				if err != nil {
+					// unlike below, we will return error here, because there is prior
+					// state, and now it is no longer supported/prereq/compatible, so
+					// something changed and needs attention. Otherwise the daemon's
+					// images would just "disappear".
+					logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err)
+					return nil, err
+				}
+				if err := checkPriorDriver(name, root); err != nil {
+					return nil, err
+				}
+				logrus.Infof("[graphdriver] using prior storage driver %q", name)
+				return driver, nil
+			}
+		}
+	}
+
 	// Check for priority drivers first
 	for _, name := range priority {
 		driver, err = GetDriver(name, root, options)
@@ -155,34 +147,47 @@
 			}
 			return nil, err
 		}
-		checkPriorDriver(name, root)
 		return driver, nil
 	}
 
 	// Check all registered drivers if no priority driver is found
-	for name, initFunc := range drivers {
+	for _, initFunc := range drivers {
 		if driver, err = initFunc(root, options); err != nil {
 			if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
 				continue
 			}
 			return nil, err
 		}
-		checkPriorDriver(name, root)
 		return driver, nil
 	}
 	return nil, fmt.Errorf("No supported storage backend found")
 }
 
-func checkPriorDriver(name, root string) {
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) []string {
 	priorDrivers := []string{}
-	for prior := range drivers {
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil {
+			priorDrivers = append(priorDrivers, driver)
+		}
+	}
+	return priorDrivers
+}
+
+func checkPriorDriver(name, root string) error {
+	priorDrivers := []string{}
+	for _, prior := range scanPriorDrivers(root) {
 		if prior != name && prior != "vfs" {
-			if _, err := os.Stat(path.Join(root, prior)); err == nil {
+			if _, err := os.Stat(filepath.Join(root, prior)); err == nil {
 				priorDrivers = append(priorDrivers, prior)
 			}
 		}
 	}
+
 	if len(priorDrivers) > 0 {
-		log.Warnf("Graphdriver %s selected. Your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ","))
+
+		return errors.New(fmt.Sprintf("%q contains other graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(priorDrivers, ",")))
 	}
+	return nil
 }
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
index acf96d1..88d88e2 100644
--- a/daemon/graphdriver/driver_linux.go
+++ b/daemon/graphdriver/driver_linux.go
@@ -1,13 +1,64 @@
+// +build linux
+
 package graphdriver
 
 import (
-	"path"
+	"path/filepath"
 	"syscall"
 )
 
+const (
+	FsMagicAufs     = FsMagic(0x61756673)
+	FsMagicBtrfs    = FsMagic(0x9123683E)
+	FsMagicCramfs   = FsMagic(0x28cd3d45)
+	FsMagicExtfs    = FsMagic(0x0000EF53)
+	FsMagicF2fs     = FsMagic(0xF2F52010)
+	FsMagicJffs2Fs  = FsMagic(0x000072b6)
+	FsMagicJfs      = FsMagic(0x3153464a)
+	FsMagicNfsFs    = FsMagic(0x00006969)
+	FsMagicRamFs    = FsMagic(0x858458f6)
+	FsMagicReiserFs = FsMagic(0x52654973)
+	FsMagicSmbFs    = FsMagic(0x0000517B)
+	FsMagicSquashFs = FsMagic(0x73717368)
+	FsMagicTmpFs    = FsMagic(0x01021994)
+	FsMagicXfs      = FsMagic(0x58465342)
+	FsMagicZfs      = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"aufs",
+		"btrfs",
+		"zfs",
+		"devicemapper",
+		"overlay",
+		"vfs",
+	}
+
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicRamFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
 func GetFSMagic(rootpath string) (FsMagic, error) {
 	var buf syscall.Statfs_t
-	if err := syscall.Statfs(path.Dir(rootpath), &buf); err != nil {
+	if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
 		return 0, err
 	}
 	return FsMagic(buf.Type), nil
diff --git a/daemon/graphdriver/driver_unsupported.go b/daemon/graphdriver/driver_unsupported.go
index 27933b6..3f36864 100644
--- a/daemon/graphdriver/driver_unsupported.go
+++ b/daemon/graphdriver/driver_unsupported.go
@@ -1,7 +1,14 @@
-// +build !linux
+// +build !linux,!windows
 
 package graphdriver
 
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"unsupported",
+	}
+)
+
 func GetFSMagic(rootpath string) (FsMagic, error) {
 	return FsMagicUnsupported, nil
 }
diff --git a/daemon/graphdriver/driver_windows.go b/daemon/graphdriver/driver_windows.go
new file mode 100644
index 0000000..3ba0978
--- /dev/null
+++ b/daemon/graphdriver/driver_windows.go
@@ -0,0 +1,26 @@
+package graphdriver
+
+type DiffDiskDriver interface {
+	Driver
+	CopyDiff(id, sourceId string) error
+}
+
+const (
+	FsMagicWindows = FsMagic(0xa1b1830f)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"windows",
+	}
+
+	FsNames = map[FsMagic]string{
+		FsMagicWindows:     "windows",
+		FsMagicUnsupported: "unsupported",
+	}
+)
+
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return FsMagicWindows, nil
+}
diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go
index ab1b08f..e091e61 100644
--- a/daemon/graphdriver/fsdiff.go
+++ b/daemon/graphdriver/fsdiff.go
@@ -5,7 +5,7 @@
 import (
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/ioutils"
@@ -120,11 +120,11 @@
 	defer driver.Put(id)
 
 	start := time.Now().UTC()
-	log.Debugf("Start untar layer")
+	logrus.Debugf("Start untar layer")
 	if size, err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
 		return
 	}
-	log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
+	logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
 
 	return
 }
diff --git a/daemon/graphdriver/graphtest/graphtest.go b/daemon/graphdriver/graphtest/graphtest.go
index 2bd30f6..d9908d4 100644
--- a/daemon/graphdriver/graphtest/graphtest.go
+++ b/daemon/graphdriver/graphtest/graphtest.go
@@ -24,7 +24,7 @@
 // InitLoopbacks ensures that the loopback devices are properly created within
 // the system running the device mapper tests.
 func InitLoopbacks() error {
-	stat_t, err := getBaseLoopStats()
+	statT, err := getBaseLoopStats()
 	if err != nil {
 		return err
 	}
@@ -34,10 +34,10 @@
 		// only create new loopback files if they don't exist
 		if _, err := os.Stat(loopPath); err != nil {
 			if mkerr := syscall.Mknod(loopPath,
-				uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
+				uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
 				return mkerr
 			}
-			os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid))
+			os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
 		}
 	}
 	return nil
diff --git a/daemon/graphdriver/overlay/copy.go b/daemon/graphdriver/overlay/copy.go
index ae6bee5..f43b117 100644
--- a/daemon/graphdriver/overlay/copy.go
+++ b/daemon/graphdriver/overlay/copy.go
@@ -71,9 +71,12 @@
 			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
 		}
 
+		isHardlink := false
+
 		switch f.Mode() & os.ModeType {
 		case 0: // Regular file
 			if flags&CopyHardlink != 0 {
+				isHardlink = true
 				if err := os.Link(srcPath, dstPath); err != nil {
 					return err
 				}
@@ -114,6 +117,12 @@
 			return fmt.Errorf("Unknown file type for %s\n", srcPath)
 		}
 
+		// Everything below is copying metadata from src to dst. All this metadata
+		// already shares an inode for hardlinks.
+		if isHardlink {
+			return nil
+		}
+
 		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
 			return err
 		}
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
index afe12c5..df6a7db 100644
--- a/daemon/graphdriver/overlay/overlay.go
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -12,7 +12,7 @@
 	"sync"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
@@ -113,13 +113,13 @@
 	// check if they are running over btrfs or aufs
 	switch fsMagic {
 	case graphdriver.FsMagicBtrfs:
-		log.Error("'overlay' is not supported over btrfs.")
+		logrus.Error("'overlay' is not supported over btrfs.")
 		return nil, graphdriver.ErrIncompatibleFS
 	case graphdriver.FsMagicAufs:
-		log.Error("'overlay' is not supported over aufs.")
+		logrus.Error("'overlay' is not supported over aufs.")
 		return nil, graphdriver.ErrIncompatibleFS
 	case graphdriver.FsMagicZfs:
-		log.Error("'overlay' is not supported over zfs.")
+		logrus.Error("'overlay' is not supported over zfs.")
 		return nil, graphdriver.ErrIncompatibleFS
 	}
 
@@ -153,7 +153,7 @@
 			return nil
 		}
 	}
-	log.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
+	logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
 	return graphdriver.ErrNotSupported
 }
 
@@ -273,10 +273,10 @@
 	if mount != nil {
 		mount.count++
 		return mount.path, nil
-	} else {
-		mount = &ActiveMount{count: 1}
 	}
 
+	mount = &ActiveMount{count: 1}
+
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
 		return "", err
@@ -317,7 +317,15 @@
 
 	mount := d.active[id]
 	if mount == nil {
-		log.Debugf("Put on a non-mounted device %s", id)
+		logrus.Debugf("Put on a non-mounted device %s", id)
+		// but it might be still here
+		if d.Exists(id) {
+			mergedDir := path.Join(d.dir(id), "merged")
+			err := syscall.Unmount(mergedDir, 0)
+			if err != nil {
+				logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+			}
+		}
 		return nil
 	}
 
@@ -330,7 +338,7 @@
 	if mount.mounted {
 		err := syscall.Unmount(mount.path, 0)
 		if err != nil {
-			log.Debugf("Failed to unmount %s overlay: %v", id, err)
+			logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
 		}
 		return err
 	}
diff --git a/daemon/graphdriver/zfs/MAINTAINERS b/daemon/graphdriver/zfs/MAINTAINERS
new file mode 100644
index 0000000..9c270c5
--- /dev/null
+++ b/daemon/graphdriver/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim <joerg@higgsboson.tk> (@Mic92)
+Arthur Gautier <baloo@gandi.net> (@baloose)
diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
new file mode 100644
index 0000000..f334f6d
--- /dev/null
+++ b/daemon/graphdriver/zfs/zfs.go
@@ -0,0 +1,311 @@
+// +build linux
+
+package zfs
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/parsers"
+	zfs "github.com/mistifyio/go-zfs"
+)
+
+type ZfsOptions struct {
+	fsName    string
+	mountPath string
+}
+
+func init() {
+	graphdriver.Register("zfs", Init)
+}
+
+type Logger struct{}
+
+func (*Logger) Log(cmd []string) {
+	log.Debugf("[zfs] %s", strings.Join(cmd, " "))
+}
+
+func Init(base string, opt []string) (graphdriver.Driver, error) {
+	var err error
+	options, err := parseOptions(opt)
+	if err != nil {
+		return nil, err
+	}
+	options.mountPath = base
+
+	rootdir := path.Dir(base)
+
+	if options.fsName == "" {
+		err = checkRootdirFs(rootdir)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if _, err := exec.LookPath("zfs"); err != nil {
+		return nil, fmt.Errorf("zfs command is not available: %v", err)
+	}
+
+	file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600)
+	if err != nil {
+		return nil, fmt.Errorf("cannot open /dev/zfs: %v", err)
+	}
+	defer file.Close()
+
+	if options.fsName == "" {
+		options.fsName, err = lookupZfsDataset(rootdir)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	zfs.SetLogger(new(Logger))
+
+	filesystems, err := zfs.Filesystems(options.fsName)
+	if err != nil {
+		return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+	}
+
+	filesystemsCache := make(map[string]bool, len(filesystems))
+	var rootDataset *zfs.Dataset
+	for _, fs := range filesystems {
+		if fs.Name == options.fsName {
+			rootDataset = fs
+		}
+		filesystemsCache[fs.Name] = true
+	}
+
+	if rootDataset == nil {
+		return nil, fmt.Errorf("BUG: zfs get all -t filesystems -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+	}
+
+	d := &Driver{
+		dataset:          rootDataset,
+		options:          options,
+		filesystemsCache: filesystemsCache,
+	}
+	return graphdriver.NaiveDiffDriver(d), nil
+}
+
+func parseOptions(opt []string) (ZfsOptions, error) {
+	var options ZfsOptions
+	options.fsName = ""
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "zfs.fsname":
+			options.fsName = val
+		default:
+			return options, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, nil
+}
+
+func checkRootdirFs(rootdir string) error {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(rootdir, &buf); err != nil {
+		return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+	}
+
+	if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
+		log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		return graphdriver.ErrPrerequisites
+	}
+	return nil
+}
+
+func lookupZfsDataset(rootdir string) (string, error) {
+	var stat syscall.Stat_t
+	if err := syscall.Stat(rootdir, &stat); err != nil {
+		return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+	}
+	wantedDev := stat.Dev
+
+	mounts, err := mount.GetMounts()
+	if err != nil {
+		return "", err
+	}
+	for _, m := range mounts {
+		if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
+			log.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
+			continue // may fail on fuse file systems
+		}
+
+		if stat.Dev == wantedDev && m.Fstype == "zfs" {
+			return m.Source, nil
+		}
+	}
+
+	return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+}
+
+type Driver struct {
+	dataset          *zfs.Dataset
+	options          ZfsOptions
+	sync.Mutex       // protects filesystem cache against concurrent access
+	filesystemsCache map[string]bool
+}
+
+func (d *Driver) String() string {
+	return "zfs"
+}
+
+func (d *Driver) Cleanup() error {
+	return nil
+}
+
+func (d *Driver) Status() [][2]string {
+	parts := strings.Split(d.dataset.Name, "/")
+	pool, err := zfs.GetZpool(parts[0])
+
+	var poolName, poolHealth string
+	if err == nil {
+		poolName = pool.Name
+		poolHealth = pool.Health
+	} else {
+		poolName = fmt.Sprintf("error while getting pool information %v", err)
+		poolHealth = "not available"
+	}
+
+	quota := "no"
+	if d.dataset.Quota != 0 {
+		quota = strconv.FormatUint(d.dataset.Quota, 10)
+	}
+
+	return [][2]string{
+		{"Zpool", poolName},
+		{"Zpool Health", poolHealth},
+		{"Parent Dataset", d.dataset.Name},
+		{"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+		{"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+		{"Parent Quota", quota},
+		{"Compression", d.dataset.Compression},
+	}
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+	snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+	parentDataset := zfs.Dataset{Name: parentName}
+	snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false)
+	if err != nil {
+		return err
+	}
+
+	_, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+	if err == nil {
+		d.Lock()
+		d.filesystemsCache[name] = true
+		d.Unlock()
+	}
+
+	if err != nil {
+		snapshot.Destroy(zfs.DestroyDeferDeletion)
+		return err
+	}
+	return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) ZfsPath(id string) string {
+	return d.options.fsName + "/" + id
+}
+
+func (d *Driver) MountPath(id string) string {
+	return path.Join(d.options.mountPath, "graph", id)
+}
+
+func (d *Driver) Create(id string, parent string) error {
+	err := d.create(id, parent)
+	if err == nil {
+		return nil
+	}
+	if zfsError, ok := err.(*zfs.Error); ok {
+		if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") {
+			return err
+		}
+		// aborted build -> cleanup
+	} else {
+		return err
+	}
+
+	dataset := zfs.Dataset{Name: d.ZfsPath(id)}
+	if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil {
+		return err
+	}
+
+	// retry
+	return d.create(id, parent)
+}
+
+func (d *Driver) create(id, parent string) error {
+	name := d.ZfsPath(id)
+	if parent == "" {
+		mountoptions := map[string]string{"mountpoint": "legacy"}
+		fs, err := zfs.CreateFilesystem(name, mountoptions)
+		if err == nil {
+			d.Lock()
+			d.filesystemsCache[fs.Name] = true
+			d.Unlock()
+		}
+		return err
+	}
+	return d.cloneFilesystem(name, d.ZfsPath(parent))
+}
+
+func (d *Driver) Remove(id string) error {
+	name := d.ZfsPath(id)
+	dataset := zfs.Dataset{Name: name}
+	err := dataset.Destroy(zfs.DestroyRecursive)
+	if err == nil {
+		d.Lock()
+		delete(d.filesystemsCache, name)
+		d.Unlock()
+	}
+	return err
+}
+
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+	mountpoint := d.MountPath(id)
+	filesystem := d.ZfsPath(id)
+	log.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, mountLabel)
+
+	// Create the target directories if they don't exist
+	if err := os.MkdirAll(mountpoint, 0755); err != nil && !os.IsExist(err) {
+		return "", err
+	}
+
+	err := mount.Mount(filesystem, mountpoint, "zfs", mountLabel)
+	if err != nil {
+		return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
+	}
+
+	return mountpoint, nil
+}
+
+func (d *Driver) Put(id string) error {
+	mountpoint := d.MountPath(id)
+	log.Debugf(`[zfs] unmount("%s")`, mountpoint)
+
+	if err := mount.Unmount(mountpoint); err != nil {
+		return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
+	}
+	return nil
+}
+
+func (d *Driver) Exists(id string) bool {
+	return d.filesystemsCache[d.ZfsPath(id)] == true
+}
diff --git a/daemon/graphdriver/zfs/zfs_test.go b/daemon/graphdriver/zfs/zfs_test.go
new file mode 100644
index 0000000..c20eb98
--- /dev/null
+++ b/daemon/graphdriver/zfs/zfs_test.go
@@ -0,0 +1,30 @@
+// +build linux
+
+package zfs
+
+import (
+	"github.com/docker/docker/daemon/graphdriver/graphtest"
+	"testing"
+)
+
+// This avoids creating a new driver for each test if all tests are run
+// Make sure to put new tests between TestZfsSetup and TestZfsTeardown
+func TestZfsSetup(t *testing.T) {
+	graphtest.GetDriver(t, "zfs")
+}
+
+func TestZfsCreateEmpty(t *testing.T) {
+	graphtest.DriverTestCreateEmpty(t, "zfs")
+}
+
+func TestZfsCreateBase(t *testing.T) {
+	graphtest.DriverTestCreateBase(t, "zfs")
+}
+
+func TestZfsCreateSnap(t *testing.T) {
+	graphtest.DriverTestCreateSnap(t, "zfs")
+}
+
+func TestZfsTeardown(t *testing.T) {
+	graphtest.PutDriver(t)
+}
diff --git a/daemon/graphdriver/zfs/zfs_unsupported.go b/daemon/graphdriver/zfs/zfs_unsupported.go
new file mode 100644
index 0000000..a30a0f6
--- /dev/null
+++ b/daemon/graphdriver/zfs/zfs_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package zfs
diff --git a/daemon/history.go b/daemon/history.go
index 0b125ad..f717508 100644
--- a/daemon/history.go
+++ b/daemon/history.go
@@ -19,9 +19,7 @@
 
 func (history *History) Swap(i, j int) {
 	containers := *history
-	tmp := containers[i]
-	containers[i] = containers[j]
-	containers[j] = tmp
+	containers[i], containers[j] = containers[j], containers[i]
 }
 
 func (history *History) Add(container *Container) {
diff --git a/daemon/image_delete.go b/daemon/image_delete.go
index 0c0a534..ece33a3 100644
--- a/daemon/image_delete.go
+++ b/daemon/image_delete.go
@@ -4,37 +4,33 @@
 	"fmt"
 	"strings"
 
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
+// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
+func (daemon *Daemon) ImageDelete(name string, force, noprune bool) ([]types.ImageDelete, error) {
+	list := []types.ImageDelete{}
+	if err := daemon.imgDeleteHelper(name, &list, true, force, noprune); err != nil {
+		return nil, err
 	}
-	imgs := engine.NewTable("", 0)
-	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
-		return job.Error(err)
+	if len(list) == 0 {
+		return nil, fmt.Errorf("Conflict, %s wasn't deleted", name)
 	}
-	if len(imgs.Data) == 0 {
-		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
-	}
-	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+
+	return list, nil
 }
 
-// FIXME: make this private and use the job instead
-func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.Table, first, force, noprune bool) error {
+func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, first, force, noprune bool) error {
 	var (
 		repoName, tag string
 		tags          = []string{}
 	)
+	repoAndTags := make(map[string][]string)
 
 	// FIXME: please respect DRY and centralize repo+tag parsing in a single central place! -- shykes
 	repoName, tag = parsers.ParseRepositoryTag(name)
@@ -73,19 +69,25 @@
 			if repoName == "" || repoName == parsedRepo {
 				repoName = parsedRepo
 				if parsedTag != "" {
-					tags = append(tags, parsedTag)
+					repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag)
 				}
 			} else if repoName != parsedRepo && !force && first {
 				// the id belongs to multiple repos, like base:latest and user:test,
 				// in that case return conflict
 				return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
+			} else {
+				//the id belongs to multiple repos, with -f just delete all
+				repoName = parsedRepo
+				if parsedTag != "" {
+					repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag)
+				}
 			}
 		}
 	} else {
-		tags = append(tags, tag)
+		repoAndTags[repoName] = append(repoAndTags[repoName], tag)
 	}
 
-	if !first && len(tags) > 0 {
+	if !first && len(repoAndTags) > 0 {
 		return nil
 	}
 
@@ -96,16 +98,18 @@
 	}
 
 	// Untag the current image
-	for _, tag := range tags {
-		tagDeleted, err := daemon.Repositories().Delete(repoName, tag)
-		if err != nil {
-			return err
-		}
-		if tagDeleted {
-			out := &engine.Env{}
-			out.Set("Untagged", utils.ImageReference(repoName, tag))
-			imgs.Add(out)
-			eng.Job("log", "untag", img.ID, "").Run()
+	for repoName, tags := range repoAndTags {
+		for _, tag := range tags {
+			tagDeleted, err := daemon.Repositories().Delete(repoName, tag)
+			if err != nil {
+				return err
+			}
+			if tagDeleted {
+				*list = append(*list, types.ImageDelete{
+					Untagged: utils.ImageReference(repoName, tag),
+				})
+				daemon.EventsService.Log("untag", img.ID, "")
+			}
 		}
 	}
 	tags = daemon.Repositories().ByID()[img.ID]
@@ -117,12 +121,12 @@
 			if err := daemon.Graph().Delete(img.ID); err != nil {
 				return err
 			}
-			out := &engine.Env{}
-			out.SetJson("Deleted", img.ID)
-			imgs.Add(out)
-			eng.Job("log", "delete", img.ID, "").Run()
+			*list = append(*list, types.ImageDelete{
+				Deleted: img.ID,
+			})
+			daemon.EventsService.Log("delete", img.ID, "")
 			if img.Parent != "" && !noprune {
-				err := daemon.DeleteImage(eng, img.Parent, imgs, false, force, noprune)
+				err := daemon.imgDeleteHelper(img.Parent, list, false, force, noprune)
 				if first {
 					return err
 				}
@@ -138,7 +142,7 @@
 	for _, container := range daemon.List() {
 		parent, err := daemon.Repositories().LookupImage(container.ImageID)
 		if err != nil {
-			if daemon.Graph().IsNotExist(err) {
+			if daemon.Graph().IsNotExist(err, container.ImageID) {
 				return nil
 			}
 			return err
@@ -148,11 +152,11 @@
 			if imgID == p.ID {
 				if container.IsRunning() {
 					if force {
-						return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", common.TruncateID(imgID), common.TruncateID(container.ID))
+						return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", stringid.TruncateID(imgID), stringid.TruncateID(container.ID))
 					}
-					return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID))
+					return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID))
 				} else if !force {
-					return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID))
+					return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", stringid.TruncateID(imgID), stringid.TruncateID(container.ID))
 				}
 			}
 			return nil
diff --git a/daemon/info.go b/daemon/info.go
index 965c370..edec5f9 100644
--- a/daemon/info.go
+++ b/daemon/info.go
@@ -5,9 +5,10 @@
 	"runtime"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/parsers/operatingsystem"
 	"github.com/docker/docker/pkg/system"
@@ -15,7 +16,7 @@
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
+func (daemon *Daemon) SystemInfo() (*types.Info, error) {
 	images, _ := daemon.Graph().Map()
 	var imgcount int
 	if images == nil {
@@ -32,16 +33,20 @@
 	if s, err := operatingsystem.GetOperatingSystem(); err == nil {
 		operatingSystem = s
 	}
-	if inContainer, err := operatingsystem.IsContainerized(); err != nil {
-		log.Errorf("Could not determine if daemon is containerized: %v", err)
-		operatingSystem += " (error determining if containerized)"
-	} else if inContainer {
-		operatingSystem += " (containerized)"
+
+	// Don't do containerized check on Windows
+	if runtime.GOOS != "windows" {
+		if inContainer, err := operatingsystem.IsContainerized(); err != nil {
+			logrus.Errorf("Could not determine if daemon is containerized: %v", err)
+			operatingSystem += " (error determining if containerized)"
+		} else if inContainer {
+			operatingSystem += " (containerized)"
+		}
 	}
 
 	meminfo, err := system.ReadMemInfo()
 	if err != nil {
-		log.Errorf("Could not read system memory info: %v", err)
+		logrus.Errorf("Could not read system memory info: %v", err)
 	}
 
 	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
@@ -51,60 +56,50 @@
 		initPath = daemon.SystemInitPath()
 	}
 
-	cjob := job.Eng.Job("subscribers_count")
-	env, _ := cjob.Stdout.AddEnv()
-	if err := cjob.Run(); err != nil {
-		return job.Error(err)
-	}
-	registryJob := job.Eng.Job("registry_config")
-	registryEnv, _ := registryJob.Stdout.AddEnv()
-	if err := registryJob.Run(); err != nil {
-		return job.Error(err)
-	}
-	registryConfig := registry.ServiceConfig{}
-	if err := registryEnv.GetJson("config", &registryConfig); err != nil {
-		return job.Error(err)
-	}
-	v := &engine.Env{}
-	v.SetJson("ID", daemon.ID)
-	v.SetInt("Containers", len(daemon.List()))
-	v.SetInt("Images", imgcount)
-	v.Set("Driver", daemon.GraphDriver().String())
-	v.SetJson("DriverStatus", daemon.GraphDriver().Status())
-	v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit)
-	v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit)
-	v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled)
-	v.SetBool("Debug", os.Getenv("DEBUG") != "")
-	v.SetInt("NFd", utils.GetTotalUsedFds())
-	v.SetInt("NGoroutines", runtime.NumGoroutine())
-	v.Set("SystemTime", time.Now().Format(time.RFC3339Nano))
-	v.Set("ExecutionDriver", daemon.ExecutionDriver().Name())
-	v.SetInt("NEventsListener", env.GetInt("count"))
-	v.Set("KernelVersion", kernelVersion)
-	v.Set("OperatingSystem", operatingSystem)
-	v.Set("IndexServerAddress", registry.IndexServerAddress())
-	v.SetJson("RegistryConfig", registryConfig)
-	v.Set("InitSha1", dockerversion.INITSHA1)
-	v.Set("InitPath", initPath)
-	v.SetInt("NCPU", runtime.NumCPU())
-	v.SetInt64("MemTotal", meminfo.MemTotal)
-	v.Set("DockerRootDir", daemon.Config().Root)
-	if http_proxy := os.Getenv("http_proxy"); http_proxy != "" {
-		v.Set("HttpProxy", http_proxy)
-	}
-	if https_proxy := os.Getenv("https_proxy"); https_proxy != "" {
-		v.Set("HttpsProxy", https_proxy)
-	}
-	if no_proxy := os.Getenv("no_proxy"); no_proxy != "" {
-		v.Set("NoProxy", no_proxy)
+	v := &types.Info{
+		ID:                 daemon.ID,
+		Containers:         len(daemon.List()),
+		Images:             imgcount,
+		Driver:             daemon.GraphDriver().String(),
+		DriverStatus:       daemon.GraphDriver().Status(),
+		MemoryLimit:        daemon.SystemConfig().MemoryLimit,
+		SwapLimit:          daemon.SystemConfig().SwapLimit,
+		CpuCfsPeriod:       daemon.SystemConfig().CpuCfsPeriod,
+		CpuCfsQuota:        daemon.SystemConfig().CpuCfsQuota,
+		IPv4Forwarding:     !daemon.SystemConfig().IPv4ForwardingDisabled,
+		Debug:              os.Getenv("DEBUG") != "",
+		NFd:                fileutils.GetTotalUsedFds(),
+		OomKillDisable:     daemon.SystemConfig().OomKillDisable,
+		NGoroutines:        runtime.NumGoroutine(),
+		SystemTime:         time.Now().Format(time.RFC3339Nano),
+		ExecutionDriver:    daemon.ExecutionDriver().Name(),
+		LoggingDriver:      daemon.defaultLogConfig.Type,
+		NEventsListener:    daemon.EventsService.SubscribersCount(),
+		KernelVersion:      kernelVersion,
+		OperatingSystem:    operatingSystem,
+		IndexServerAddress: registry.IndexServerAddress(),
+		RegistryConfig:     daemon.RegistryService.Config,
+		InitSha1:           dockerversion.INITSHA1,
+		InitPath:           initPath,
+		NCPU:               runtime.NumCPU(),
+		MemTotal:           meminfo.MemTotal,
+		DockerRootDir:      daemon.Config().Root,
+		Labels:             daemon.Config().Labels,
+		ExperimentalBuild:  utils.ExperimentalBuild(),
 	}
 
+	if httpProxy := os.Getenv("http_proxy"); httpProxy != "" {
+		v.HttpProxy = httpProxy
+	}
+	if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" {
+		v.HttpsProxy = httpsProxy
+	}
+	if noProxy := os.Getenv("no_proxy"); noProxy != "" {
+		v.NoProxy = noProxy
+	}
 	if hostname, err := os.Hostname(); err == nil {
-		v.SetJson("Name", hostname)
+		v.Name = hostname
 	}
-	v.SetList("Labels", daemon.Config().Labels)
-	if _, err := v.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+
+	return v, nil
 }
diff --git a/daemon/inspect.go b/daemon/inspect.go
index 0826579..146bd77 100644
--- a/daemon/inspect.go
+++ b/daemon/inspect.go
@@ -1,99 +1,99 @@
 package daemon
 
 import (
-	"encoding/json"
 	"fmt"
 
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
-	}
-	name := job.Args[0]
+type ContainerJSONRaw struct {
+	*Container
+	HostConfig *runconfig.HostConfig
+
+	// Unused fields for backward compatibility with API versions < 1.12.
+	Volumes   map[string]string
+	VolumesRW map[string]bool
+}
+
+func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 
 	container.Lock()
 	defer container.Unlock()
-	if job.GetenvBool("raw") {
-		b, err := json.Marshal(&struct {
-			*Container
-			HostConfig *runconfig.HostConfig
-		}{container, container.hostConfig})
-		if err != nil {
-			return job.Error(err)
-		}
-		job.Stdout.Write(b)
-		return engine.StatusOK
-	}
 
-	out := &engine.Env{}
-	out.SetJson("Id", container.ID)
-	out.SetAuto("Created", container.Created)
-	out.SetJson("Path", container.Path)
-	out.SetList("Args", container.Args)
-	out.SetJson("Config", container.Config)
-	out.SetJson("State", container.State)
-	out.Set("Image", container.ImageID)
-	out.SetJson("NetworkSettings", container.NetworkSettings)
-	out.Set("ResolvConfPath", container.ResolvConfPath)
-	out.Set("HostnamePath", container.HostnamePath)
-	out.Set("HostsPath", container.HostsPath)
-	out.Set("LogPath", container.LogPath)
-	out.SetJson("Name", container.Name)
-	out.SetInt("RestartCount", container.RestartCount)
-	out.Set("Driver", container.Driver)
-	out.Set("ExecDriver", container.ExecDriver)
-	out.Set("MountLabel", container.MountLabel)
-	out.Set("ProcessLabel", container.ProcessLabel)
-	out.SetJson("Volumes", container.Volumes)
-	out.SetJson("VolumesRW", container.VolumesRW)
-	out.SetJson("AppArmorProfile", container.AppArmorProfile)
-
-	out.SetList("ExecIDs", container.GetExecIDs())
+	// make a copy to play with
+	hostConfig := *container.hostConfig
 
 	if children, err := daemon.Children(container.Name); err == nil {
 		for linkAlias, child := range children {
-			container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
+			hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
 		}
 	}
 	// we need this trick to preserve empty log driver, so
 	// container will use daemon defaults even if daemon change them
-	if container.hostConfig.LogConfig.Type == "" {
-		container.hostConfig.LogConfig = daemon.defaultLogConfig
-		defer func() {
-			container.hostConfig.LogConfig = runconfig.LogConfig{}
-		}()
+	if hostConfig.LogConfig.Type == "" {
+		hostConfig.LogConfig = daemon.defaultLogConfig
 	}
 
-	out.SetJson("HostConfig", container.hostConfig)
-
-	container.hostConfig.Links = nil
-	if _, err := out.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+	containerState := &types.ContainerState{
+		Running:    container.State.Running,
+		Paused:     container.State.Paused,
+		Restarting: container.State.Restarting,
+		OOMKilled:  container.State.OOMKilled,
+		Dead:       container.State.Dead,
+		Pid:        container.State.Pid,
+		ExitCode:   container.State.ExitCode,
+		Error:      container.State.Error,
+		StartedAt:  container.State.StartedAt,
+		FinishedAt: container.State.FinishedAt,
 	}
-	return engine.StatusOK
+
+	volumes := make(map[string]string)
+	volumesRW := make(map[string]bool)
+
+	for _, m := range container.MountPoints {
+		volumes[m.Destination] = m.Path()
+		volumesRW[m.Destination] = m.RW
+	}
+
+	contJSON := &types.ContainerJSON{
+		Id:              container.ID,
+		Created:         container.Created,
+		Path:            container.Path,
+		Args:            container.Args,
+		Config:          container.Config,
+		State:           containerState,
+		Image:           container.ImageID,
+		NetworkSettings: container.NetworkSettings,
+		ResolvConfPath:  container.ResolvConfPath,
+		HostnamePath:    container.HostnamePath,
+		HostsPath:       container.HostsPath,
+		LogPath:         container.LogPath,
+		Name:            container.Name,
+		RestartCount:    container.RestartCount,
+		Driver:          container.Driver,
+		ExecDriver:      container.ExecDriver,
+		MountLabel:      container.MountLabel,
+		ProcessLabel:    container.ProcessLabel,
+		Volumes:         volumes,
+		VolumesRW:       volumesRW,
+		AppArmorProfile: container.AppArmorProfile,
+		ExecIDs:         container.GetExecIDs(),
+		HostConfig:      &hostConfig,
+	}
+
+	return contJSON, nil
 }
 
-func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s ID", job.Name)
-	}
-	id := job.Args[0]
+func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
 	eConfig, err := daemon.getExecConfig(id)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 
-	b, err := json.Marshal(*eConfig)
-	if err != nil {
-		return job.Error(err)
-	}
-	job.Stdout.Write(b)
-	return engine.StatusOK
+	return eConfig, nil
 }
diff --git a/daemon/kill.go b/daemon/kill.go
index 84094f8..5d828f1 100644
--- a/daemon/kill.go
+++ b/daemon/kill.go
@@ -1,60 +1,31 @@
 package daemon
 
 import (
-	"strconv"
-	"strings"
+	"fmt"
 	"syscall"
-
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/signal"
 )
 
 // ContainerKill send signal to the container
 // If no signal is given (sig 0), then Kill with SIGKILL and wait
 // for the container to exit.
 // If a signal is given, then just send it to the container and return.
-func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
-	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		sig  uint64
-		err  error
-	)
-
-	// If we have a signal, look at it. Otherwise, do nothing
-	if len(job.Args) == 2 && job.Args[1] != "" {
-		// Check if we passed the signal as a number:
-		// The largest legal signal is 31, so let's parse on 5 bits
-		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
-		if err != nil {
-			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
-			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
-		}
-
-		if sig == 0 {
-			return job.Errorf("Invalid signal: %s", job.Args[1])
-		}
-	}
-
+func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
 	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
 		if err := container.Kill(); err != nil {
-			return job.Errorf("Cannot kill container %s: %s", name, err)
+			return fmt.Errorf("Cannot kill container %s: %s", name, err)
 		}
-		container.LogEvent("kill")
 	} else {
 		// Otherwise, just send the requested signal
 		if err := container.KillSig(int(sig)); err != nil {
-			return job.Errorf("Cannot kill container %s: %s", name, err)
+			return fmt.Errorf("Cannot kill container %s: %s", name, err)
 		}
-		// FIXME: Add event for signals
 	}
-	return engine.StatusOK
+	container.LogEvent("kill")
+	return nil
 }
diff --git a/daemon/list.go b/daemon/list.go
index 130ac05..9924298 100644
--- a/daemon/list.go
+++ b/daemon/list.go
@@ -6,12 +6,9 @@
 	"strconv"
 	"strings"
 
-	"github.com/docker/docker/graph"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/nat"
 	"github.com/docker/docker/pkg/graphdb"
-	"github.com/docker/docker/utils"
-
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/filters"
 )
 
@@ -20,31 +17,37 @@
 	return daemon.containers.List()
 }
 
-func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
+type ContainersConfig struct {
+	All     bool
+	Since   string
+	Before  string
+	Limit   int
+	Size    bool
+	Filters string
+}
+
+func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
 	var (
 		foundBefore bool
 		displayed   int
-		all         = job.GetenvBool("all")
-		since       = job.Getenv("since")
-		before      = job.Getenv("before")
-		n           = job.GetenvInt("limit")
-		size        = job.GetenvBool("size")
+		all         = config.All
+		n           = config.Limit
 		psFilters   filters.Args
-		filt_exited []int
+		filtExited  []int
 	)
-	outs := engine.NewTable("Created", 0)
+	containers := []*types.Container{}
 
-	psFilters, err := filters.FromParam(job.Getenv("filters"))
+	psFilters, err := filters.FromParam(config.Filters)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 	if i, ok := psFilters["exited"]; ok {
 		for _, value := range i {
 			code, err := strconv.Atoi(value)
 			if err != nil {
-				return job.Error(err)
+				return nil, err
 			}
-			filt_exited = append(filt_exited, code)
+			filtExited = append(filtExited, code)
 		}
 	}
 
@@ -62,17 +65,17 @@
 	}, 1)
 
 	var beforeCont, sinceCont *Container
-	if before != "" {
-		beforeCont, err = daemon.Get(before)
+	if config.Before != "" {
+		beforeCont, err = daemon.Get(config.Before)
 		if err != nil {
-			return job.Error(err)
+			return nil, err
 		}
 	}
 
-	if since != "" {
-		sinceCont, err = daemon.Get(since)
+	if config.Since != "" {
+		sinceCont, err = daemon.Get(config.Since)
 		if err != nil {
-			return job.Error(err)
+			return nil, err
 		}
 	}
 
@@ -80,7 +83,7 @@
 	writeCont := func(container *Container) error {
 		container.Lock()
 		defer container.Unlock()
-		if !container.Running && !all && n <= 0 && since == "" && before == "" {
+		if !container.Running && !all && n <= 0 && config.Since == "" && config.Before == "" {
 			return nil
 		}
 		if !psFilters.Match("name", container.Name) {
@@ -95,7 +98,7 @@
 			return nil
 		}
 
-		if before != "" && !foundBefore {
+		if config.Before != "" && !foundBefore {
 			if container.ID == beforeCont.ID {
 				foundBefore = true
 			}
@@ -104,20 +107,20 @@
 		if n > 0 && displayed == n {
 			return errLast
 		}
-		if since != "" {
+		if config.Since != "" {
 			if container.ID == sinceCont.ID {
 				return errLast
 			}
 		}
-		if len(filt_exited) > 0 {
-			should_skip := true
-			for _, code := range filt_exited {
+		if len(filtExited) > 0 {
+			shouldSkip := true
+			for _, code := range filtExited {
 				if code == container.ExitCode && !container.Running {
-					should_skip = false
+					shouldSkip = false
 					break
 				}
 			}
-			if should_skip {
+			if shouldSkip {
 				return nil
 			}
 		}
@@ -126,15 +129,11 @@
 			return nil
 		}
 		displayed++
-		out := &engine.Env{}
-		out.SetJson("Id", container.ID)
-		out.SetList("Names", names[container.ID])
-		img := container.Config.Image
-		_, tag := parsers.ParseRepositoryTag(container.Config.Image)
-		if tag == "" {
-			img = utils.ImageReference(img, graph.DEFAULTTAG)
+		newC := &types.Container{
+			ID:    container.ID,
+			Names: names[container.ID],
 		}
-		out.SetJson("Image", img)
+		newC.Image = container.Config.Image
 		if len(container.Args) > 0 {
 			args := []string{}
 			for _, arg := range container.Args {
@@ -146,38 +145,51 @@
 			}
 			argsAsString := strings.Join(args, " ")
 
-			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
+			newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
 		} else {
-			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
+			newC.Command = fmt.Sprintf("%s", container.Path)
 		}
-		out.SetInt64("Created", container.Created.Unix())
-		out.Set("Status", container.State.String())
-		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
-		if err != nil {
-			return err
+		newC.Created = int(container.Created.Unix())
+		newC.Status = container.State.String()
+
+		newC.Ports = []types.Port{}
+		for port, bindings := range container.NetworkSettings.Ports {
+			p, _ := nat.ParsePort(port.Port())
+			if len(bindings) == 0 {
+				newC.Ports = append(newC.Ports, types.Port{
+					PrivatePort: p,
+					Type:        port.Proto(),
+				})
+				continue
+			}
+			for _, binding := range bindings {
+				h, _ := nat.ParsePort(binding.HostPort)
+				newC.Ports = append(newC.Ports, types.Port{
+					PrivatePort: p,
+					PublicPort:  h,
+					Type:        port.Proto(),
+					IP:          binding.HostIp,
+				})
+			}
 		}
-		out.Set("Ports", str)
-		if size {
+
+		if config.Size {
 			sizeRw, sizeRootFs := container.GetSize()
-			out.SetInt64("SizeRw", sizeRw)
-			out.SetInt64("SizeRootFs", sizeRootFs)
+			newC.SizeRw = int(sizeRw)
+			newC.SizeRootFs = int(sizeRootFs)
 		}
-		out.SetJson("Labels", container.Config.Labels)
-		outs.Add(out)
+		newC.Labels = container.Config.Labels
+		containers = append(containers, newC)
 		return nil
 	}
 
 	for _, container := range daemon.List() {
 		if err := writeCont(container); err != nil {
 			if err != errLast {
-				return job.Error(err)
+				return nil, err
 			}
 			break
 		}
 	}
-	outs.ReverseSort()
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+	return containers, nil
 }
diff --git a/daemon/logdrivers_linux.go b/daemon/logdrivers_linux.go
new file mode 100644
index 0000000..e59345f
--- /dev/null
+++ b/daemon/logdrivers_linux.go
@@ -0,0 +1,9 @@
+package daemon
+
+// Importing packages here only to make sure their init gets called and
+// therefore they register themselves to the logdriver factory.
+import (
+	_ "github.com/docker/docker/daemon/logger/journald"
+	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
+	_ "github.com/docker/docker/daemon/logger/syslog"
+)
diff --git a/daemon/logdrivers_windows.go b/daemon/logdrivers_windows.go
new file mode 100644
index 0000000..5dcbe71
--- /dev/null
+++ b/daemon/logdrivers_windows.go
@@ -0,0 +1,7 @@
+package daemon
+
+// Importing packages here only to make sure their init gets called and
+// therefore they register themselves to the logdriver factory.
+import (
+	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
+)
diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go
index 45f76ac..54e60ee 100644
--- a/daemon/logger/copier_test.go
+++ b/daemon/logger/copier_test.go
@@ -3,6 +3,7 @@
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"io"
 	"testing"
 	"time"
@@ -12,16 +13,14 @@
 	*json.Encoder
 }
 
-func (l *TestLoggerJSON) Log(m *Message) error {
-	return l.Encode(m)
-}
+func (l *TestLoggerJSON) Log(m *Message) error { return l.Encode(m) }
 
-func (l *TestLoggerJSON) Close() error {
-	return nil
-}
+func (l *TestLoggerJSON) Close() error { return nil }
 
-func (l *TestLoggerJSON) Name() string {
-	return "json"
+func (l *TestLoggerJSON) Name() string { return "json" }
+
+func (l *TestLoggerJSON) GetReader() (io.Reader, error) {
+	return nil, errors.New("not used in the test")
 }
 
 type TestLoggerText struct {
@@ -33,12 +32,12 @@
 	return err
 }
 
-func (l *TestLoggerText) Close() error {
-	return nil
-}
+func (l *TestLoggerText) Close() error { return nil }
 
-func (l *TestLoggerText) Name() string {
-	return "text"
+func (l *TestLoggerText) Name() string { return "text" }
+
+func (l *TestLoggerText) GetReader() (io.Reader, error) {
+	return nil, errors.New("not used in the test")
 }
 
 func TestCopier(t *testing.T) {
diff --git a/daemon/logger/factory.go b/daemon/logger/factory.go
new file mode 100644
index 0000000..80234ee
--- /dev/null
+++ b/daemon/logger/factory.go
@@ -0,0 +1,57 @@
+package logger
+
+import (
+	"fmt"
+	"sync"
+)
+
+// Creator is a method that builds a logging driver instance with given context
+type Creator func(Context) (Logger, error)
+
+// Context provides enough information for a logging driver to do its function
+type Context struct {
+	Config        map[string]string
+	ContainerID   string
+	ContainerName string
+	LogPath       string
+}
+
+type logdriverFactory struct {
+	registry map[string]Creator
+	m        sync.Mutex
+}
+
+func (lf *logdriverFactory) register(name string, c Creator) error {
+	lf.m.Lock()
+	defer lf.m.Unlock()
+
+	if _, ok := lf.registry[name]; ok {
+		return fmt.Errorf("logger: log driver named '%s' is already registered", name)
+	}
+	lf.registry[name] = c
+	return nil
+}
+
+func (lf *logdriverFactory) get(name string) (Creator, error) {
+	lf.m.Lock()
+	defer lf.m.Unlock()
+
+	c, ok := lf.registry[name]
+	if !ok {
+		return c, fmt.Errorf("logger: no log driver named '%s' is registered", name)
+	}
+	return c, nil
+}
+
+var factory = &logdriverFactory{registry: make(map[string]Creator)} // global factory instance
+
+// RegisterLogDriver registers the given logging driver builder with given logging
+// driver name.
+func RegisterLogDriver(name string, c Creator) error {
+	return factory.register(name, c)
+}
+
+// GetLogDriver provides the logging driver builder for a logging driver name.
+func GetLogDriver(name string) (Creator, error) {
+	return factory.get(name)
+}
diff --git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go
new file mode 100644
index 0000000..7aa28f0
--- /dev/null
+++ b/daemon/logger/journald/journald.go
@@ -0,0 +1,60 @@
+// +build linux
+
+package journald
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/coreos/go-systemd/journal"
+	"github.com/docker/docker/daemon/logger"
+)
+
+const name = "journald"
+
+type Journald struct {
+	Jmap map[string]string
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+func New(ctx logger.Context) (logger.Logger, error) {
+	if !journal.Enabled() {
+		return nil, fmt.Errorf("journald is not enabled on this host")
+	}
+	// Strip a leading slash so that people can search for
+	// CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo.
+	name := ctx.ContainerName
+	if name[0] == '/' {
+		name = name[1:]
+	}
+	jmap := map[string]string{
+		"CONTAINER_ID":      ctx.ContainerID[:12],
+		"CONTAINER_ID_FULL": ctx.ContainerID,
+		"CONTAINER_NAME":    name}
+	return &Journald{Jmap: jmap}, nil
+}
+
+func (s *Journald) Log(msg *logger.Message) error {
+	if msg.Source == "stderr" {
+		return journal.Send(string(msg.Line), journal.PriErr, s.Jmap)
+	}
+	return journal.Send(string(msg.Line), journal.PriInfo, s.Jmap)
+}
+
+func (s *Journald) Close() error {
+	return nil
+}
+
+func (s *Journald) Name() string {
+	return name
+}
+
+func (s *Journald) GetReader() (io.Reader, error) {
+	return nil, logger.ReadLogsNotSupported
+}
diff --git a/daemon/logger/journald/journald_unsupported.go b/daemon/logger/journald/journald_unsupported.go
new file mode 100644
index 0000000..110833c
--- /dev/null
+++ b/daemon/logger/journald/journald_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package journald
diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go
index faa6bf9..3931e27 100644
--- a/daemon/logger/jsonfilelog/jsonfilelog.go
+++ b/daemon/logger/jsonfilelog/jsonfilelog.go
@@ -2,11 +2,18 @@
 
 import (
 	"bytes"
+	"io"
 	"os"
 	"sync"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/timeutils"
+)
+
+const (
+	Name = "json-file"
 )
 
 // JSONFileLogger is Logger implementation for default docker logging:
@@ -15,17 +22,26 @@
 	buf *bytes.Buffer
 	f   *os.File   // store for closing
 	mu  sync.Mutex // protects buffer
+
+	ctx logger.Context
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(Name, New); err != nil {
+		logrus.Fatal(err)
+	}
 }
 
 // New creates new JSONFileLogger which writes to filename
-func New(filename string) (logger.Logger, error) {
-	log, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
+func New(ctx logger.Context) (logger.Logger, error) {
+	log, err := os.OpenFile(ctx.LogPath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
 	if err != nil {
 		return nil, err
 	}
 	return &JSONFileLogger{
 		f:   log,
 		buf: bytes.NewBuffer(nil),
+		ctx: ctx,
 	}, nil
 }
 
@@ -33,7 +49,12 @@
 func (l *JSONFileLogger) Log(msg *logger.Message) error {
 	l.mu.Lock()
 	defer l.mu.Unlock()
-	err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSONBuf(l.buf)
+
+	timestamp, err := timeutils.FastMarshalJSON(msg.Timestamp)
+	if err != nil {
+		return err
+	}
+	err = (&jsonlog.JSONLogBytes{Log: append(msg.Line, '\n'), Stream: msg.Source, Created: timestamp}).MarshalJSONBuf(l.buf)
 	if err != nil {
 		return err
 	}
@@ -47,6 +68,14 @@
 	return nil
 }
 
+func (l *JSONFileLogger) GetReader() (io.Reader, error) {
+	return os.Open(l.ctx.LogPath)
+}
+
+func (l *JSONFileLogger) LogPath() string {
+	return l.ctx.LogPath
+}
+
 // Close closes underlying file
 func (l *JSONFileLogger) Close() error {
 	return l.f.Close()
@@ -54,5 +83,5 @@
 
 // Name returns name of this logger
 func (l *JSONFileLogger) Name() string {
-	return "JSONFile"
+	return Name
 }
diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go
index e951c1b..568650b 100644
--- a/daemon/logger/jsonfilelog/jsonfilelog_test.go
+++ b/daemon/logger/jsonfilelog/jsonfilelog_test.go
@@ -12,18 +12,22 @@
 )
 
 func TestJSONFileLogger(t *testing.T) {
+	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
 	tmp, err := ioutil.TempDir("", "docker-logger-")
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer os.RemoveAll(tmp)
 	filename := filepath.Join(tmp, "container.log")
-	l, err := New(filename)
+	l, err := New(logger.Context{
+		ContainerID: cid,
+		LogPath:     filename,
+	})
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer l.Close()
-	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
+
 	if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line1"), Source: "src1"}); err != nil {
 		t.Fatal(err)
 	}
@@ -48,18 +52,22 @@
 }
 
 func BenchmarkJSONFileLogger(b *testing.B) {
+	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
 	tmp, err := ioutil.TempDir("", "docker-logger-")
 	if err != nil {
 		b.Fatal(err)
 	}
 	defer os.RemoveAll(tmp)
 	filename := filepath.Join(tmp, "container.log")
-	l, err := New(filename)
+	l, err := New(logger.Context{
+		ContainerID: cid,
+		LogPath:     filename,
+	})
 	if err != nil {
 		b.Fatal(err)
 	}
 	defer l.Close()
-	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
+
 	testLine := "Line that thinks that it is log line from docker\n"
 	msg := &logger.Message{ContainerID: cid, Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()}
 	jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON()
diff --git a/daemon/logger/logger.go b/daemon/logger/logger.go
index 078e67d..29aafd3 100644
--- a/daemon/logger/logger.go
+++ b/daemon/logger/logger.go
@@ -1,6 +1,12 @@
 package logger
 
-import "time"
+import (
+	"errors"
+	"io"
+	"time"
+)
+
+var ReadLogsNotSupported = errors.New("configured logging reader does not support reading")
 
 // Message is datastructure that represents record from some container
 type Message struct {
@@ -15,4 +21,5 @@
 	Log(*Message) error
 	Name() string
 	Close() error
+	GetReader() (io.Reader, error)
 }
diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go
index eecb333..6a6377f 100644
--- a/daemon/logger/syslog/syslog.go
+++ b/daemon/logger/syslog/syslog.go
@@ -1,26 +1,37 @@
+// +build linux
+
 package syslog
 
 import (
 	"fmt"
+	"io"
 	"log/syslog"
 	"os"
 	"path"
-	"sync"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/logger"
 )
 
+const name = "syslog"
+
 type Syslog struct {
 	writer *syslog.Writer
-	tag    string
-	mu     sync.Mutex
 }
 
-func New(tag string) (logger.Logger, error) {
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+func New(ctx logger.Context) (logger.Logger, error) {
+	tag := ctx.ContainerID[:12]
 	log, err := syslog.New(syslog.LOG_DAEMON, fmt.Sprintf("%s/%s", path.Base(os.Args[0]), tag))
 	if err != nil {
 		return nil, err
 	}
+
 	return &Syslog{
 		writer: log,
 	}, nil
@@ -34,12 +45,13 @@
 }
 
 func (s *Syslog) Close() error {
-	if s.writer != nil {
-		return s.writer.Close()
-	}
-	return nil
+	return s.writer.Close()
 }
 
 func (s *Syslog) Name() string {
-	return "Syslog"
+	return name
+}
+
+func (s *Syslog) GetReader() (io.Reader, error) {
+	return nil, logger.ReadLogsNotSupported
 }
diff --git a/daemon/logger/syslog/syslog_unsupported.go b/daemon/logger/syslog/syslog_unsupported.go
new file mode 100644
index 0000000..50cc51b
--- /dev/null
+++ b/daemon/logger/syslog/syslog_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package syslog
diff --git a/daemon/logs.go b/daemon/logs.go
index 356d08c..d388b9c 100644
--- a/daemon/logs.go
+++ b/daemon/logs.go
@@ -5,85 +5,83 @@
 	"encoding/json"
 	"fmt"
 	"io"
+	"net"
 	"os"
 	"strconv"
-	"sync"
+	"syscall"
+	"time"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger/jsonfilelog"
 	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/docker/pkg/tailfile"
 	"github.com/docker/docker/pkg/timeutils"
 )
 
-func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
+type ContainerLogsConfig struct {
+	Follow, Timestamps   bool
+	Tail                 string
+	Since                time.Time
+	UseStdout, UseStderr bool
+	OutStream            io.Writer
+}
 
+func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) error {
 	var (
-		name   = job.Args[0]
-		stdout = job.GetenvBool("stdout")
-		stderr = job.GetenvBool("stderr")
-		tail   = job.Getenv("tail")
-		follow = job.GetenvBool("follow")
-		times  = job.GetenvBool("timestamps")
 		lines  = -1
 		format string
 	)
-	if !(stdout || stderr) {
-		return job.Errorf("You must choose at least one stream")
+	if !(config.UseStdout || config.UseStderr) {
+		return fmt.Errorf("You must choose at least one stream")
 	}
-	if times {
+	if config.Timestamps {
 		format = timeutils.RFC3339NanoFixed
 	}
-	if tail == "" {
-		tail = "all"
+	if config.Tail == "" {
+		config.Tail = "all"
 	}
+
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
-	if container.LogDriverType() != "json-file" {
-		return job.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
-	}
-	cLog, err := container.ReadLog("json")
-	if err != nil && os.IsNotExist(err) {
-		// Legacy logs
-		log.Debugf("Old logs format")
-		if stdout {
-			cLog, err := container.ReadLog("stdout")
-			if err != nil {
-				log.Errorf("Error reading logs (stdout): %s", err)
-			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
-				log.Errorf("Error streaming logs (stdout): %s", err)
-			}
-		}
-		if stderr {
-			cLog, err := container.ReadLog("stderr")
-			if err != nil {
-				log.Errorf("Error reading logs (stderr): %s", err)
-			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
-				log.Errorf("Error streaming logs (stderr): %s", err)
-			}
-		}
-	} else if err != nil {
-		log.Errorf("Error reading logs (json): %s", err)
+
+	var (
+		outStream = config.OutStream
+		errStream io.Writer
+	)
+	if !container.Config.Tty {
+		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
+		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
 	} else {
-		if tail != "all" {
+		errStream = outStream
+	}
+
+	if container.LogDriverType() != jsonfilelog.Name {
+		return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
+	}
+	logDriver, err := container.getLogger()
+	cLog, err := logDriver.GetReader()
+	if err != nil {
+		logrus.Errorf("Error reading logs: %s", err)
+	} else {
+		// json-file driver
+		if config.Tail != "all" {
 			var err error
-			lines, err = strconv.Atoi(tail)
+			lines, err = strconv.Atoi(config.Tail)
 			if err != nil {
-				log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
+				logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", config.Tail, err)
 				lines = -1
 			}
 		}
+
 		if lines != 0 {
 			if lines > 0 {
 				f := cLog.(*os.File)
 				ls, err := tailfile.TailFile(f, lines)
 				if err != nil {
-					return job.Error(err)
+					return err
 				}
 				tmp := bytes.NewBuffer([]byte{})
 				for _, l := range ls {
@@ -91,62 +89,75 @@
 				}
 				cLog = tmp
 			}
+
 			dec := json.NewDecoder(cLog)
 			l := &jsonlog.JSONLog{}
 			for {
+				l.Reset()
 				if err := dec.Decode(l); err == io.EOF {
 					break
 				} else if err != nil {
-					log.Errorf("Error streaming logs: %s", err)
+					logrus.Errorf("Error streaming logs: %s", err)
 					break
 				}
 				logLine := l.Log
-				if times {
+				if !config.Since.IsZero() && l.Created.Before(config.Since) {
+					continue
+				}
+				if config.Timestamps {
 					// format can be "" or time format, so here can't be error
 					logLine, _ = l.Format(format)
 				}
-				if l.Stream == "stdout" && stdout {
-					io.WriteString(job.Stdout, logLine)
+				if l.Stream == "stdout" && config.UseStdout {
+					io.WriteString(outStream, logLine)
 				}
-				if l.Stream == "stderr" && stderr {
-					io.WriteString(job.Stderr, logLine)
+				if l.Stream == "stderr" && config.UseStderr {
+					io.WriteString(errStream, logLine)
 				}
-				l.Reset()
 			}
 		}
 	}
-	if follow && container.IsRunning() {
-		errors := make(chan error, 2)
-		wg := sync.WaitGroup{}
 
-		if stdout {
-			wg.Add(1)
-			stdoutPipe := container.StdoutLogPipe()
-			defer stdoutPipe.Close()
+	if config.Follow && container.IsRunning() {
+		chErr := make(chan error)
+		var stdoutPipe, stderrPipe io.ReadCloser
+
+		// write an empty chunk of data (this is to ensure that the
+		// HTTP Response is sent immediatly, even if the container has
+		// not yet produced any data)
+		outStream.Write(nil)
+
+		if config.UseStdout {
+			stdoutPipe = container.StdoutLogPipe()
 			go func() {
-				errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format)
-				wg.Done()
+				logrus.Debug("logs: stdout stream begin")
+				chErr <- jsonlog.WriteLog(stdoutPipe, outStream, format, config.Since)
+				logrus.Debug("logs: stdout stream end")
 			}()
 		}
-		if stderr {
-			wg.Add(1)
-			stderrPipe := container.StderrLogPipe()
-			defer stderrPipe.Close()
+		if config.UseStderr {
+			stderrPipe = container.StderrLogPipe()
 			go func() {
-				errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format)
-				wg.Done()
+				logrus.Debug("logs: stderr stream begin")
+				chErr <- jsonlog.WriteLog(stderrPipe, errStream, format, config.Since)
+				logrus.Debug("logs: stderr stream end")
 			}()
 		}
 
-		wg.Wait()
-		close(errors)
+		err = <-chErr
+		if stdoutPipe != nil {
+			stdoutPipe.Close()
+		}
+		if stderrPipe != nil {
+			stderrPipe.Close()
+		}
+		<-chErr // wait for 2nd goroutine to exit, otherwise bad things will happen
 
-		for err := range errors {
-			if err != nil {
-				log.Errorf("%s", err)
+		if err != nil && err != io.EOF && err != io.ErrClosedPipe {
+			if e, ok := err.(*net.OpError); ok && e.Err != syscall.EPIPE {
+				logrus.Errorf("error streaming logs: %v", err)
 			}
 		}
-
 	}
-	return engine.StatusOK
+	return nil
 }
diff --git a/daemon/monitor.go b/daemon/monitor.go
index 7c18b7a..dfade8e 100644
--- a/daemon/monitor.go
+++ b/daemon/monitor.go
@@ -6,9 +6,9 @@
 	"sync"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
 )
 
@@ -89,7 +89,7 @@
 	// because they share same runconfig and change image. Must be fixed
 	// in builder/builder.go
 	if err := m.container.toDisk(); err != nil {
-		log.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
+		logrus.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
 
 		return err
 	}
@@ -145,7 +145,7 @@
 				return err
 			}
 
-			log.Errorf("Error running container: %s", err)
+			logrus.Errorf("Error running container: %s", err)
 		}
 
 		// here container.Lock is already lost
@@ -223,14 +223,14 @@
 		return false
 	}
 
-	switch m.restartPolicy.Name {
-	case "always":
+	switch {
+	case m.restartPolicy.IsAlways():
 		return true
-	case "on-failure":
+	case m.restartPolicy.IsOnFailure():
 		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
 		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
-			log.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
-				common.TruncateID(m.container.ID), max)
+			logrus.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
+				stringid.TruncateID(m.container.ID), max)
 			return false
 		}
 
@@ -263,7 +263,7 @@
 	}
 
 	if err := m.container.ToDisk(); err != nil {
-		log.Debugf("%s", err)
+		logrus.Debugf("%s", err)
 	}
 }
 
@@ -279,21 +279,21 @@
 
 	if container.Config.OpenStdin {
 		if err := container.stdin.Close(); err != nil {
-			log.Errorf("%s: Error close stdin: %s", container.ID, err)
+			logrus.Errorf("%s: Error close stdin: %s", container.ID, err)
 		}
 	}
 
 	if err := container.stdout.Clean(); err != nil {
-		log.Errorf("%s: Error close stdout: %s", container.ID, err)
+		logrus.Errorf("%s: Error close stdout: %s", container.ID, err)
 	}
 
 	if err := container.stderr.Clean(); err != nil {
-		log.Errorf("%s: Error close stderr: %s", container.ID, err)
+		logrus.Errorf("%s: Error close stderr: %s", container.ID, err)
 	}
 
 	if container.command != nil && container.command.ProcessConfig.Terminal != nil {
 		if err := container.command.ProcessConfig.Terminal.Close(); err != nil {
-			log.Errorf("%s: Error closing terminal: %s", container.ID, err)
+			logrus.Errorf("%s: Error closing terminal: %s", container.ID, err)
 		}
 	}
 
@@ -311,7 +311,7 @@
 			}()
 			select {
 			case <-time.After(1 * time.Second):
-				log.Warnf("Logger didn't exit in time: logs may be truncated")
+				logrus.Warnf("Logger didn't exit in time: logs may be truncated")
 			case <-exit:
 			}
 		}
diff --git a/daemon/network/settings.go b/daemon/network/settings.go
new file mode 100644
index 0000000..ca60ff1
--- /dev/null
+++ b/daemon/network/settings.go
@@ -0,0 +1,29 @@
+package network
+
+import "github.com/docker/docker/nat"
+
+type Address struct {
+	Addr      string
+	PrefixLen int
+}
+
+type Settings struct {
+	Bridge                 string
+	EndpointID             string
+	Gateway                string
+	GlobalIPv6Address      string
+	GlobalIPv6PrefixLen    int
+	HairpinMode            bool
+	IPAddress              string
+	IPPrefixLen            int
+	IPv6Gateway            string
+	LinkLocalIPv6Address   string
+	LinkLocalIPv6PrefixLen int
+	MacAddress             string
+	NetworkID              string
+	PortMapping            map[string]map[string]string // Deprecated
+	Ports                  nat.PortMap
+	SandboxKey             string
+	SecondaryIPAddresses   []Address
+	SecondaryIPv6Addresses []Address
+}
diff --git a/daemon/network_settings.go b/daemon/network_settings.go
deleted file mode 100644
index 97c2e3a..0000000
--- a/daemon/network_settings.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package daemon
-
-import (
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/nat"
-)
-
-// FIXME: move deprecated port stuff to nat to clean up the core.
-type PortMapping map[string]string // Deprecated
-
-type NetworkSettings struct {
-	IPAddress              string
-	IPPrefixLen            int
-	MacAddress             string
-	LinkLocalIPv6Address   string
-	LinkLocalIPv6PrefixLen int
-	GlobalIPv6Address      string
-	GlobalIPv6PrefixLen    int
-	Gateway                string
-	IPv6Gateway            string
-	Bridge                 string
-	PortMapping            map[string]PortMapping // Deprecated
-	Ports                  nat.PortMap
-}
-
-func (settings *NetworkSettings) PortMappingAPI() *engine.Table {
-	var outs = engine.NewTable("", 0)
-	for port, bindings := range settings.Ports {
-		p, _ := nat.ParsePort(port.Port())
-		if len(bindings) == 0 {
-			out := &engine.Env{}
-			out.SetInt("PrivatePort", p)
-			out.Set("Type", port.Proto())
-			outs.Add(out)
-			continue
-		}
-		for _, binding := range bindings {
-			out := &engine.Env{}
-			h, _ := nat.ParsePort(binding.HostPort)
-			out.SetInt("PrivatePort", p)
-			out.SetInt("PublicPort", h)
-			out.Set("Type", port.Proto())
-			out.Set("IP", binding.HostIp)
-			outs.Add(out)
-		}
-	}
-	return outs
-}
diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go
deleted file mode 100644
index 30a3a12..0000000
--- a/daemon/networkdriver/bridge/driver.go
+++ /dev/null
@@ -1,740 +0,0 @@
-package bridge
-
-import (
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"os"
-	"os/exec"
-	"strings"
-	"sync"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon/networkdriver"
-	"github.com/docker/docker/daemon/networkdriver/ipallocator"
-	"github.com/docker/docker/daemon/networkdriver/portmapper"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/nat"
-	"github.com/docker/docker/pkg/iptables"
-	"github.com/docker/docker/pkg/networkfs/resolvconf"
-	"github.com/docker/docker/pkg/parsers/kernel"
-	"github.com/docker/libcontainer/netlink"
-)
-
-const (
-	DefaultNetworkBridge     = "docker0"
-	MaxAllocatedPortAttempts = 10
-)
-
-// Network interface represents the networking stack of a container
-type networkInterface struct {
-	IP           net.IP
-	IPv6         net.IP
-	PortMappings []net.Addr // There are mappings to the host interfaces
-}
-
-type ifaces struct {
-	c map[string]*networkInterface
-	sync.Mutex
-}
-
-func (i *ifaces) Set(key string, n *networkInterface) {
-	i.Lock()
-	i.c[key] = n
-	i.Unlock()
-}
-
-func (i *ifaces) Get(key string) *networkInterface {
-	i.Lock()
-	res := i.c[key]
-	i.Unlock()
-	return res
-}
-
-var (
-	addrs = []string{
-		// Here we don't follow the convention of using the 1st IP of the range for the gateway.
-		// This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges.
-		// In theory this shouldn't matter - in practice there's bound to be a few scripts relying
-		// on the internal addressing or other stupid things like that.
-		// They shouldn't, but hey, let's not break them unless we really have to.
-		"172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23
-		"10.0.42.1/16",   // Don't even try using the entire /8, that's too intrusive
-		"10.1.42.1/16",
-		"10.42.42.1/16",
-		"172.16.42.1/24",
-		"172.16.43.1/24",
-		"172.16.44.1/24",
-		"10.0.42.1/24",
-		"10.0.43.1/24",
-		"192.168.42.1/24",
-		"192.168.43.1/24",
-		"192.168.44.1/24",
-	}
-
-	bridgeIface       string
-	bridgeIPv4Network *net.IPNet
-	bridgeIPv6Addr    net.IP
-	globalIPv6Network *net.IPNet
-	portMapper        *portmapper.PortMapper
-	once              sync.Once
-
-	defaultBindingIP  = net.ParseIP("0.0.0.0")
-	currentInterfaces = ifaces{c: make(map[string]*networkInterface)}
-)
-
-func initPortMapper() {
-	once.Do(func() {
-		portMapper = portmapper.New()
-	})
-}
-
-func InitDriver(job *engine.Job) engine.Status {
-	var (
-		networkv4      *net.IPNet
-		networkv6      *net.IPNet
-		addrv4         net.Addr
-		addrsv6        []net.Addr
-		enableIPTables = job.GetenvBool("EnableIptables")
-		enableIPv6     = job.GetenvBool("EnableIPv6")
-		icc            = job.GetenvBool("InterContainerCommunication")
-		ipMasq         = job.GetenvBool("EnableIpMasq")
-		ipForward      = job.GetenvBool("EnableIpForward")
-		bridgeIP       = job.Getenv("BridgeIP")
-		bridgeIPv6     = "fe80::1/64"
-		fixedCIDR      = job.Getenv("FixedCIDR")
-		fixedCIDRv6    = job.Getenv("FixedCIDRv6")
-	)
-
-	// try to modprobe bridge first
-	// see gh#12177
-	if out, err := exec.Command("modprobe", "-va", "bridge", "nf_nat").Output(); err != nil {
-		log.Warnf("Running modprobe bridge nf_nat failed with message: %s, error: %v", out, err)
-	}
-
-	initPortMapper()
-
-	if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" {
-		defaultBindingIP = net.ParseIP(defaultIP)
-	}
-
-	bridgeIface = job.Getenv("BridgeIface")
-	usingDefaultBridge := false
-	if bridgeIface == "" {
-		usingDefaultBridge = true
-		bridgeIface = DefaultNetworkBridge
-	}
-
-	addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface)
-
-	if err != nil {
-		// No Bridge existent, create one
-		// If we're not using the default bridge, fail without trying to create it
-		if !usingDefaultBridge {
-			return job.Error(err)
-		}
-
-		// If the iface is not found, try to create it
-		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
-			return job.Error(err)
-		}
-
-		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
-		if err != nil {
-			return job.Error(err)
-		}
-
-		if fixedCIDRv6 != "" {
-			// Setting route to global IPv6 subnet
-			log.Infof("Adding route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
-			if err := netlink.AddRoute(fixedCIDRv6, "", "", bridgeIface); err != nil {
-				log.Fatalf("Could not add route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface)
-			}
-		}
-	} else {
-		// Bridge exists already, getting info...
-		// Validate that the bridge ip matches the ip specified by BridgeIP
-		if bridgeIP != "" {
-			networkv4 = addrv4.(*net.IPNet)
-			bip, _, err := net.ParseCIDR(bridgeIP)
-			if err != nil {
-				return job.Error(err)
-			}
-			if !networkv4.IP.Equal(bip) {
-				return job.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
-			}
-		}
-
-		// A bridge might exist but not have any IPv6 addr associated with it yet
-		// (for example, an existing Docker installation that has only been used
-		// with IPv4 and docker0 already is set up) In that case, we can perform
-		// the bridge init for IPv6 here, else we will error out below if --ipv6=true
-		if len(addrsv6) == 0 && enableIPv6 {
-			if err := setupIPv6Bridge(bridgeIPv6); err != nil {
-				return job.Error(err)
-			}
-			// Recheck addresses now that IPv6 is setup on the bridge
-			addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
-			if err != nil {
-				return job.Error(err)
-			}
-		}
-
-		// TODO: Check if route to fixedCIDRv6 is set
-	}
-
-	if enableIPv6 {
-		bip6, _, err := net.ParseCIDR(bridgeIPv6)
-		if err != nil {
-			return job.Error(err)
-		}
-		found := false
-		for _, addrv6 := range addrsv6 {
-			networkv6 = addrv6.(*net.IPNet)
-			if networkv6.IP.Equal(bip6) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return job.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
-		}
-	}
-
-	networkv4 = addrv4.(*net.IPNet)
-
-	if enableIPv6 {
-		if len(addrsv6) == 0 {
-			return job.Error(errors.New("IPv6 enabled but no IPv6 detected"))
-		}
-		bridgeIPv6Addr = networkv6.IP
-	}
-
-	// Configure iptables for link support
-	if enableIPTables {
-		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
-			return job.Error(err)
-		}
-
-	}
-
-	if ipForward {
-		// Enable IPv4 forwarding
-		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
-			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
-		}
-
-		if fixedCIDRv6 != "" {
-			// Enable IPv6 forwarding
-			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
-				job.Logf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
-			}
-			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
-				job.Logf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
-			}
-		}
-	}
-
-	// We can always try removing the iptables
-	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
-		return job.Error(err)
-	}
-
-	if enableIPTables {
-		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
-		if err != nil {
-			return job.Error(err)
-		}
-		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
-		if err != nil {
-			return job.Error(err)
-		}
-		portMapper.SetIptablesChain(chain)
-	}
-
-	bridgeIPv4Network = networkv4
-	if fixedCIDR != "" {
-		_, subnet, err := net.ParseCIDR(fixedCIDR)
-		if err != nil {
-			return job.Error(err)
-		}
-		log.Debugf("Subnet: %v", subnet)
-		if err := ipallocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
-			return job.Error(err)
-		}
-	}
-
-	if fixedCIDRv6 != "" {
-		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
-		if err != nil {
-			return job.Error(err)
-		}
-		log.Debugf("Subnet: %v", subnet)
-		if err := ipallocator.RegisterSubnet(subnet, subnet); err != nil {
-			return job.Error(err)
-		}
-		globalIPv6Network = subnet
-	}
-
-	// Block BridgeIP in IP allocator
-	ipallocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)
-
-	// https://github.com/docker/docker/issues/2768
-	job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)
-
-	for name, f := range map[string]engine.Handler{
-		"allocate_interface": Allocate,
-		"release_interface":  Release,
-		"allocate_port":      AllocatePort,
-		"link":               LinkContainers,
-	} {
-		if err := job.Eng.Register(name, f); err != nil {
-			return job.Error(err)
-		}
-	}
-	return engine.StatusOK
-}
-
-func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
-	// Enable NAT
-
-	if ipmasq {
-		natArgs := []string{"-s", addr.String(), "!", "-o", bridgeIface, "-j", "MASQUERADE"}
-
-		if !iptables.Exists(iptables.Nat, "POSTROUTING", natArgs...) {
-			if output, err := iptables.Raw(append([]string{
-				"-t", string(iptables.Nat), "-I", "POSTROUTING"}, natArgs...)...); err != nil {
-				return fmt.Errorf("Unable to enable network bridge NAT: %s", err)
-			} else if len(output) != 0 {
-				return &iptables.ChainError{Chain: "POSTROUTING", Output: output}
-			}
-		}
-	}
-
-	var (
-		args       = []string{"-i", bridgeIface, "-o", bridgeIface, "-j"}
-		acceptArgs = append(args, "ACCEPT")
-		dropArgs   = append(args, "DROP")
-	)
-
-	if !icc {
-		iptables.Raw(append([]string{"-D", "FORWARD"}, acceptArgs...)...)
-
-		if !iptables.Exists(iptables.Filter, "FORWARD", dropArgs...) {
-			log.Debugf("Disable inter-container communication")
-			if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, dropArgs...)...); err != nil {
-				return fmt.Errorf("Unable to prevent intercontainer communication: %s", err)
-			} else if len(output) != 0 {
-				return fmt.Errorf("Error disabling intercontainer communication: %s", output)
-			}
-		}
-	} else {
-		iptables.Raw(append([]string{"-D", "FORWARD"}, dropArgs...)...)
-
-		if !iptables.Exists(iptables.Filter, "FORWARD", acceptArgs...) {
-			log.Debugf("Enable inter-container communication")
-			if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, acceptArgs...)...); err != nil {
-				return fmt.Errorf("Unable to allow intercontainer communication: %s", err)
-			} else if len(output) != 0 {
-				return fmt.Errorf("Error enabling intercontainer communication: %s", output)
-			}
-		}
-	}
-
-	// Accept all non-intercontainer outgoing packets
-	outgoingArgs := []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"}
-	if !iptables.Exists(iptables.Filter, "FORWARD", outgoingArgs...) {
-		if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, outgoingArgs...)...); err != nil {
-			return fmt.Errorf("Unable to allow outgoing packets: %s", err)
-		} else if len(output) != 0 {
-			return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output}
-		}
-	}
-
-	// Accept incoming packets for existing connections
-	existingArgs := []string{"-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}
-
-	if !iptables.Exists(iptables.Filter, "FORWARD", existingArgs...) {
-		if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, existingArgs...)...); err != nil {
-			return fmt.Errorf("Unable to allow incoming packets: %s", err)
-		} else if len(output) != 0 {
-			return &iptables.ChainError{Chain: "FORWARD incoming", Output: output}
-		}
-	}
-	return nil
-}
-
-func RequestPort(ip net.IP, proto string, port int) (int, error) {
-	initPortMapper()
-	return portMapper.Allocator.RequestPort(ip, proto, port)
-}
-
-// configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host
-// If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges
-// If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing
-// bridge (fixes issue #8444)
-// If an address which doesn't conflict with existing interfaces can't be found, an error is returned.
-func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error {
-	nameservers := []string{}
-	resolvConf, _ := resolvconf.Get()
-	// We don't check for an error here, because we don't really care
-	// if we can't read /etc/resolv.conf. So instead we skip the append
-	// if resolvConf is nil. It either doesn't exist, or we can't read it
-	// for some reason.
-	if resolvConf != nil {
-		nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...)
-	}
-
-	var ifaceAddr string
-	if len(bridgeIP) != 0 {
-		_, _, err := net.ParseCIDR(bridgeIP)
-		if err != nil {
-			return err
-		}
-		ifaceAddr = bridgeIP
-	} else {
-		for _, addr := range addrs {
-			_, dockerNetwork, err := net.ParseCIDR(addr)
-			if err != nil {
-				return err
-			}
-			if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil {
-				if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil {
-					ifaceAddr = addr
-					break
-				} else {
-					log.Debugf("%s %s", addr, err)
-				}
-			}
-		}
-	}
-
-	if ifaceAddr == "" {
-		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface)
-	}
-	log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
-
-	if err := createBridgeIface(bridgeIface); err != nil {
-		// The bridge may already exist, therefore we can ignore an "exists" error
-		if !os.IsExist(err) {
-			return err
-		}
-	}
-
-	iface, err := net.InterfaceByName(bridgeIface)
-	if err != nil {
-		return err
-	}
-
-	ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr)
-	if err != nil {
-		return err
-	}
-
-	if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
-		return fmt.Errorf("Unable to add private network: %s", err)
-	}
-
-	if enableIPv6 {
-		if err := setupIPv6Bridge(bridgeIPv6); err != nil {
-			return err
-		}
-	}
-
-	if err := netlink.NetworkLinkUp(iface); err != nil {
-		return fmt.Errorf("Unable to start network bridge: %s", err)
-	}
-	return nil
-}
-
-func setupIPv6Bridge(bridgeIPv6 string) error {
-
-	iface, err := net.InterfaceByName(bridgeIface)
-	if err != nil {
-		return err
-	}
-	// Enable IPv6 on the bridge
-	procFile := "/proc/sys/net/ipv6/conf/" + iface.Name + "/disable_ipv6"
-	if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil {
-		return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err)
-	}
-
-	ipAddr6, ipNet6, err := net.ParseCIDR(bridgeIPv6)
-	if err != nil {
-		return fmt.Errorf("Unable to parse bridge IPv6 address: %q, error: %v", bridgeIPv6, err)
-	}
-
-	if err := netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil {
-		return fmt.Errorf("Unable to add private IPv6 network: %v", err)
-	}
-
-	return nil
-}
-
-func createBridgeIface(name string) error {
-	kv, err := kernel.GetKernelVersion()
-	// Only set the bridge's mac address if the kernel version is > 3.3
-	// before that it was not supported
-	setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3)
-	log.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
-	return netlink.CreateBridge(name, setBridgeMacAddr)
-}
-
-// Generate a IEEE802 compliant MAC address from the given IP address.
-//
-// The generator is guaranteed to be consistent: the same IP will always yield the same
-// MAC address. This is to avoid ARP cache issues.
-func generateMacAddr(ip net.IP) net.HardwareAddr {
-	hw := make(net.HardwareAddr, 6)
-
-	// The first byte of the MAC address has to comply with these rules:
-	// 1. Unicast: Set the least-significant bit to 0.
-	// 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1.
-	// 3. As "small" as possible: The veth address has to be "smaller" than the bridge address.
-	hw[0] = 0x02
-
-	// The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI).
-	// Since this address is locally administered, we can do whatever we want as long as
-	// it doesn't conflict with other addresses.
-	hw[1] = 0x42
-
-	// Insert the IP address into the last 32 bits of the MAC address.
-	// This is a simple way to guarantee the address will be consistent and unique.
-	copy(hw[2:], ip.To4())
-
-	return hw
-}
-
-func linkLocalIPv6FromMac(mac string) (string, error) {
-	hx := strings.Replace(mac, ":", "", -1)
-	hw, err := hex.DecodeString(hx)
-	if err != nil {
-		return "", errors.New("Could not parse MAC address " + mac)
-	}
-
-	hw[0] ^= 0x2
-
-	return fmt.Sprintf("fe80::%x%x:%xff:fe%x:%x%x/64", hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]), nil
-}
-
-// Allocate a network interface
-func Allocate(job *engine.Job) engine.Status {
-	var (
-		ip            net.IP
-		mac           net.HardwareAddr
-		err           error
-		id            = job.Args[0]
-		requestedIP   = net.ParseIP(job.Getenv("RequestedIP"))
-		requestedIPv6 = net.ParseIP(job.Getenv("RequestedIPv6"))
-		globalIPv6    net.IP
-	)
-
-	ip, err = ipallocator.RequestIP(bridgeIPv4Network, requestedIP)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	// If no explicit mac address was given, generate a random one.
-	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
-		mac = generateMacAddr(ip)
-	}
-
-	if globalIPv6Network != nil {
-		// If globalIPv6Network Size is at least a /80 subnet generate IPv6 address from MAC address
-		netmask_ones, _ := globalIPv6Network.Mask.Size()
-		if requestedIPv6 == nil && netmask_ones <= 80 {
-			requestedIPv6 = make(net.IP, len(globalIPv6Network.IP))
-			copy(requestedIPv6, globalIPv6Network.IP)
-			for i, h := range mac {
-				requestedIPv6[i+10] = h
-			}
-		}
-
-		globalIPv6, err = ipallocator.RequestIP(globalIPv6Network, requestedIPv6)
-		if err != nil {
-			log.Errorf("Allocator: RequestIP v6: %v", err)
-			return job.Error(err)
-		}
-		log.Infof("Allocated IPv6 %s", globalIPv6)
-	}
-
-	out := engine.Env{}
-	out.Set("IP", ip.String())
-	out.Set("Mask", bridgeIPv4Network.Mask.String())
-	out.Set("Gateway", bridgeIPv4Network.IP.String())
-	out.Set("MacAddress", mac.String())
-	out.Set("Bridge", bridgeIface)
-
-	size, _ := bridgeIPv4Network.Mask.Size()
-	out.SetInt("IPPrefixLen", size)
-
-	// If linklocal IPv6
-	localIPv6Net, err := linkLocalIPv6FromMac(mac.String())
-	if err != nil {
-		return job.Error(err)
-	}
-	localIPv6, _, _ := net.ParseCIDR(localIPv6Net)
-	out.Set("LinkLocalIPv6", localIPv6.String())
-	out.Set("MacAddress", mac.String())
-
-	if globalIPv6Network != nil {
-		out.Set("GlobalIPv6", globalIPv6.String())
-		sizev6, _ := globalIPv6Network.Mask.Size()
-		out.SetInt("GlobalIPv6PrefixLen", sizev6)
-		out.Set("IPv6Gateway", bridgeIPv6Addr.String())
-	}
-
-	currentInterfaces.Set(id, &networkInterface{
-		IP:   ip,
-		IPv6: globalIPv6,
-	})
-
-	out.WriteTo(job.Stdout)
-
-	return engine.StatusOK
-}
-
-// Release an interface for a select ip
-func Release(job *engine.Job) engine.Status {
-	var (
-		id                 = job.Args[0]
-		containerInterface = currentInterfaces.Get(id)
-	)
-
-	if containerInterface == nil {
-		return job.Errorf("No network information to release for %s", id)
-	}
-
-	for _, nat := range containerInterface.PortMappings {
-		if err := portMapper.Unmap(nat); err != nil {
-			log.Infof("Unable to unmap port %s: %s", nat, err)
-		}
-	}
-
-	if err := ipallocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil {
-		log.Infof("Unable to release IPv4 %s", err)
-	}
-	if globalIPv6Network != nil {
-		if err := ipallocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil {
-			log.Infof("Unable to release IPv6 %s", err)
-		}
-	}
-	return engine.StatusOK
-}
-
-// Allocate an external port and map it to the interface
-func AllocatePort(job *engine.Job) engine.Status {
-	var (
-		err error
-
-		ip            = defaultBindingIP
-		id            = job.Args[0]
-		hostIP        = job.Getenv("HostIP")
-		hostPort      = job.GetenvInt("HostPort")
-		containerPort = job.GetenvInt("ContainerPort")
-		proto         = job.Getenv("Proto")
-		network       = currentInterfaces.Get(id)
-	)
-
-	if hostIP != "" {
-		ip = net.ParseIP(hostIP)
-		if ip == nil {
-			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
-		}
-	}
-
-	// host ip, proto, and host port
-	var container net.Addr
-	switch proto {
-	case "tcp":
-		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
-	case "udp":
-		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
-	default:
-		return job.Errorf("unsupported address type %s", proto)
-	}
-
-	//
-	// Try up to 10 times to get a port that's not already allocated.
-	//
-	// In the event of failure to bind, return the error that portmapper.Map
-	// yields.
-	//
-
-	var host net.Addr
-	for i := 0; i < MaxAllocatedPortAttempts; i++ {
-		if host, err = portMapper.Map(container, ip, hostPort); err == nil {
-			break
-		}
-		// There is no point in immediately retrying to map an explicitly
-		// chosen port.
-		if hostPort != 0 {
-			job.Logf("Failed to allocate and map port %d: %s", hostPort, err)
-			break
-		}
-		job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1)
-	}
-
-	if err != nil {
-		return job.Error(err)
-	}
-
-	network.PortMappings = append(network.PortMappings, host)
-
-	out := engine.Env{}
-	switch netAddr := host.(type) {
-	case *net.TCPAddr:
-		out.Set("HostIP", netAddr.IP.String())
-		out.SetInt("HostPort", netAddr.Port)
-	case *net.UDPAddr:
-		out.Set("HostIP", netAddr.IP.String())
-		out.SetInt("HostPort", netAddr.Port)
-	}
-	if _, err := out.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-
-	return engine.StatusOK
-}
-
-func LinkContainers(job *engine.Job) engine.Status {
-	var (
-		action       = job.Args[0]
-		nfAction     iptables.Action
-		childIP      = job.Getenv("ChildIP")
-		parentIP     = job.Getenv("ParentIP")
-		ignoreErrors = job.GetenvBool("IgnoreErrors")
-		ports        = job.GetenvList("Ports")
-	)
-
-	switch action {
-	case "-A":
-		nfAction = iptables.Append
-	case "-I":
-		nfAction = iptables.Insert
-	case "-D":
-		nfAction = iptables.Delete
-	default:
-		return job.Errorf("Invalid action '%s' specified", action)
-	}
-
-	ip1 := net.ParseIP(parentIP)
-	if ip1 == nil {
-		return job.Errorf("Parent IP '%s' is invalid", parentIP)
-	}
-	ip2 := net.ParseIP(childIP)
-	if ip2 == nil {
-		return job.Errorf("Child IP '%s' is invalid", childIP)
-	}
-
-	chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
-	for _, p := range ports {
-		port := nat.Port(p)
-		if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
-			return job.Error(err)
-		}
-	}
-	return engine.StatusOK
-}
diff --git a/daemon/networkdriver/bridge/driver_test.go b/daemon/networkdriver/bridge/driver_test.go
deleted file mode 100644
index 8c20dff..0000000
--- a/daemon/networkdriver/bridge/driver_test.go
+++ /dev/null
@@ -1,279 +0,0 @@
-package bridge
-
-import (
-	"fmt"
-	"net"
-	"strconv"
-	"testing"
-
-	"github.com/docker/docker/daemon/networkdriver/portmapper"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/iptables"
-)
-
-func init() {
-	// reset the new proxy command for mocking out the userland proxy in tests
-	portmapper.NewProxy = portmapper.NewMockProxyCommand
-}
-
-func findFreePort(t *testing.T) int {
-	l, err := net.Listen("tcp", ":0")
-	if err != nil {
-		t.Fatal("Failed to find a free port")
-	}
-	defer l.Close()
-
-	result, err := net.ResolveTCPAddr("tcp", l.Addr().String())
-	if err != nil {
-		t.Fatal("Failed to resolve address to identify free port")
-	}
-	return result.Port
-}
-
-func newPortAllocationJob(eng *engine.Engine, port int) (job *engine.Job) {
-	strPort := strconv.Itoa(port)
-
-	job = eng.Job("allocate_port", "container_id")
-	job.Setenv("HostIP", "127.0.0.1")
-	job.Setenv("HostPort", strPort)
-	job.Setenv("Proto", "tcp")
-	job.Setenv("ContainerPort", strPort)
-	return
-}
-
-func newPortAllocationJobWithInvalidHostIP(eng *engine.Engine, port int) (job *engine.Job) {
-	strPort := strconv.Itoa(port)
-
-	job = eng.Job("allocate_port", "container_id")
-	job.Setenv("HostIP", "localhost")
-	job.Setenv("HostPort", strPort)
-	job.Setenv("Proto", "tcp")
-	job.Setenv("ContainerPort", strPort)
-	return
-}
-
-func TestAllocatePortDetection(t *testing.T) {
-	eng := engine.New()
-	eng.Logging = false
-
-	freePort := findFreePort(t)
-
-	// Init driver
-	job := eng.Job("initdriver")
-	if res := InitDriver(job); res != engine.StatusOK {
-		t.Fatal("Failed to initialize network driver")
-	}
-
-	// Allocate interface
-	job = eng.Job("allocate_interface", "container_id")
-	if res := Allocate(job); res != engine.StatusOK {
-		t.Fatal("Failed to allocate network interface")
-	}
-
-	// Allocate same port twice, expect failure on second call
-	job = newPortAllocationJob(eng, freePort)
-	if res := AllocatePort(job); res != engine.StatusOK {
-		t.Fatal("Failed to find a free port to allocate")
-	}
-	if res := AllocatePort(job); res == engine.StatusOK {
-		t.Fatal("Duplicate port allocation granted by AllocatePort")
-	}
-}
-
-func TestHostnameFormatChecking(t *testing.T) {
-	eng := engine.New()
-	eng.Logging = false
-
-	freePort := findFreePort(t)
-
-	// Init driver
-	job := eng.Job("initdriver")
-	if res := InitDriver(job); res != engine.StatusOK {
-		t.Fatal("Failed to initialize network driver")
-	}
-
-	// Allocate interface
-	job = eng.Job("allocate_interface", "container_id")
-	if res := Allocate(job); res != engine.StatusOK {
-		t.Fatal("Failed to allocate network interface")
-	}
-
-	// Allocate port with invalid HostIP, expect failure with Bad Request http status
-	job = newPortAllocationJobWithInvalidHostIP(eng, freePort)
-	if res := AllocatePort(job); res == engine.StatusOK {
-		t.Fatal("Failed to check invalid HostIP")
-	}
-}
-
-func newInterfaceAllocation(t *testing.T, input engine.Env) (output engine.Env) {
-	eng := engine.New()
-	eng.Logging = false
-
-	done := make(chan bool)
-
-	// set IPv6 global if given
-	if input.Exists("globalIPv6Network") {
-		_, globalIPv6Network, _ = net.ParseCIDR(input.Get("globalIPv6Network"))
-	}
-
-	job := eng.Job("allocate_interface", "container_id")
-	job.Env().Init(&input)
-	reader, _ := job.Stdout.AddPipe()
-	go func() {
-		output.Decode(reader)
-		done <- true
-	}()
-
-	res := Allocate(job)
-	job.Stdout.Close()
-	<-done
-
-	if input.Exists("expectFail") && input.GetBool("expectFail") {
-		if res == engine.StatusOK {
-			t.Fatal("Doesn't fail to allocate network interface")
-		}
-	} else {
-		if res != engine.StatusOK {
-			t.Fatal("Failed to allocate network interface")
-		}
-	}
-
-	if input.Exists("globalIPv6Network") {
-		// check for bug #11427
-		_, subnet, _ := net.ParseCIDR(input.Get("globalIPv6Network"))
-		if globalIPv6Network.IP.String() != subnet.IP.String() {
-			t.Fatal("globalIPv6Network was modified during allocation")
-		}
-		// clean up IPv6 global
-		globalIPv6Network = nil
-	}
-
-	return
-}
-
-func TestIPv6InterfaceAllocationAutoNetmaskGt80(t *testing.T) {
-
-	input := engine.Env{}
-
-	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/81")
-
-	// set global ipv6
-	input.Set("globalIPv6Network", subnet.String())
-
-	output := newInterfaceAllocation(t, input)
-
-	// ensure low manually assigend global ip
-	ip := net.ParseIP(output.Get("GlobalIPv6"))
-	_, subnet, _ = net.ParseCIDR(fmt.Sprintf("%s/%d", subnet.IP.String(), 120))
-	if !subnet.Contains(ip) {
-		t.Fatalf("Error ip %s not in subnet %s", ip.String(), subnet.String())
-	}
-}
-
-func TestIPv6InterfaceAllocationAutoNetmaskLe80(t *testing.T) {
-
-	input := engine.Env{}
-
-	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
-
-	// set global ipv6
-	input.Set("globalIPv6Network", subnet.String())
-	input.Set("RequestedMac", "ab:cd:ab:cd:ab:cd")
-
-	output := newInterfaceAllocation(t, input)
-
-	// ensure global ip with mac
-	ip := net.ParseIP(output.Get("GlobalIPv6"))
-	expected_ip := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd")
-	if ip.String() != expected_ip.String() {
-		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
-	}
-
-	// ensure link local format
-	ip = net.ParseIP(output.Get("LinkLocalIPv6"))
-	expected_ip = net.ParseIP("fe80::a9cd:abff:fecd:abcd")
-	if ip.String() != expected_ip.String() {
-		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
-	}
-
-}
-
-func TestIPv6InterfaceAllocationRequest(t *testing.T) {
-
-	input := engine.Env{}
-
-	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
-	expected_ip := net.ParseIP("2001:db8:1234:1234:1234::1328")
-
-	// set global ipv6
-	input.Set("globalIPv6Network", subnet.String())
-	input.Set("RequestedIPv6", expected_ip.String())
-
-	output := newInterfaceAllocation(t, input)
-
-	// ensure global ip with mac
-	ip := net.ParseIP(output.Get("GlobalIPv6"))
-	if ip.String() != expected_ip.String() {
-		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
-	}
-
-	// retry -> fails for duplicated address
-	input.SetBool("expectFail", true)
-	output = newInterfaceAllocation(t, input)
-}
-
-func TestMacAddrGeneration(t *testing.T) {
-	ip := net.ParseIP("192.168.0.1")
-	mac := generateMacAddr(ip).String()
-
-	// Should be consistent.
-	if generateMacAddr(ip).String() != mac {
-		t.Fatal("Inconsistent MAC address")
-	}
-
-	// Should be unique.
-	ip2 := net.ParseIP("192.168.0.2")
-	if generateMacAddr(ip2).String() == mac {
-		t.Fatal("Non-unique MAC address")
-	}
-}
-
-func TestLinkContainers(t *testing.T) {
-	eng := engine.New()
-	eng.Logging = false
-
-	// Init driver
-	job := eng.Job("initdriver")
-	if res := InitDriver(job); res != engine.StatusOK {
-		t.Fatal("Failed to initialize network driver")
-	}
-
-	// Allocate interface
-	job = eng.Job("allocate_interface", "container_id")
-	if res := Allocate(job); res != engine.StatusOK {
-		t.Fatal("Failed to allocate network interface")
-	}
-
-	job.Args[0] = "-I"
-
-	job.Setenv("ChildIP", "172.17.0.2")
-	job.Setenv("ParentIP", "172.17.0.1")
-	job.SetenvBool("IgnoreErrors", false)
-	job.SetenvList("Ports", []string{"1234"})
-
-	bridgeIface = "lo"
-	_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if res := LinkContainers(job); res != engine.StatusOK {
-		t.Fatalf("LinkContainers failed")
-	}
-
-	// flush rules
-	if _, err = iptables.Raw([]string{"-F", "DOCKER"}...); err != nil {
-		t.Fatal(err)
-	}
-
-}
diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go
deleted file mode 100644
index a728d1b..0000000
--- a/daemon/networkdriver/ipallocator/allocator.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package ipallocator
-
-import (
-	"errors"
-	"math/big"
-	"net"
-	"sync"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon/networkdriver"
-)
-
-// allocatedMap is thread-unsafe set of allocated IP
-type allocatedMap struct {
-	p     map[string]struct{}
-	last  *big.Int
-	begin *big.Int
-	end   *big.Int
-}
-
-func newAllocatedMap(network *net.IPNet) *allocatedMap {
-	firstIP, lastIP := networkdriver.NetworkRange(network)
-	begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
-	end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1))
-
-	return &allocatedMap{
-		p:     make(map[string]struct{}),
-		begin: begin,
-		end:   end,
-		last:  big.NewInt(0).Sub(begin, big.NewInt(1)), // so first allocated will be begin
-	}
-}
-
-type networkSet map[string]*allocatedMap
-
-var (
-	ErrNoAvailableIPs           = errors.New("no available ip addresses on network")
-	ErrIPAlreadyAllocated       = errors.New("ip already allocated")
-	ErrIPOutOfRange             = errors.New("requested ip is out of range")
-	ErrNetworkAlreadyRegistered = errors.New("network already registered")
-	ErrBadSubnet                = errors.New("network does not contain specified subnet")
-)
-
-var (
-	lock         = sync.Mutex{}
-	allocatedIPs = networkSet{}
-)
-
-// RegisterSubnet registers network in global allocator with bounds
-// defined by subnet. If you want to use network range you must call
-// this method before first RequestIP, otherwise full network range will be used
-func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error {
-	lock.Lock()
-	defer lock.Unlock()
-	key := network.String()
-	if _, ok := allocatedIPs[key]; ok {
-		return ErrNetworkAlreadyRegistered
-	}
-	n := newAllocatedMap(network)
-	beginIP, endIP := networkdriver.NetworkRange(subnet)
-	begin := big.NewInt(0).Add(ipToBigInt(beginIP), big.NewInt(1))
-	end := big.NewInt(0).Sub(ipToBigInt(endIP), big.NewInt(1))
-
-	// Check that subnet is within network
-	if !(begin.Cmp(n.begin) >= 0 && end.Cmp(n.end) <= 0 && begin.Cmp(end) == -1) {
-		return ErrBadSubnet
-	}
-	n.begin.Set(begin)
-	n.end.Set(end)
-	n.last.Sub(begin, big.NewInt(1))
-	allocatedIPs[key] = n
-	return nil
-}
-
-// RequestIP requests an available ip from the given network.  It
-// will return the next available ip if the ip provided is nil.  If the
-// ip provided is not nil it will validate that the provided ip is available
-// for use or return an error
-func RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) {
-	lock.Lock()
-	defer lock.Unlock()
-	key := network.String()
-	allocated, ok := allocatedIPs[key]
-	if !ok {
-		allocated = newAllocatedMap(network)
-		allocatedIPs[key] = allocated
-	}
-
-	if ip == nil {
-		return allocated.getNextIP()
-	}
-	return allocated.checkIP(ip)
-}
-
-// ReleaseIP adds the provided ip back into the pool of
-// available ips to be returned for use.
-func ReleaseIP(network *net.IPNet, ip net.IP) error {
-	lock.Lock()
-	defer lock.Unlock()
-	if allocated, exists := allocatedIPs[network.String()]; exists {
-		delete(allocated.p, ip.String())
-	}
-	return nil
-}
-
-func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) {
-	if _, ok := allocated.p[ip.String()]; ok {
-		return nil, ErrIPAlreadyAllocated
-	}
-
-	pos := ipToBigInt(ip)
-	// Verify that the IP address is within our network range.
-	if pos.Cmp(allocated.begin) == -1 || pos.Cmp(allocated.end) == 1 {
-		return nil, ErrIPOutOfRange
-	}
-
-	// Register the IP.
-	allocated.p[ip.String()] = struct{}{}
-
-	return ip, nil
-}
-
-// return an available ip if one is currently available.  If not,
-// return the next available ip for the nextwork
-func (allocated *allocatedMap) getNextIP() (net.IP, error) {
-	pos := big.NewInt(0).Set(allocated.last)
-	allRange := big.NewInt(0).Sub(allocated.end, allocated.begin)
-	for i := big.NewInt(0); i.Cmp(allRange) <= 0; i.Add(i, big.NewInt(1)) {
-		pos.Add(pos, big.NewInt(1))
-		if pos.Cmp(allocated.end) == 1 {
-			pos.Set(allocated.begin)
-		}
-		if _, ok := allocated.p[bigIntToIP(pos).String()]; ok {
-			continue
-		}
-		allocated.p[bigIntToIP(pos).String()] = struct{}{}
-		allocated.last.Set(pos)
-		return bigIntToIP(pos), nil
-	}
-	return nil, ErrNoAvailableIPs
-}
-
-// Converts a 4 bytes IP into a 128 bit integer
-func ipToBigInt(ip net.IP) *big.Int {
-	x := big.NewInt(0)
-	if ip4 := ip.To4(); ip4 != nil {
-		return x.SetBytes(ip4)
-	}
-	if ip6 := ip.To16(); ip6 != nil {
-		return x.SetBytes(ip6)
-	}
-
-	log.Errorf("ipToBigInt: Wrong IP length! %s", ip)
-	return nil
-}
-
-// Converts 128 bit integer into a 4 bytes IP address
-func bigIntToIP(v *big.Int) net.IP {
-	return net.IP(v.Bytes())
-}
diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go
deleted file mode 100644
index 8e0d8fd..0000000
--- a/daemon/networkdriver/ipallocator/allocator_test.go
+++ /dev/null
@@ -1,681 +0,0 @@
-package ipallocator
-
-import (
-	"fmt"
-	"math/big"
-	"net"
-	"testing"
-)
-
-func reset() {
-	allocatedIPs = networkSet{}
-}
-
-func TestConversion(t *testing.T) {
-	ip := net.ParseIP("127.0.0.1")
-	i := ipToBigInt(ip)
-	if i.Cmp(big.NewInt(0x7f000001)) != 0 {
-		t.Fatal("incorrect conversion")
-	}
-	conv := bigIntToIP(i)
-	if !ip.Equal(conv) {
-		t.Error(conv.String())
-	}
-}
-
-func TestConversionIPv6(t *testing.T) {
-	ip := net.ParseIP("2a00:1450::1")
-	ip2 := net.ParseIP("2a00:1450::2")
-	ip3 := net.ParseIP("2a00:1450::1:1")
-	i := ipToBigInt(ip)
-	val, success := big.NewInt(0).SetString("2a001450000000000000000000000001", 16)
-	if !success {
-		t.Fatal("Hex-String to BigInt conversion failed.")
-	}
-	if i.Cmp(val) != 0 {
-		t.Fatal("incorrent conversion")
-	}
-
-	conv := bigIntToIP(i)
-	conv2 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(1)))
-	conv3 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(0x10000)))
-
-	if !ip.Equal(conv) {
-		t.Error("2a00:1450::1 should be equal to " + conv.String())
-	}
-	if !ip2.Equal(conv2) {
-		t.Error("2a00:1450::2 should be equal to " + conv2.String())
-	}
-	if !ip3.Equal(conv3) {
-		t.Error("2a00:1450::1:1 should be equal to " + conv3.String())
-	}
-}
-
-func TestRequestNewIps(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-
-	var ip net.IP
-	var err error
-
-	for i := 1; i < 10; i++ {
-		ip, err = RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected {
-			t.Fatalf("Expected ip %s got %s", expected, ip.String())
-		}
-	}
-	value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-	ip, err = RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if ip.String() != value {
-		t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String())
-	}
-}
-
-func TestRequestNewIpV6(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
-		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
-	}
-
-	var ip net.IP
-	var err error
-	for i := 1; i < 10; i++ {
-		ip, err = RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if expected := fmt.Sprintf("2a00:1450::%d", i); ip.String() != expected {
-			t.Fatalf("Expected ip %s got %s", expected, ip.String())
-		}
-	}
-	value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-	ip, err = RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if ip.String() != value {
-		t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String())
-	}
-}
-
-func TestReleaseIp(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-
-	ip, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestReleaseIpV6(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
-		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
-	}
-
-	ip, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestGetReleasedIp(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-
-	ip, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	value := ip.String()
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-
-	for i := 0; i < 253; i++ {
-		_, err = RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-		err = ReleaseIP(network, ip)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	ip, err = RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if ip.String() != value {
-		t.Fatalf("Expected to receive same ip %s got %s", value, ip.String())
-	}
-}
-
-func TestGetReleasedIpV6(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
-		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0},
-	}
-
-	ip, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	value := ip.String()
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-
-	for i := 0; i < 253; i++ {
-		_, err = RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-		err = ReleaseIP(network, ip)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	ip, err = RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if ip.String() != value {
-		t.Fatalf("Expected to receive same ip %s got %s", value, ip.String())
-	}
-}
-
-func TestRequestSpecificIp(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 224},
-	}
-
-	ip := net.ParseIP("192.168.0.5")
-
-	// Request a "good" IP.
-	if _, err := RequestIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-
-	// Request the same IP again.
-	if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated {
-		t.Fatalf("Got the same IP twice: %#v", err)
-	}
-
-	// Request an out of range IP.
-	if _, err := RequestIP(network, net.ParseIP("192.168.0.42")); err != ErrIPOutOfRange {
-		t.Fatalf("Got an out of range IP: %#v", err)
-	}
-}
-
-func TestRequestSpecificIpV6(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
-		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
-	}
-
-	ip := net.ParseIP("2a00:1450::5")
-
-	// Request a "good" IP.
-	if _, err := RequestIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-
-	// Request the same IP again.
-	if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated {
-		t.Fatalf("Got the same IP twice: %#v", err)
-	}
-
-	// Request an out of range IP.
-	if _, err := RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange {
-		t.Fatalf("Got an out of range IP: %#v", err)
-	}
-}
-
-func TestIPAllocator(t *testing.T) {
-	expectedIPs := []net.IP{
-		0: net.IPv4(127, 0, 0, 1),
-		1: net.IPv4(127, 0, 0, 2),
-		2: net.IPv4(127, 0, 0, 3),
-		3: net.IPv4(127, 0, 0, 4),
-		4: net.IPv4(127, 0, 0, 5),
-		5: net.IPv4(127, 0, 0, 6),
-	}
-
-	gwIP, n, _ := net.ParseCIDR("127.0.0.1/29")
-
-	network := &net.IPNet{IP: gwIP, Mask: n.Mask}
-	// Pool after initialisation (f = free, u = used)
-	// 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f)
-	//  ↑
-
-	// Check that we get 6 IPs, from 127.0.0.1–127.0.0.6, in that
-	// order.
-	for i := 0; i < 6; i++ {
-		ip, err := RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		assertIPEquals(t, expectedIPs[i], ip)
-	}
-	// Before loop begin
-	// 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f)
-	//  ↑
-
-	// After i = 0
-	// 1(u) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f)
-	//         ↑
-
-	// After i = 1
-	// 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(f)
-	//                ↑
-
-	// After i = 2
-	// 1(u) - 2(u) - 3(u) - 4(f) - 5(f) - 6(f)
-	//                       ↑
-
-	// After i = 3
-	// 1(u) - 2(u) - 3(u) - 4(u) - 5(f) - 6(f)
-	//                              ↑
-
-	// After i = 4
-	// 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(f)
-	//                                     ↑
-
-	// After i = 5
-	// 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(u)
-	//  ↑
-
-	// Check that there are no more IPs
-	ip, err := RequestIP(network, nil)
-	if err == nil {
-		t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip)
-	}
-
-	// Release some IPs in non-sequential order
-	if err := ReleaseIP(network, expectedIPs[3]); err != nil {
-		t.Fatal(err)
-	}
-	// 1(u) - 2(u) - 3(u) - 4(f) - 5(u) - 6(u)
-	//                       ↑
-
-	if err := ReleaseIP(network, expectedIPs[2]); err != nil {
-		t.Fatal(err)
-	}
-	// 1(u) - 2(u) - 3(f) - 4(f) - 5(u) - 6(u)
-	//                ↑
-
-	if err := ReleaseIP(network, expectedIPs[4]); err != nil {
-		t.Fatal(err)
-	}
-	// 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(u)
-	//                              ↑
-
-	// Make sure that IPs are reused in sequential order, starting
-	// with the first released IP
-	newIPs := make([]net.IP, 3)
-	for i := 0; i < 3; i++ {
-		ip, err := RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		newIPs[i] = ip
-	}
-	assertIPEquals(t, expectedIPs[2], newIPs[0])
-	assertIPEquals(t, expectedIPs[3], newIPs[1])
-	assertIPEquals(t, expectedIPs[4], newIPs[2])
-
-	_, err = RequestIP(network, nil)
-	if err == nil {
-		t.Fatal("There shouldn't be any IP addresses at this point")
-	}
-}
-
-func TestAllocateFirstIP(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 0},
-		Mask: []byte{255, 255, 255, 0},
-	}
-
-	firstIP := network.IP.To4().Mask(network.Mask)
-	first := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
-
-	ip, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	allocated := ipToBigInt(ip)
-
-	if allocated == first {
-		t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated)
-	}
-}
-
-func TestAllocateAllIps(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-
-	var (
-		current, first net.IP
-		err            error
-		isFirst        = true
-	)
-
-	for err == nil {
-		current, err = RequestIP(network, nil)
-		if isFirst {
-			first = current
-			isFirst = false
-		}
-	}
-
-	if err != ErrNoAvailableIPs {
-		t.Fatal(err)
-	}
-
-	if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
-		t.Fatal(err)
-	}
-
-	if err := ReleaseIP(network, first); err != nil {
-		t.Fatal(err)
-	}
-
-	again, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertIPEquals(t, first, again)
-
-	// ensure that alloc.last == alloc.begin won't result in dead loop
-	if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
-		t.Fatal(err)
-	}
-
-	// Test by making alloc.last the only free ip and ensure we get it back
-	// #1. first of the range, (alloc.last == ipToInt(first) already)
-	if err := ReleaseIP(network, first); err != nil {
-		t.Fatal(err)
-	}
-
-	ret, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertIPEquals(t, first, ret)
-
-	// #2. last of the range, note that current is the last one
-	last := net.IPv4(192, 168, 0, 254)
-	setLastTo(t, network, last)
-
-	ret, err = RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertIPEquals(t, last, ret)
-
-	// #3. middle of the range
-	mid := net.IPv4(192, 168, 0, 7)
-	setLastTo(t, network, mid)
-
-	ret, err = RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertIPEquals(t, mid, ret)
-}
-
-// make sure the pool is full when calling setLastTo.
-// we don't cheat here
-func setLastTo(t *testing.T, network *net.IPNet, ip net.IP) {
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-
-	ret, err := RequestIP(network, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertIPEquals(t, ip, ret)
-
-	if err := ReleaseIP(network, ip); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestAllocateDifferentSubnets(t *testing.T) {
-	defer reset()
-	network1 := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	network2 := &net.IPNet{
-		IP:   []byte{127, 0, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	network3 := &net.IPNet{
-		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
-		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
-	}
-	network4 := &net.IPNet{
-		IP:   []byte{0x2a, 0x00, 0x16, 0x32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
-		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
-	}
-	expectedIPs := []net.IP{
-		0: net.IPv4(192, 168, 0, 1),
-		1: net.IPv4(192, 168, 0, 2),
-		2: net.IPv4(127, 0, 0, 1),
-		3: net.IPv4(127, 0, 0, 2),
-		4: net.ParseIP("2a00:1450::1"),
-		5: net.ParseIP("2a00:1450::2"),
-		6: net.ParseIP("2a00:1450::3"),
-		7: net.ParseIP("2a00:1632::1"),
-		8: net.ParseIP("2a00:1632::2"),
-	}
-
-	ip11, err := RequestIP(network1, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip12, err := RequestIP(network1, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip21, err := RequestIP(network2, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip22, err := RequestIP(network2, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip31, err := RequestIP(network3, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip32, err := RequestIP(network3, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip33, err := RequestIP(network3, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip41, err := RequestIP(network4, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ip42, err := RequestIP(network4, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	assertIPEquals(t, expectedIPs[0], ip11)
-	assertIPEquals(t, expectedIPs[1], ip12)
-	assertIPEquals(t, expectedIPs[2], ip21)
-	assertIPEquals(t, expectedIPs[3], ip22)
-	assertIPEquals(t, expectedIPs[4], ip31)
-	assertIPEquals(t, expectedIPs[5], ip32)
-	assertIPEquals(t, expectedIPs[6], ip33)
-	assertIPEquals(t, expectedIPs[7], ip41)
-	assertIPEquals(t, expectedIPs[8], ip42)
-}
-
-func TestRegisterBadTwice(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 1, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	subnet := &net.IPNet{
-		IP:   []byte{192, 168, 1, 8},
-		Mask: []byte{255, 255, 255, 248},
-	}
-
-	if err := RegisterSubnet(network, subnet); err != nil {
-		t.Fatal(err)
-	}
-	subnet = &net.IPNet{
-		IP:   []byte{192, 168, 1, 16},
-		Mask: []byte{255, 255, 255, 248},
-	}
-	if err := RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered {
-		t.Fatalf("Expecteded ErrNetworkAlreadyRegistered error, got %v", err)
-	}
-}
-
-func TestRegisterBadRange(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 1, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	subnet := &net.IPNet{
-		IP:   []byte{192, 168, 1, 1},
-		Mask: []byte{255, 255, 0, 0},
-	}
-	if err := RegisterSubnet(network, subnet); err != ErrBadSubnet {
-		t.Fatalf("Expected ErrBadSubnet error, got %v", err)
-	}
-}
-
-func TestAllocateFromRange(t *testing.T) {
-	defer reset()
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	// 192.168.1.9 - 192.168.1.14
-	subnet := &net.IPNet{
-		IP:   []byte{192, 168, 0, 8},
-		Mask: []byte{255, 255, 255, 248},
-	}
-
-	if err := RegisterSubnet(network, subnet); err != nil {
-		t.Fatal(err)
-	}
-	expectedIPs := []net.IP{
-		0: net.IPv4(192, 168, 0, 9),
-		1: net.IPv4(192, 168, 0, 10),
-		2: net.IPv4(192, 168, 0, 11),
-		3: net.IPv4(192, 168, 0, 12),
-		4: net.IPv4(192, 168, 0, 13),
-		5: net.IPv4(192, 168, 0, 14),
-	}
-	for _, ip := range expectedIPs {
-		rip, err := RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-		assertIPEquals(t, ip, rip)
-	}
-
-	if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
-		t.Fatalf("Expected ErrNoAvailableIPs error, got %v", err)
-	}
-	for _, ip := range expectedIPs {
-		ReleaseIP(network, ip)
-		rip, err := RequestIP(network, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-		assertIPEquals(t, ip, rip)
-	}
-}
-
-func assertIPEquals(t *testing.T, ip1, ip2 net.IP) {
-	if !ip1.Equal(ip2) {
-		t.Fatalf("Expected IP %s, got %s", ip1, ip2)
-	}
-}
-
-func BenchmarkRequestIP(b *testing.B) {
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		for j := 0; j < 253; j++ {
-			_, err := RequestIP(network, nil)
-			if err != nil {
-				b.Fatal(err)
-			}
-		}
-		reset()
-	}
-}
diff --git a/daemon/networkdriver/network.go b/daemon/networkdriver/network.go
deleted file mode 100644
index 8dda789..0000000
--- a/daemon/networkdriver/network.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package networkdriver
-
-import (
-	"errors"
-)
-
-var (
-	ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver")
-	ErrNetworkOverlaps                = errors.New("requested network overlaps with existing network")
-)
diff --git a/daemon/networkdriver/network_test.go b/daemon/networkdriver/network_test.go
deleted file mode 100644
index 1a6336b..0000000
--- a/daemon/networkdriver/network_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package networkdriver
-
-import (
-	"github.com/docker/libcontainer/netlink"
-	"net"
-	"testing"
-)
-
-func TestNonOverlapingNameservers(t *testing.T) {
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	nameservers := []string{
-		"127.0.0.1/32",
-	}
-
-	if err := CheckNameserverOverlaps(nameservers, network); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestOverlapingNameservers(t *testing.T) {
-	network := &net.IPNet{
-		IP:   []byte{192, 168, 0, 1},
-		Mask: []byte{255, 255, 255, 0},
-	}
-	nameservers := []string{
-		"192.168.0.1/32",
-	}
-
-	if err := CheckNameserverOverlaps(nameservers, network); err == nil {
-		t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err)
-	}
-}
-
-func TestCheckRouteOverlaps(t *testing.T) {
-	orig := networkGetRoutesFct
-	defer func() {
-		networkGetRoutesFct = orig
-	}()
-	networkGetRoutesFct = func() ([]netlink.Route, error) {
-		routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"}
-
-		routes := []netlink.Route{}
-		for _, addr := range routesData {
-			_, netX, _ := net.ParseCIDR(addr)
-			routes = append(routes, netlink.Route{IPNet: netX})
-		}
-		return routes, nil
-	}
-
-	_, netX, _ := net.ParseCIDR("172.16.0.1/24")
-	if err := CheckRouteOverlaps(netX); err != nil {
-		t.Fatal(err)
-	}
-
-	_, netX, _ = net.ParseCIDR("10.0.2.0/24")
-	if err := CheckRouteOverlaps(netX); err == nil {
-		t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't")
-	}
-}
-
-func TestCheckNameserverOverlaps(t *testing.T) {
-	nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"}
-
-	_, netX, _ := net.ParseCIDR("10.0.2.3/32")
-
-	if err := CheckNameserverOverlaps(nameservers, netX); err == nil {
-		t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX)
-	}
-
-	_, netX, _ = net.ParseCIDR("192.168.102.2/32")
-
-	if err := CheckNameserverOverlaps(nameservers, netX); err != nil {
-		t.Fatalf("%s should not overlap %v but it does", netX, nameservers)
-	}
-}
-
-func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) {
-	_, netX, _ := net.ParseCIDR(CIDRx)
-	_, netY, _ := net.ParseCIDR(CIDRy)
-	if !NetworkOverlaps(netX, netY) {
-		t.Errorf("%v and %v should overlap", netX, netY)
-	}
-}
-
-func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) {
-	_, netX, _ := net.ParseCIDR(CIDRx)
-	_, netY, _ := net.ParseCIDR(CIDRy)
-	if NetworkOverlaps(netX, netY) {
-		t.Errorf("%v and %v should not overlap", netX, netY)
-	}
-}
-
-func TestNetworkOverlaps(t *testing.T) {
-	//netY starts at same IP and ends within netX
-	AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t)
-	//netY starts within netX and ends at same IP
-	AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t)
-	//netY starts and ends within netX
-	AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t)
-	//netY starts at same IP and ends outside of netX
-	AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t)
-	//netY starts before and ends at same IP of netX
-	AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t)
-	//netY starts before and ends outside of netX
-	AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t)
-	//netY starts and ends before netX
-	AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t)
-	//netX starts and ends before netY
-	AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t)
-}
-
-func TestNetworkRange(t *testing.T) {
-	// Simple class C test
-	_, network, _ := net.ParseCIDR("192.168.0.1/24")
-	first, last := NetworkRange(network)
-	if !first.Equal(net.ParseIP("192.168.0.0")) {
-		t.Error(first.String())
-	}
-	if !last.Equal(net.ParseIP("192.168.0.255")) {
-		t.Error(last.String())
-	}
-
-	// Class A test
-	_, network, _ = net.ParseCIDR("10.0.0.1/8")
-	first, last = NetworkRange(network)
-	if !first.Equal(net.ParseIP("10.0.0.0")) {
-		t.Error(first.String())
-	}
-	if !last.Equal(net.ParseIP("10.255.255.255")) {
-		t.Error(last.String())
-	}
-
-	// Class A, random IP address
-	_, network, _ = net.ParseCIDR("10.1.2.3/8")
-	first, last = NetworkRange(network)
-	if !first.Equal(net.ParseIP("10.0.0.0")) {
-		t.Error(first.String())
-	}
-	if !last.Equal(net.ParseIP("10.255.255.255")) {
-		t.Error(last.String())
-	}
-
-	// 32bit mask
-	_, network, _ = net.ParseCIDR("10.1.2.3/32")
-	first, last = NetworkRange(network)
-	if !first.Equal(net.ParseIP("10.1.2.3")) {
-		t.Error(first.String())
-	}
-	if !last.Equal(net.ParseIP("10.1.2.3")) {
-		t.Error(last.String())
-	}
-
-	// 31bit mask
-	_, network, _ = net.ParseCIDR("10.1.2.3/31")
-	first, last = NetworkRange(network)
-	if !first.Equal(net.ParseIP("10.1.2.2")) {
-		t.Error(first.String())
-	}
-	if !last.Equal(net.ParseIP("10.1.2.3")) {
-		t.Error(last.String())
-	}
-
-	// 26bit mask
-	_, network, _ = net.ParseCIDR("10.1.2.3/26")
-	first, last = NetworkRange(network)
-	if !first.Equal(net.ParseIP("10.1.2.0")) {
-		t.Error(first.String())
-	}
-	if !last.Equal(net.ParseIP("10.1.2.63")) {
-		t.Error(last.String())
-	}
-}
diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go
deleted file mode 100644
index e4633b8..0000000
--- a/daemon/networkdriver/portallocator/portallocator.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package portallocator
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"net"
-	"os"
-	"sync"
-
-	log "github.com/Sirupsen/logrus"
-)
-
-const (
-	DefaultPortRangeStart = 49153
-	DefaultPortRangeEnd   = 65535
-)
-
-type ipMapping map[string]protoMap
-
-var (
-	ErrAllPortsAllocated = errors.New("all ports are allocated")
-	ErrUnknownProtocol   = errors.New("unknown protocol")
-	defaultIP            = net.ParseIP("0.0.0.0")
-)
-
-type ErrPortAlreadyAllocated struct {
-	ip   string
-	port int
-}
-
-func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
-	return ErrPortAlreadyAllocated{
-		ip:   ip,
-		port: port,
-	}
-}
-
-func (e ErrPortAlreadyAllocated) IP() string {
-	return e.ip
-}
-
-func (e ErrPortAlreadyAllocated) Port() int {
-	return e.port
-}
-
-func (e ErrPortAlreadyAllocated) IPPort() string {
-	return fmt.Sprintf("%s:%d", e.ip, e.port)
-}
-
-func (e ErrPortAlreadyAllocated) Error() string {
-	return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
-}
-
-type (
-	PortAllocator struct {
-		mutex sync.Mutex
-		ipMap ipMapping
-		Begin int
-		End   int
-	}
-	portMap struct {
-		p          map[int]struct{}
-		begin, end int
-		last       int
-	}
-	protoMap map[string]*portMap
-)
-
-func New() *PortAllocator {
-	start, end, err := getDynamicPortRange()
-	if err != nil {
-		log.Warn(err)
-		start, end = DefaultPortRangeStart, DefaultPortRangeEnd
-	}
-	return &PortAllocator{
-		ipMap: ipMapping{},
-		Begin: start,
-		End:   end,
-	}
-}
-
-func getDynamicPortRange() (start int, end int, err error) {
-	const portRangeKernelParam = "/proc/sys/net/ipv4/ip_local_port_range"
-	portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", DefaultPortRangeStart, DefaultPortRangeEnd)
-	file, err := os.Open(portRangeKernelParam)
-	if err != nil {
-		return 0, 0, fmt.Errorf("port allocator - %s due to error: %v", portRangeFallback, err)
-	}
-	n, err := fmt.Fscanf(bufio.NewReader(file), "%d\t%d", &start, &end)
-	if n != 2 || err != nil {
-		if err == nil {
-			err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
-		}
-		return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range from %s - %s: %v", portRangeKernelParam, portRangeFallback, err)
-	}
-	return start, end, nil
-}
-
-// RequestPort requests new port from global ports pool for specified ip and proto.
-// If port is 0 it returns first free port. Otherwise it cheks port availability
-// in pool and return that port or error if port is already busy.
-func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) {
-	p.mutex.Lock()
-	defer p.mutex.Unlock()
-
-	if proto != "tcp" && proto != "udp" {
-		return 0, ErrUnknownProtocol
-	}
-
-	if ip == nil {
-		ip = defaultIP
-	}
-	ipstr := ip.String()
-	protomap, ok := p.ipMap[ipstr]
-	if !ok {
-		protomap = protoMap{
-			"tcp": p.newPortMap(),
-			"udp": p.newPortMap(),
-		}
-
-		p.ipMap[ipstr] = protomap
-	}
-	mapping := protomap[proto]
-	if port > 0 {
-		if _, ok := mapping.p[port]; !ok {
-			mapping.p[port] = struct{}{}
-			return port, nil
-		}
-		return 0, NewErrPortAlreadyAllocated(ipstr, port)
-	}
-
-	port, err := mapping.findPort()
-	if err != nil {
-		return 0, err
-	}
-	return port, nil
-}
-
-// ReleasePort releases port from global ports pool for specified ip and proto.
-func (p *PortAllocator) ReleasePort(ip net.IP, proto string, port int) error {
-	p.mutex.Lock()
-	defer p.mutex.Unlock()
-
-	if ip == nil {
-		ip = defaultIP
-	}
-	protomap, ok := p.ipMap[ip.String()]
-	if !ok {
-		return nil
-	}
-	delete(protomap[proto].p, port)
-	return nil
-}
-
-func (p *PortAllocator) newPortMap() *portMap {
-	return &portMap{
-		p:     map[int]struct{}{},
-		begin: p.Begin,
-		end:   p.End,
-		last:  p.End,
-	}
-}
-
-// ReleaseAll releases all ports for all ips.
-func (p *PortAllocator) ReleaseAll() error {
-	p.mutex.Lock()
-	p.ipMap = ipMapping{}
-	p.mutex.Unlock()
-	return nil
-}
-
-func (pm *portMap) findPort() (int, error) {
-	port := pm.last
-	for i := 0; i <= pm.end-pm.begin; i++ {
-		port++
-		if port > pm.end {
-			port = pm.begin
-		}
-
-		if _, ok := pm.p[port]; !ok {
-			pm.p[port] = struct{}{}
-			pm.last = port
-			return port, nil
-		}
-	}
-	return 0, ErrAllPortsAllocated
-}
diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/daemon/networkdriver/portallocator/portallocator_test.go
deleted file mode 100644
index 1720123..0000000
--- a/daemon/networkdriver/portallocator/portallocator_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package portallocator
-
-import (
-	"net"
-	"testing"
-)
-
-func TestRequestNewPort(t *testing.T) {
-	p := New()
-
-	port, err := p.RequestPort(defaultIP, "tcp", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := p.Begin; port != expected {
-		t.Fatalf("Expected port %d got %d", expected, port)
-	}
-}
-
-func TestRequestSpecificPort(t *testing.T) {
-	p := New()
-
-	port, err := p.RequestPort(defaultIP, "tcp", 5000)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port != 5000 {
-		t.Fatalf("Expected port 5000 got %d", port)
-	}
-}
-
-func TestReleasePort(t *testing.T) {
-	p := New()
-
-	port, err := p.RequestPort(defaultIP, "tcp", 5000)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port != 5000 {
-		t.Fatalf("Expected port 5000 got %d", port)
-	}
-
-	if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestReuseReleasedPort(t *testing.T) {
-	p := New()
-
-	port, err := p.RequestPort(defaultIP, "tcp", 5000)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port != 5000 {
-		t.Fatalf("Expected port 5000 got %d", port)
-	}
-
-	if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil {
-		t.Fatal(err)
-	}
-
-	port, err = p.RequestPort(defaultIP, "tcp", 5000)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestReleaseUnreadledPort(t *testing.T) {
-	p := New()
-
-	port, err := p.RequestPort(defaultIP, "tcp", 5000)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port != 5000 {
-		t.Fatalf("Expected port 5000 got %d", port)
-	}
-
-	port, err = p.RequestPort(defaultIP, "tcp", 5000)
-
-	switch err.(type) {
-	case ErrPortAlreadyAllocated:
-	default:
-		t.Fatalf("Expected port allocation error got %s", err)
-	}
-}
-
-func TestUnknowProtocol(t *testing.T) {
-	if _, err := New().RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
-		t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err)
-	}
-}
-
-func TestAllocateAllPorts(t *testing.T) {
-	p := New()
-
-	for i := 0; i <= p.End-p.Begin; i++ {
-		port, err := p.RequestPort(defaultIP, "tcp", 0)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if expected := p.Begin + i; port != expected {
-			t.Fatalf("Expected port %d got %d", expected, port)
-		}
-	}
-
-	if _, err := p.RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated {
-		t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err)
-	}
-
-	_, err := p.RequestPort(defaultIP, "udp", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// release a port in the middle and ensure we get another tcp port
-	port := p.Begin + 5
-	if err := p.ReleasePort(defaultIP, "tcp", port); err != nil {
-		t.Fatal(err)
-	}
-	newPort, err := p.RequestPort(defaultIP, "tcp", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if newPort != port {
-		t.Fatalf("Expected port %d got %d", port, newPort)
-	}
-
-	// now pm.last == newPort, release it so that it's the only free port of
-	// the range, and ensure we get it back
-	if err := p.ReleasePort(defaultIP, "tcp", newPort); err != nil {
-		t.Fatal(err)
-	}
-	port, err = p.RequestPort(defaultIP, "tcp", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if newPort != port {
-		t.Fatalf("Expected port %d got %d", newPort, port)
-	}
-}
-
-func BenchmarkAllocatePorts(b *testing.B) {
-	p := New()
-
-	for i := 0; i < b.N; i++ {
-		for i := 0; i <= p.End-p.Begin; i++ {
-			port, err := p.RequestPort(defaultIP, "tcp", 0)
-			if err != nil {
-				b.Fatal(err)
-			}
-
-			if expected := p.Begin + i; port != expected {
-				b.Fatalf("Expected port %d got %d", expected, port)
-			}
-		}
-		p.ReleaseAll()
-	}
-}
-
-func TestPortAllocation(t *testing.T) {
-	p := New()
-
-	ip := net.ParseIP("192.168.0.1")
-	ip2 := net.ParseIP("192.168.0.2")
-	if port, err := p.RequestPort(ip, "tcp", 80); err != nil {
-		t.Fatal(err)
-	} else if port != 80 {
-		t.Fatalf("Acquire(80) should return 80, not %d", port)
-	}
-	port, err := p.RequestPort(ip, "tcp", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port <= 0 {
-		t.Fatalf("Acquire(0) should return a non-zero port")
-	}
-
-	if _, err := p.RequestPort(ip, "tcp", port); err == nil {
-		t.Fatalf("Acquiring a port already in use should return an error")
-	}
-
-	if newPort, err := p.RequestPort(ip, "tcp", 0); err != nil {
-		t.Fatal(err)
-	} else if newPort == port {
-		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
-	}
-
-	if _, err := p.RequestPort(ip, "tcp", 80); err == nil {
-		t.Fatalf("Acquiring a port already in use should return an error")
-	}
-	if _, err := p.RequestPort(ip2, "tcp", 80); err != nil {
-		t.Fatalf("It should be possible to allocate the same port on a different interface")
-	}
-	if _, err := p.RequestPort(ip2, "tcp", 80); err == nil {
-		t.Fatalf("Acquiring a port already in use should return an error")
-	}
-	if err := p.ReleasePort(ip, "tcp", 80); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := p.RequestPort(ip, "tcp", 80); err != nil {
-		t.Fatal(err)
-	}
-
-	port, err = p.RequestPort(ip, "tcp", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	port2, err := p.RequestPort(ip, "tcp", port+1)
-	if err != nil {
-		t.Fatal(err)
-	}
-	port3, err := p.RequestPort(ip, "tcp", 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port3 == port2 {
-		t.Fatal("Requesting a dynamic port should never allocate a used port")
-	}
-}
-
-func TestNoDuplicateBPR(t *testing.T) {
-	p := New()
-
-	if port, err := p.RequestPort(defaultIP, "tcp", p.Begin); err != nil {
-		t.Fatal(err)
-	} else if port != p.Begin {
-		t.Fatalf("Expected port %d got %d", p.Begin, port)
-	}
-
-	if port, err := p.RequestPort(defaultIP, "tcp", 0); err != nil {
-		t.Fatal(err)
-	} else if port == p.Begin {
-		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
-	}
-}
diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go
deleted file mode 100644
index 4a98d4a..0000000
--- a/daemon/networkdriver/portmapper/mapper.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package portmapper
-
-import (
-	"errors"
-	"fmt"
-	"net"
-	"sync"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon/networkdriver/portallocator"
-	"github.com/docker/docker/pkg/iptables"
-)
-
-type mapping struct {
-	proto         string
-	userlandProxy UserlandProxy
-	host          net.Addr
-	container     net.Addr
-}
-
-var NewProxy = NewProxyCommand
-
-var (
-	ErrUnknownBackendAddressType = errors.New("unknown container address type not supported")
-	ErrPortMappedForIP           = errors.New("port is already mapped to ip")
-	ErrPortNotMapped             = errors.New("port is not mapped")
-)
-
-type PortMapper struct {
-	chain *iptables.Chain
-
-	// udp:ip:port
-	currentMappings map[string]*mapping
-	lock            sync.Mutex
-
-	Allocator *portallocator.PortAllocator
-}
-
-func New() *PortMapper {
-	return NewWithPortAllocator(portallocator.New())
-}
-
-func NewWithPortAllocator(allocator *portallocator.PortAllocator) *PortMapper {
-	return &PortMapper{
-		currentMappings: make(map[string]*mapping),
-		Allocator:       allocator,
-	}
-}
-
-func (pm *PortMapper) SetIptablesChain(c *iptables.Chain) {
-	pm.chain = c
-}
-
-func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) {
-	pm.lock.Lock()
-	defer pm.lock.Unlock()
-
-	var (
-		m                 *mapping
-		proto             string
-		allocatedHostPort int
-		proxy             UserlandProxy
-	)
-
-	switch container.(type) {
-	case *net.TCPAddr:
-		proto = "tcp"
-		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
-			return nil, err
-		}
-
-		m = &mapping{
-			proto:     proto,
-			host:      &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
-			container: container,
-		}
-
-		proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port)
-	case *net.UDPAddr:
-		proto = "udp"
-		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
-			return nil, err
-		}
-
-		m = &mapping{
-			proto:     proto,
-			host:      &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
-			container: container,
-		}
-
-		proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port)
-	default:
-		return nil, ErrUnknownBackendAddressType
-	}
-
-	// release the allocated port on any further error during return.
-	defer func() {
-		if err != nil {
-			pm.Allocator.ReleasePort(hostIP, proto, allocatedHostPort)
-		}
-	}()
-
-	key := getKey(m.host)
-	if _, exists := pm.currentMappings[key]; exists {
-		return nil, ErrPortMappedForIP
-	}
-
-	containerIP, containerPort := getIPAndPort(m.container)
-	if err := pm.forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
-		return nil, err
-	}
-
-	cleanup := func() error {
-		// need to undo the iptables rules before we return
-		proxy.Stop()
-		pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
-		if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil {
-			return err
-		}
-
-		return nil
-	}
-
-	if err := proxy.Start(); err != nil {
-		if err := cleanup(); err != nil {
-			return nil, fmt.Errorf("Error during port allocation cleanup: %v", err)
-		}
-		return nil, err
-	}
-	m.userlandProxy = proxy
-	pm.currentMappings[key] = m
-	return m.host, nil
-}
-
-func (pm *PortMapper) Unmap(host net.Addr) error {
-	pm.lock.Lock()
-	defer pm.lock.Unlock()
-
-	key := getKey(host)
-	data, exists := pm.currentMappings[key]
-	if !exists {
-		return ErrPortNotMapped
-	}
-
-	data.userlandProxy.Stop()
-
-	delete(pm.currentMappings, key)
-
-	containerIP, containerPort := getIPAndPort(data.container)
-	hostIP, hostPort := getIPAndPort(data.host)
-	if err := pm.forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
-		log.Errorf("Error on iptables delete: %s", err)
-	}
-
-	switch a := host.(type) {
-	case *net.TCPAddr:
-		return pm.Allocator.ReleasePort(a.IP, "tcp", a.Port)
-	case *net.UDPAddr:
-		return pm.Allocator.ReleasePort(a.IP, "udp", a.Port)
-	}
-	return nil
-}
-
-func getKey(a net.Addr) string {
-	switch t := a.(type) {
-	case *net.TCPAddr:
-		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp")
-	case *net.UDPAddr:
-		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp")
-	}
-	return ""
-}
-
-func getIPAndPort(a net.Addr) (net.IP, int) {
-	switch t := a.(type) {
-	case *net.TCPAddr:
-		return t.IP, t.Port
-	case *net.UDPAddr:
-		return t.IP, t.Port
-	}
-	return nil, 0
-}
-
-func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
-	if pm.chain == nil {
-		return nil
-	}
-	return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort)
-}
diff --git a/daemon/networkdriver/portmapper/mapper_test.go b/daemon/networkdriver/portmapper/mapper_test.go
deleted file mode 100644
index 729fe56..0000000
--- a/daemon/networkdriver/portmapper/mapper_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package portmapper
-
-import (
-	"net"
-	"testing"
-
-	"github.com/docker/docker/pkg/iptables"
-)
-
-func init() {
-	// override this func to mock out the proxy server
-	NewProxy = NewMockProxyCommand
-}
-
-func TestSetIptablesChain(t *testing.T) {
-	pm := New()
-
-	c := &iptables.Chain{
-		Name:   "TEST",
-		Bridge: "192.168.1.1",
-	}
-
-	if pm.chain != nil {
-		t.Fatal("chain should be nil at init")
-	}
-
-	pm.SetIptablesChain(c)
-	if pm.chain == nil {
-		t.Fatal("chain should not be nil after set")
-	}
-}
-
-func TestMapPorts(t *testing.T) {
-	pm := New()
-	dstIp1 := net.ParseIP("192.168.0.1")
-	dstIp2 := net.ParseIP("192.168.0.2")
-	dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80}
-	dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80}
-
-	srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
-	srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
-
-	addrEqual := func(addr1, addr2 net.Addr) bool {
-		return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
-	}
-
-	if host, err := pm.Map(srcAddr1, dstIp1, 80); err != nil {
-		t.Fatalf("Failed to allocate port: %s", err)
-	} else if !addrEqual(dstAddr1, host) {
-		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
-			dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
-	}
-
-	if _, err := pm.Map(srcAddr1, dstIp1, 80); err == nil {
-		t.Fatalf("Port is in use - mapping should have failed")
-	}
-
-	if _, err := pm.Map(srcAddr2, dstIp1, 80); err == nil {
-		t.Fatalf("Port is in use - mapping should have failed")
-	}
-
-	if _, err := pm.Map(srcAddr2, dstIp2, 80); err != nil {
-		t.Fatalf("Failed to allocate port: %s", err)
-	}
-
-	if pm.Unmap(dstAddr1) != nil {
-		t.Fatalf("Failed to release port")
-	}
-
-	if pm.Unmap(dstAddr2) != nil {
-		t.Fatalf("Failed to release port")
-	}
-
-	if pm.Unmap(dstAddr2) == nil {
-		t.Fatalf("Port already released, but no error reported")
-	}
-}
-
-func TestGetUDPKey(t *testing.T) {
-	addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
-
-	key := getKey(addr)
-
-	if expected := "192.168.1.5:53/udp"; key != expected {
-		t.Fatalf("expected key %s got %s", expected, key)
-	}
-}
-
-func TestGetTCPKey(t *testing.T) {
-	addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80}
-
-	key := getKey(addr)
-
-	if expected := "192.168.1.5:80/tcp"; key != expected {
-		t.Fatalf("expected key %s got %s", expected, key)
-	}
-}
-
-func TestGetUDPIPAndPort(t *testing.T) {
-	addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
-
-	ip, port := getIPAndPort(addr)
-	if expected := "192.168.1.5"; ip.String() != expected {
-		t.Fatalf("expected ip %s got %s", expected, ip)
-	}
-
-	if ep := 53; port != ep {
-		t.Fatalf("expected port %d got %d", ep, port)
-	}
-}
-
-func TestMapAllPortsSingleInterface(t *testing.T) {
-	pm := New()
-	dstIp1 := net.ParseIP("0.0.0.0")
-	srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
-
-	hosts := []net.Addr{}
-	var host net.Addr
-	var err error
-
-	defer func() {
-		for _, val := range hosts {
-			pm.Unmap(val)
-		}
-	}()
-
-	for i := 0; i < 10; i++ {
-		start, end := pm.Allocator.Begin, pm.Allocator.End
-		for i := start; i < end; i++ {
-			if host, err = pm.Map(srcAddr1, dstIp1, 0); err != nil {
-				t.Fatal(err)
-			}
-
-			hosts = append(hosts, host)
-		}
-
-		if _, err := pm.Map(srcAddr1, dstIp1, start); err == nil {
-			t.Fatalf("Port %d should be bound but is not", start)
-		}
-
-		for _, val := range hosts {
-			if err := pm.Unmap(val); err != nil {
-				t.Fatal(err)
-			}
-		}
-
-		hosts = []net.Addr{}
-	}
-}
diff --git a/daemon/networkdriver/portmapper/mock_proxy.go b/daemon/networkdriver/portmapper/mock_proxy.go
deleted file mode 100644
index 253ce83..0000000
--- a/daemon/networkdriver/portmapper/mock_proxy.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package portmapper
-
-import "net"
-
-func NewMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy {
-	return &mockProxyCommand{}
-}
-
-type mockProxyCommand struct {
-}
-
-func (p *mockProxyCommand) Start() error {
-	return nil
-}
-
-func (p *mockProxyCommand) Stop() error {
-	return nil
-}
diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go
deleted file mode 100644
index 5d0aa0b..0000000
--- a/daemon/networkdriver/portmapper/proxy.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package portmapper
-
-import (
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net"
-	"os"
-	"os/exec"
-	"os/signal"
-	"strconv"
-	"syscall"
-	"time"
-
-	"github.com/docker/docker/pkg/proxy"
-	"github.com/docker/docker/pkg/reexec"
-)
-
-const userlandProxyCommandName = "docker-proxy"
-
-func init() {
-	reexec.Register(userlandProxyCommandName, execProxy)
-}
-
-type UserlandProxy interface {
-	Start() error
-	Stop() error
-}
-
-// proxyCommand wraps an exec.Cmd to run the userland TCP and UDP
-// proxies as separate processes.
-type proxyCommand struct {
-	cmd *exec.Cmd
-}
-
-// execProxy is the reexec function that is registered to start the userland proxies
-func execProxy() {
-	f := os.NewFile(3, "signal-parent")
-	host, container := parseHostContainerAddrs()
-
-	p, err := proxy.NewProxy(host, container)
-	if err != nil {
-		fmt.Fprintf(f, "1\n%s", err)
-		f.Close()
-		os.Exit(1)
-	}
-	go handleStopSignals(p)
-	fmt.Fprint(f, "0\n")
-	f.Close()
-
-	// Run will block until the proxy stops
-	p.Run()
-}
-
-// parseHostContainerAddrs parses the flags passed on reexec to create the TCP or UDP
-// net.Addrs to map the host and container ports
-func parseHostContainerAddrs() (host net.Addr, container net.Addr) {
-	var (
-		proto         = flag.String("proto", "tcp", "proxy protocol")
-		hostIP        = flag.String("host-ip", "", "host ip")
-		hostPort      = flag.Int("host-port", -1, "host port")
-		containerIP   = flag.String("container-ip", "", "container ip")
-		containerPort = flag.Int("container-port", -1, "container port")
-	)
-
-	flag.Parse()
-
-	switch *proto {
-	case "tcp":
-		host = &net.TCPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort}
-		container = &net.TCPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort}
-	case "udp":
-		host = &net.UDPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort}
-		container = &net.UDPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort}
-	default:
-		log.Fatalf("unsupported protocol %s", *proto)
-	}
-
-	return host, container
-}
-
-func handleStopSignals(p proxy.Proxy) {
-	s := make(chan os.Signal, 10)
-	signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP)
-
-	for _ = range s {
-		p.Close()
-
-		os.Exit(0)
-	}
-}
-
-func NewProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy {
-	args := []string{
-		userlandProxyCommandName,
-		"-proto", proto,
-		"-host-ip", hostIP.String(),
-		"-host-port", strconv.Itoa(hostPort),
-		"-container-ip", containerIP.String(),
-		"-container-port", strconv.Itoa(containerPort),
-	}
-
-	return &proxyCommand{
-		cmd: &exec.Cmd{
-			Path: reexec.Self(),
-			Args: args,
-			SysProcAttr: &syscall.SysProcAttr{
-				Pdeathsig: syscall.SIGTERM, // send a sigterm to the proxy if the daemon process dies
-			},
-		},
-	}
-}
-
-func (p *proxyCommand) Start() error {
-	r, w, err := os.Pipe()
-	if err != nil {
-		return fmt.Errorf("proxy unable to open os.Pipe %s", err)
-	}
-	defer r.Close()
-	p.cmd.ExtraFiles = []*os.File{w}
-	if err := p.cmd.Start(); err != nil {
-		return err
-	}
-	w.Close()
-
-	errchan := make(chan error, 1)
-	go func() {
-		buf := make([]byte, 2)
-		r.Read(buf)
-
-		if string(buf) != "0\n" {
-			errStr, err := ioutil.ReadAll(r)
-			if err != nil {
-				errchan <- fmt.Errorf("Error reading exit status from userland proxy: %v", err)
-				return
-			}
-
-			errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr)
-			return
-		}
-		errchan <- nil
-	}()
-
-	select {
-	case err := <-errchan:
-		return err
-	case <-time.After(16 * time.Second):
-		return fmt.Errorf("Timed out proxy starting the userland proxy")
-	}
-}
-
-func (p *proxyCommand) Stop() error {
-	if p.cmd.Process != nil {
-		if err := p.cmd.Process.Signal(os.Interrupt); err != nil {
-			return err
-		}
-		return p.cmd.Wait()
-	}
-	return nil
-}
diff --git a/daemon/networkdriver/utils.go b/daemon/networkdriver/utils.go
deleted file mode 100644
index 9f0c88c..0000000
--- a/daemon/networkdriver/utils.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package networkdriver
-
-import (
-	"errors"
-	"fmt"
-	"net"
-
-	"github.com/docker/libcontainer/netlink"
-)
-
-var (
-	networkGetRoutesFct = netlink.NetworkGetRoutes
-	ErrNoDefaultRoute   = errors.New("no default route")
-)
-
-func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error {
-	if len(nameservers) > 0 {
-		for _, ns := range nameservers {
-			_, nsNetwork, err := net.ParseCIDR(ns)
-			if err != nil {
-				return err
-			}
-			if NetworkOverlaps(toCheck, nsNetwork) {
-				return ErrNetworkOverlapsWithNameservers
-			}
-		}
-	}
-	return nil
-}
-
-func CheckRouteOverlaps(toCheck *net.IPNet) error {
-	networks, err := networkGetRoutesFct()
-	if err != nil {
-		return err
-	}
-
-	for _, network := range networks {
-		if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) {
-			return ErrNetworkOverlaps
-		}
-	}
-	return nil
-}
-
-// Detects overlap between one IPNet and another
-func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {
-	if len(netX.IP) == len(netY.IP) {
-		if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) {
-			return true
-		}
-		if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) {
-			return true
-		}
-	}
-	return false
-}
-
-// Calculates the first and last IP addresses in an IPNet
-func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
-	var netIP net.IP
-	if network.IP.To4() != nil {
-		netIP = network.IP.To4()
-	} else if network.IP.To16() != nil {
-		netIP = network.IP.To16()
-	} else {
-		return nil, nil
-	}
-
-	lastIP := make([]byte, len(netIP), len(netIP))
-
-	for i := 0; i < len(netIP); i++ {
-		lastIP[i] = netIP[i] | ^network.Mask[i]
-	}
-	return netIP.Mask(network.Mask), net.IP(lastIP)
-}
-
-// Return the first IPv4 address and slice of IPv6 addresses for the specified network interface
-func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) {
-	iface, err := net.InterfaceByName(name)
-	if err != nil {
-		return nil, nil, err
-	}
-	addrs, err := iface.Addrs()
-	if err != nil {
-		return nil, nil, err
-	}
-	var addrs4 []net.Addr
-	var addrs6 []net.Addr
-	for _, addr := range addrs {
-		ip := (addr.(*net.IPNet)).IP
-		if ip4 := ip.To4(); ip4 != nil {
-			addrs4 = append(addrs4, addr)
-		} else if ip6 := ip.To16(); len(ip6) == net.IPv6len {
-			addrs6 = append(addrs6, addr)
-		}
-	}
-	switch {
-	case len(addrs4) == 0:
-		return nil, nil, fmt.Errorf("Interface %v has no IPv4 addresses", name)
-	case len(addrs4) > 1:
-		fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n",
-			name, (addrs4[0].(*net.IPNet)).IP)
-	}
-	return addrs4[0], addrs6, nil
-}
-
-func GetDefaultRouteIface() (*net.Interface, error) {
-	rs, err := networkGetRoutesFct()
-	if err != nil {
-		return nil, fmt.Errorf("unable to get routes: %v", err)
-	}
-	for _, r := range rs {
-		if r.Default {
-			return r.Iface, nil
-		}
-	}
-	return nil, ErrNoDefaultRoute
-}
diff --git a/daemon/pause.go b/daemon/pause.go
index af943de..348f83f 100644
--- a/daemon/pause.go
+++ b/daemon/pause.go
@@ -1,37 +1,18 @@
 package daemon
 
-import (
-	"github.com/docker/docker/engine"
-)
+import "fmt"
 
-func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
-	}
-	name := job.Args[0]
+// ContainerPause pauses a container
+func (daemon *Daemon) ContainerPause(name string) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
+
 	if err := container.Pause(); err != nil {
-		return job.Errorf("Cannot pause container %s: %s", name, err)
+		return fmt.Errorf("Cannot pause container %s: %s", name, err)
 	}
 	container.LogEvent("pause")
-	return engine.StatusOK
-}
 
-func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status {
-	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
-	}
-	name := job.Args[0]
-	container, err := daemon.Get(name)
-	if err != nil {
-		return job.Error(err)
-	}
-	if err := container.Unpause(); err != nil {
-		return job.Errorf("Cannot unpause container %s: %s", name, err)
-	}
-	container.LogEvent("unpause")
-	return engine.StatusOK
+	return nil
 }
diff --git a/daemon/rename.go b/daemon/rename.go
index 6d8293f..72e14a1a 100644
--- a/daemon/rename.go
+++ b/daemon/rename.go
@@ -1,17 +1,17 @@
 package daemon
 
-import "github.com/docker/docker/engine"
+import (
+	"fmt"
+)
 
-func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
-	if len(job.Args) != 2 {
-		return job.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
+func (daemon *Daemon) ContainerRename(oldName, newName string) error {
+	if oldName == "" || newName == "" {
+		return fmt.Errorf("usage: docker rename OLD_NAME NEW_NAME")
 	}
-	oldName := job.Args[0]
-	newName := job.Args[1]
 
 	container, err := daemon.Get(oldName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	oldName = container.Name
@@ -19,7 +19,7 @@
 	container.Lock()
 	defer container.Unlock()
 	if newName, err = daemon.reserveName(container.ID, newName); err != nil {
-		return job.Errorf("Error when allocating new name: %s", err)
+		return fmt.Errorf("Error when allocating new name: %s", err)
 	}
 
 	container.Name = newName
@@ -32,13 +32,13 @@
 
 	if err := daemon.containerGraph.Delete(oldName); err != nil {
 		undo()
-		return job.Errorf("Failed to delete container %q: %v", oldName, err)
+		return fmt.Errorf("Failed to delete container %q: %v", oldName, err)
 	}
 
 	if err := container.toDisk(); err != nil {
 		undo()
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
diff --git a/daemon/resize.go b/daemon/resize.go
index 860f79e..f225394 100644
--- a/daemon/resize.go
+++ b/daemon/resize.go
@@ -1,53 +1,19 @@
 package daemon
 
-import (
-	"strconv"
-
-	"github.com/docker/docker/engine"
-)
-
-func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
-	if len(job.Args) != 3 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
-	}
-	name := job.Args[0]
-	height, err := strconv.Atoi(job.Args[1])
-	if err != nil {
-		return job.Error(err)
-	}
-	width, err := strconv.Atoi(job.Args[2])
-	if err != nil {
-		return job.Error(err)
-	}
+func (daemon *Daemon) ContainerResize(name string, height, width int) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
-	if err := container.Resize(height, width); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+
+	return container.Resize(height, width)
 }
 
-func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status {
-	if len(job.Args) != 3 {
-		return job.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name)
-	}
-	name := job.Args[0]
-	height, err := strconv.Atoi(job.Args[1])
-	if err != nil {
-		return job.Error(err)
-	}
-	width, err := strconv.Atoi(job.Args[2])
-	if err != nil {
-		return job.Error(err)
-	}
+func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
 	execConfig, err := daemon.getExecConfig(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
-	if err := execConfig.Resize(height, width); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+
+	return execConfig.Resize(height, width)
 }
diff --git a/daemon/restart.go b/daemon/restart.go
index bcde628..86cc97d 100644
--- a/daemon/restart.go
+++ b/daemon/restart.go
@@ -1,27 +1,15 @@
 package daemon
 
-import (
-	"github.com/docker/docker/engine"
-)
+import "fmt"
 
-func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		t    = 10
-	)
-	if job.EnvExists("t") {
-		t = job.GetenvInt("t")
-	}
+func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
-	if err := container.Restart(int(t)); err != nil {
-		return job.Errorf("Cannot restart container %s: %s\n", name, err)
+	if err := container.Restart(seconds); err != nil {
+		return fmt.Errorf("Cannot restart container %s: %s\n", name, err)
 	}
 	container.LogEvent("restart")
-	return engine.StatusOK
+	return nil
 }
diff --git a/daemon/start.go b/daemon/start.go
index e51ada2..09b8b28 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -2,81 +2,40 @@
 
 import (
 	"fmt"
-	"os"
-	"strings"
 
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
-	if len(job.Args) < 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
-	}
-	var (
-		name = job.Args[0]
-	)
-
+func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if container.IsPaused() {
-		return job.Errorf("Cannot start a paused container, try unpause instead.")
+		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
 	}
 
 	if container.IsRunning() {
-		return job.Errorf("Container already started")
+		return fmt.Errorf("Container already started")
 	}
 
-	// If no environment was set, then no hostconfig was passed.
+	if _, err = daemon.verifyHostConfig(hostConfig); err != nil {
+		return err
+	}
+
 	// This is kept for backward compatibility - hostconfig should be passed when
 	// creating a container, not during start.
-	if len(job.Environ()) > 0 {
-		hostConfig := runconfig.ContainerHostConfigFromJob(job)
+	if hostConfig != nil {
 		if err := daemon.setHostConfig(container, hostConfig); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
+
 	if err := container.Start(); err != nil {
 		container.LogEvent("die")
-		return job.Errorf("Cannot start container %s: %s", name, err)
+		return fmt.Errorf("Cannot start container %s: %s", name, err)
 	}
 
-	return engine.StatusOK
-}
-
-func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
-	container.Lock()
-	defer container.Unlock()
-	if err := parseSecurityOpt(container, hostConfig); err != nil {
-		return err
-	}
-
-	// FIXME: this should be handled by the volume subsystem
-	// Validate the HostConfig binds. Make sure that:
-	// the source exists
-	for _, bind := range hostConfig.Binds {
-		splitBind := strings.Split(bind, ":")
-		source := splitBind[0]
-
-		// ensure the source exists on the host
-		_, err := os.Stat(source)
-		if err != nil && os.IsNotExist(err) {
-			err = os.MkdirAll(source, 0755)
-			if err != nil {
-				return fmt.Errorf("Could not create local directory '%s' for bind mount: %v!", source, err)
-			}
-		}
-	}
-	// Register any links from the host config before starting the container
-	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
-		return err
-	}
-
-	container.hostConfig = hostConfig
-	container.toDisk()
-
 	return nil
 }
diff --git a/daemon/state.go b/daemon/state.go
index 6387e6f..4119d0e 100644
--- a/daemon/state.go
+++ b/daemon/state.go
@@ -183,7 +183,7 @@
 	s.waitChan = make(chan struct{})
 }
 
-// SetRestarting is when docker hanldes the auto restart of containers when they are
+// SetRestarting is when docker handles the auto restart of containers when they are
 // in the middle of a stop and being restarted again
 func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
diff --git a/daemon/stats.go b/daemon/stats.go
index 85d4a08..c7da913 100644
--- a/daemon/stats.go
+++ b/daemon/stats.go
@@ -2,20 +2,20 @@
 
 import (
 	"encoding/json"
+	"io"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/engine"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/cgroups"
 )
 
-func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status {
-	updates, err := daemon.SubscribeToContainerStats(job.Args[0])
+func (daemon *Daemon) ContainerStats(name string, stream bool, out io.Writer) error {
+	updates, err := daemon.SubscribeToContainerStats(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
-	enc := json.NewEncoder(job.Stdout)
+	enc := json.NewEncoder(out)
 	for v := range updates {
 		update := v.(*execdriver.ResourceStats)
 		ss := convertToAPITypes(update.Stats)
@@ -24,11 +24,14 @@
 		ss.CpuStats.SystemUsage = update.SystemUsage
 		if err := enc.Encode(ss); err != nil {
 			// TODO: handle the specific broken pipe
-			daemon.UnsubscribeToContainerStats(job.Args[0], updates)
-			return job.Error(err)
+			daemon.UnsubscribeToContainerStats(name, updates)
+			return err
+		}
+		if !stream {
+			break
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // convertToAPITypes converts the libcontainer.Stats to the api specific
diff --git a/daemon/stats_collector.go b/daemon/stats_collector.go
index 779bd1a..98b44c3 100644
--- a/daemon/stats_collector.go
+++ b/daemon/stats_collector.go
@@ -9,7 +9,7 @@
 	"sync"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/pubsub"
 	"github.com/docker/libcontainer/system"
@@ -24,6 +24,7 @@
 		interval:   interval,
 		publishers: make(map[*Container]*pubsub.Publisher),
 		clockTicks: uint64(system.GetClockTicks()),
+		bufReader:  bufio.NewReaderSize(nil, 128),
 	}
 	go s.run()
 	return s
@@ -35,6 +36,7 @@
 	interval   time.Duration
 	clockTicks uint64
 	publishers map[*Container]*pubsub.Publisher
+	bufReader  *bufio.Reader
 }
 
 // collect registers the container with the collector and adds it to
@@ -76,22 +78,42 @@
 }
 
 func (s *statsCollector) run() {
-	for _ = range time.Tick(s.interval) {
+	type publishersPair struct {
+		container *Container
+		publisher *pubsub.Publisher
+	}
+	// we cannot determine the capacity here.
+	// it will grow enough in first iteration
+	var pairs []publishersPair
+
+	for range time.Tick(s.interval) {
+		systemUsage, err := s.getSystemCpuUsage()
+		if err != nil {
+			logrus.Errorf("collecting system cpu usage: %v", err)
+			continue
+		}
+
+		// it does not make sense in the first iteration,
+		// but saves allocations in further iterations
+		pairs = pairs[:0]
+
+		s.m.Lock()
 		for container, publisher := range s.publishers {
-			systemUsage, err := s.getSystemCpuUsage()
-			if err != nil {
-				log.Errorf("collecting system cpu usage for %s: %v", container.ID, err)
-				continue
-			}
-			stats, err := container.Stats()
+			// copy pointers here to release the lock ASAP
+			pairs = append(pairs, publishersPair{container, publisher})
+		}
+		s.m.Unlock()
+
+		for _, pair := range pairs {
+			stats, err := pair.container.Stats()
 			if err != nil {
 				if err != execdriver.ErrNotRunning {
-					log.Errorf("collecting stats for %s: %v", container.ID, err)
+					logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
 				}
 				continue
 			}
 			stats.SystemUsage = systemUsage
-			publisher.Publish(stats)
+			pair.publisher.Publish(stats)
 		}
 	}
 }
@@ -101,14 +123,23 @@
 // getSystemCpuUSage returns the host system's cpu usage in nanoseconds
 // for the system to match the cgroup readings are returned in the same format.
 func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
+	var line string
 	f, err := os.Open("/proc/stat")
 	if err != nil {
 		return 0, err
 	}
-	defer f.Close()
-	sc := bufio.NewScanner(f)
-	for sc.Scan() {
-		parts := strings.Fields(sc.Text())
+	defer func() {
+		s.bufReader.Reset(nil)
+		f.Close()
+	}()
+	s.bufReader.Reset(f)
+	err = nil
+	for err == nil {
+		line, err = s.bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
 		switch parts[0] {
 		case "cpu":
 			if len(parts) < 8 {
diff --git a/daemon/stop.go b/daemon/stop.go
index e2f1d28..b481f87 100644
--- a/daemon/stop.go
+++ b/daemon/stop.go
@@ -1,30 +1,18 @@
 package daemon
 
-import (
-	"github.com/docker/docker/engine"
-)
+import "fmt"
 
-func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
-	}
-	var (
-		name = job.Args[0]
-		t    = 10
-	)
-	if job.EnvExists("t") {
-		t = job.GetenvInt("t")
-	}
+func (daemon *Daemon) ContainerStop(name string, seconds int) error {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if !container.IsRunning() {
-		return job.Errorf("Container already stopped")
+		return fmt.Errorf("Container already stopped")
 	}
-	if err := container.Stop(int(t)); err != nil {
-		return job.Errorf("Cannot stop container %s: %s\n", name, err)
+	if err := container.Stop(seconds); err != nil {
+		return fmt.Errorf("Cannot stop container %s: %s\n", name, err)
 	}
 	container.LogEvent("stop")
-	return engine.StatusOK
+	return nil
 }
diff --git a/daemon/top.go b/daemon/top.go
index 782cc83..14b2523 100644
--- a/daemon/top.go
+++ b/daemon/top.go
@@ -1,58 +1,53 @@
 package daemon
 
 import (
+	"fmt"
 	"os/exec"
 	"strconv"
 	"strings"
 
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/api/types"
 )
 
-func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 && len(job.Args) != 2 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
-	}
-	var (
-		name   = job.Args[0]
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
+	if psArgs == "" {
 		psArgs = "-ef"
-	)
-
-	if len(job.Args) == 2 && job.Args[1] != "" {
-		psArgs = job.Args[1]
 	}
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
+
 	if !container.IsRunning() {
-		return job.Errorf("Container %s is not running", name)
+		return nil, fmt.Errorf("Container %s is not running", name)
 	}
+
 	pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
+
 	output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output()
 	if err != nil {
-		return job.Errorf("Error running ps: %s", err)
+		return nil, fmt.Errorf("Error running ps: %s", err)
 	}
 
+	procList := &types.ContainerProcessList{}
+
 	lines := strings.Split(string(output), "\n")
-	header := strings.Fields(lines[0])
-	out := &engine.Env{}
-	out.SetList("Titles", header)
+	procList.Titles = strings.Fields(lines[0])
 
 	pidIndex := -1
-	for i, name := range header {
+	for i, name := range procList.Titles {
 		if name == "PID" {
 			pidIndex = i
 		}
 	}
 	if pidIndex == -1 {
-		return job.Errorf("Couldn't find PID field in ps output")
+		return nil, fmt.Errorf("Couldn't find PID field in ps output")
 	}
 
-	processes := [][]string{}
 	for _, line := range lines[1:] {
 		if len(line) == 0 {
 			continue
@@ -60,20 +55,18 @@
 		fields := strings.Fields(line)
 		p, err := strconv.Atoi(fields[pidIndex])
 		if err != nil {
-			return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
+			return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
 		}
 
 		for _, pid := range pids {
 			if pid == p {
 				// Make sure number of fields equals number of header titles
 				// merging "overhanging" fields
-				process := fields[:len(header)-1]
-				process = append(process, strings.Join(fields[len(header)-1:], " "))
-				processes = append(processes, process)
+				process := fields[:len(procList.Titles)-1]
+				process = append(process, strings.Join(fields[len(procList.Titles)-1:], " "))
+				procList.Processes = append(procList.Processes, process)
 			}
 		}
 	}
-	out.SetJson("Processes", processes)
-	out.WriteTo(job.Stdout)
-	return engine.StatusOK
+	return procList, nil
 }
diff --git a/daemon/unpause.go b/daemon/unpause.go
new file mode 100644
index 0000000..b13c85e
--- /dev/null
+++ b/daemon/unpause.go
@@ -0,0 +1,18 @@
+package daemon
+
+import "fmt"
+
+// ContainerUnpause unpauses a container
+func (daemon *Daemon) ContainerUnpause(name string) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
+	}
+
+	if err := container.Unpause(); err != nil {
+		return fmt.Errorf("Cannot unpause container %s: %s", name, err)
+	}
+	container.LogEvent("unpause")
+
+	return nil
+}
diff --git a/daemon/utils.go b/daemon/utils.go
index 6202e6d..ec001ca 100644
--- a/daemon/utils.go
+++ b/daemon/utils.go
@@ -42,7 +42,8 @@
 
 	// merge in the lxc conf options into the generic config map
 	if lxcConf := hostConfig.LxcConf; lxcConf != nil {
-		for _, pair := range lxcConf {
+		lxSlice := lxcConf.Slice()
+		for _, pair := range lxSlice {
 			// because lxc conf gets the driver name lxc.XXXX we need to trim it off
 			// and let the lxc driver add it back later if needed
 			if !strings.Contains(pair.Key, ".") {
diff --git a/daemon/utils_test.go b/daemon/utils_test.go
index ff5b082..f818438 100644
--- a/daemon/utils_test.go
+++ b/daemon/utils_test.go
@@ -4,14 +4,14 @@
 	"testing"
 
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
 )
 
 func TestMergeLxcConfig(t *testing.T) {
+	kv := []runconfig.KeyValuePair{
+		{"lxc.cgroups.cpuset", "1,2"},
+	}
 	hostConfig := &runconfig.HostConfig{
-		LxcConf: []utils.KeyValuePair{
-			{Key: "lxc.cgroups.cpuset", Value: "1,2"},
-		},
+		LxcConf: runconfig.NewLxcConfig(kv),
 	}
 
 	out, err := mergeLxcConfIntoOptions(hostConfig)
diff --git a/daemon/volumes.go b/daemon/volumes.go
index a076b46..19cc153 100644
--- a/daemon/volumes.go
+++ b/daemon/volumes.go
@@ -1,259 +1,107 @@
 package daemon
 
 import (
+	"encoding/json"
 	"fmt"
-	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"sort"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/symlink"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/docker/volumes"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	"github.com/docker/libcontainer/label"
 )
 
-type Mount struct {
-	MountToPath string
-	container   *Container
-	volume      *volumes.Volume
-	Writable    bool
-	copyData    bool
-	from        *Container
-	isBind      bool
+type mountPoint struct {
+	Name        string
+	Destination string
+	Driver      string
+	RW          bool
+	Volume      volume.Volume `json:"-"`
+	Source      string
+	Relabel     string
 }
 
-func (mnt *Mount) Export(resource string) (io.ReadCloser, error) {
-	var name string
-	if resource == mnt.MountToPath[1:] {
-		name = filepath.Base(resource)
-	}
-	path, err := filepath.Rel(mnt.MountToPath[1:], resource)
-	if err != nil {
-		return nil, err
-	}
-	return mnt.volume.Export(path, name)
-}
-
-func (container *Container) prepareVolumes() error {
-	if container.Volumes == nil || len(container.Volumes) == 0 {
-		container.Volumes = make(map[string]string)
-		container.VolumesRW = make(map[string]bool)
+func (m *mountPoint) Setup() (string, error) {
+	if m.Volume != nil {
+		return m.Volume.Mount()
 	}
 
-	return container.createVolumes()
-}
-
-// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order
-func (container *Container) sortedVolumeMounts() []string {
-	var mountPaths []string
-	for path := range container.Volumes {
-		mountPaths = append(mountPaths, path)
-	}
-
-	sort.Strings(mountPaths)
-	return mountPaths
-}
-
-func (container *Container) createVolumes() error {
-	mounts, err := container.parseVolumeMountConfig()
-	if err != nil {
-		return err
-	}
-
-	for _, mnt := range mounts {
-		if err := mnt.initialize(); err != nil {
-			return err
-		}
-	}
-
-	// On every start, this will apply any new `VolumesFrom` entries passed in via HostConfig, which may override volumes set in `create`
-	return container.applyVolumesFrom()
-}
-
-func (m *Mount) initialize() error {
-	// No need to initialize anything since it's already been initialized
-	if hostPath, exists := m.container.Volumes[m.MountToPath]; exists {
-		// If this is a bind-mount/volumes-from, maybe it was passed in at start instead of create
-		// We need to make sure bind-mounts/volumes-from passed on start can override existing ones.
-		if (!m.volume.IsBindMount && !m.isBind) && m.from == nil {
-			return nil
-		}
-		if m.volume.Path == hostPath {
-			return nil
-		}
-
-		// Make sure we remove these old volumes we don't actually want now.
-		// Ignore any errors here since this is just cleanup, maybe someone volumes-from'd this volume
-		if v := m.container.daemon.volumes.Get(hostPath); v != nil {
-			v.RemoveContainer(m.container.ID)
-			m.container.daemon.volumes.Delete(v.Path)
-		}
-	}
-
-	// This is the full path to container fs + mntToPath
-	containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(m.container.basefs, m.MountToPath), m.container.basefs)
-	if err != nil {
-		return err
-	}
-	m.container.VolumesRW[m.MountToPath] = m.Writable
-	m.container.Volumes[m.MountToPath] = m.volume.Path
-	m.volume.AddContainer(m.container.ID)
-	if m.Writable && m.copyData {
-		// Copy whatever is in the container at the mntToPath to the volume
-		copyExistingContents(containerMntPath, m.volume.Path)
-	}
-
-	return nil
-}
-
-func (container *Container) VolumePaths() map[string]struct{} {
-	var paths = make(map[string]struct{})
-	for _, path := range container.Volumes {
-		paths[path] = struct{}{}
-	}
-	return paths
-}
-
-func (container *Container) registerVolumes() {
-	for path := range container.VolumePaths() {
-		if v := container.daemon.volumes.Get(path); v != nil {
-			v.AddContainer(container.ID)
-			continue
-		}
-
-		// if container was created with an old daemon, this volume may not be registered so we need to make sure it gets registered
-		writable := true
-		if rw, exists := container.VolumesRW[path]; exists {
-			writable = rw
-		}
-		v, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
-		if err != nil {
-			log.Debugf("error registering volume %s: %v", path, err)
-			continue
-		}
-		v.AddContainer(container.ID)
-	}
-}
-
-func (container *Container) derefVolumes() {
-	for path := range container.VolumePaths() {
-		vol := container.daemon.volumes.Get(path)
-		if vol == nil {
-			log.Debugf("Volume %s was not found and could not be dereferenced", path)
-			continue
-		}
-		vol.RemoveContainer(container.ID)
-	}
-}
-
-func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) {
-	var mounts = make(map[string]*Mount)
-	// Get all the bind mounts
-	for _, spec := range container.hostConfig.Binds {
-		path, mountToPath, writable, err := parseBindMountSpec(spec)
-		if err != nil {
-			return nil, err
-		}
-		// Check if a bind mount has already been specified for the same container path
-		if m, exists := mounts[mountToPath]; exists {
-			return nil, fmt.Errorf("Duplicate volume %q: %q already in use, mounted from %q", path, mountToPath, m.volume.Path)
-		}
-		// Check if a volume already exists for this and use it
-		vol, err := container.daemon.volumes.FindOrCreateVolume(path, writable)
-		if err != nil {
-			return nil, err
-		}
-		mounts[mountToPath] = &Mount{
-			container:   container,
-			volume:      vol,
-			MountToPath: mountToPath,
-			Writable:    writable,
-			isBind:      true, // in case the volume itself is a normal volume, but is being mounted in as a bindmount here
-		}
-	}
-
-	// Get the rest of the volumes
-	for path := range container.Config.Volumes {
-		// Check if this is already added as a bind-mount
-		path = filepath.Clean(path)
-		if _, exists := mounts[path]; exists {
-			continue
-		}
-
-		// Check if this has already been created
-		if _, exists := container.Volumes[path]; exists {
-			continue
-		}
-		realPath, err := container.getResourcePath(path)
-		if err != nil {
-			return nil, fmt.Errorf("failed to evaluate the absolute path of symlink")
-		}
-		if stat, err := os.Stat(realPath); err == nil {
-			if !stat.IsDir() {
-				return nil, fmt.Errorf("file exists at %s, can't create volume there", realPath)
+	if len(m.Source) > 0 {
+		if _, err := os.Stat(m.Source); err != nil {
+			if !os.IsNotExist(err) {
+				return "", err
+			}
+			if err := os.MkdirAll(m.Source, 0755); err != nil {
+				return "", err
 			}
 		}
-
-		vol, err := container.daemon.volumes.FindOrCreateVolume("", true)
-		if err != nil {
-			return nil, err
-		}
-		mounts[path] = &Mount{
-			container:   container,
-			MountToPath: path,
-			volume:      vol,
-			Writable:    true,
-			copyData:    true,
-		}
+		return m.Source, nil
 	}
 
-	return mounts, nil
+	return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
 }
 
-func parseBindMountSpec(spec string) (string, string, bool, error) {
-	var (
-		path, mountToPath string
-		writable          bool
-		arr               = strings.Split(spec, ":")
-	)
+func (m *mountPoint) Path() string {
+	if m.Volume != nil {
+		return m.Volume.Path()
+	}
+
+	return m.Source
+}
+
+func parseBindMount(spec string, mountLabel string, config *runconfig.Config) (*mountPoint, error) {
+	bind := &mountPoint{
+		RW: true,
+	}
+	arr := strings.Split(spec, ":")
 
 	switch len(arr) {
 	case 2:
-		path = arr[0]
-		mountToPath = arr[1]
-		writable = true
+		bind.Destination = arr[1]
 	case 3:
-		path = arr[0]
-		mountToPath = arr[1]
-		writable = validMountMode(arr[2]) && arr[2] == "rw"
+		bind.Destination = arr[1]
+		mode := arr[2]
+		if !validMountMode(mode) {
+			return nil, fmt.Errorf("invalid mode for volumes-from: %s", mode)
+		}
+		bind.RW = rwModes[mode]
+		// Relabel will apply a SELinux label, if necessary
+		bind.Relabel = mode
 	default:
-		return "", "", false, fmt.Errorf("Invalid volume specification: %s", spec)
+		return nil, fmt.Errorf("Invalid volume specification: %s", spec)
 	}
 
-	if !filepath.IsAbs(path) {
-		return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path)
+	name, source, err := parseVolumeSource(arr[0], config)
+	if err != nil {
+		return nil, err
 	}
 
-	path = filepath.Clean(path)
-	mountToPath = filepath.Clean(mountToPath)
-	return path, mountToPath, writable, nil
+	if len(source) == 0 {
+		bind.Driver = config.VolumeDriver
+		if len(bind.Driver) == 0 {
+			bind.Driver = volume.DefaultDriverName
+		}
+	} else {
+		bind.Source = filepath.Clean(source)
+	}
+
+	bind.Name = name
+	bind.Destination = filepath.Clean(bind.Destination)
+	return bind, nil
 }
 
-func parseVolumesFromSpec(spec string) (string, string, error) {
-	specParts := strings.SplitN(spec, ":", 2)
-	if len(specParts) == 0 {
+func parseVolumesFrom(spec string) (string, string, error) {
+	if len(spec) == 0 {
 		return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
 	}
 
-	var (
-		id   = specParts[0]
-		mode = "rw"
-	)
+	specParts := strings.SplitN(spec, ":", 2)
+	id := specParts[0]
+	mode := "rw"
+
 	if len(specParts) == 2 {
 		mode = specParts[1]
 		if !validMountMode(mode) {
@@ -263,105 +111,28 @@
 	return id, mode, nil
 }
 
-func (container *Container) applyVolumesFrom() error {
-	volumesFrom := container.hostConfig.VolumesFrom
-	if len(volumesFrom) > 0 && container.AppliedVolumesFrom == nil {
-		container.AppliedVolumesFrom = make(map[string]struct{})
-	}
+// read-write modes
+var rwModes = map[string]bool{
+	"rw":   true,
+	"rw,Z": true,
+	"rw,z": true,
+	"z,rw": true,
+	"Z,rw": true,
+	"Z":    true,
+	"z":    true,
+}
 
-	mountGroups := make(map[string][]*Mount)
-
-	for _, spec := range volumesFrom {
-		id, mode, err := parseVolumesFromSpec(spec)
-		if err != nil {
-			return err
-		}
-		if _, exists := container.AppliedVolumesFrom[id]; exists {
-			// Don't try to apply these since they've already been applied
-			continue
-		}
-
-		c, err := container.daemon.Get(id)
-		if err != nil {
-			return fmt.Errorf("Could not apply volumes of non-existent container %q.", id)
-		}
-
-		var (
-			fromMounts = c.VolumeMounts()
-			mounts     []*Mount
-		)
-
-		for _, mnt := range fromMounts {
-			mnt.Writable = mnt.Writable && (mode == "rw")
-			mounts = append(mounts, mnt)
-		}
-		mountGroups[id] = mounts
-	}
-
-	for id, mounts := range mountGroups {
-		for _, mnt := range mounts {
-			mnt.from = mnt.container
-			mnt.container = container
-			if err := mnt.initialize(); err != nil {
-				return err
-			}
-		}
-		container.AppliedVolumesFrom[id] = struct{}{}
-	}
-	return nil
+// read-only modes
+var roModes = map[string]bool{
+	"ro":   true,
+	"ro,Z": true,
+	"ro,z": true,
+	"z,ro": true,
+	"Z,ro": true,
 }
 
 func validMountMode(mode string) bool {
-	validModes := map[string]bool{
-		"rw": true,
-		"ro": true,
-	}
-
-	return validModes[mode]
-}
-
-func (container *Container) setupMounts() error {
-	mounts := []execdriver.Mount{}
-
-	// Mount user specified volumes
-	// Note, these are not private because you may want propagation of (un)mounts from host
-	// volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
-	// want this new mount in the container
-	// These mounts must be ordered based on the length of the path that it is being mounted to (lexicographic)
-	for _, path := range container.sortedVolumeMounts() {
-		mounts = append(mounts, execdriver.Mount{
-			Source:      container.Volumes[path],
-			Destination: path,
-			Writable:    container.VolumesRW[path],
-		})
-	}
-
-	if container.ResolvConfPath != "" {
-		mounts = append(mounts, execdriver.Mount{Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true})
-	}
-
-	if container.HostnamePath != "" {
-		mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true})
-	}
-
-	if container.HostsPath != "" {
-		mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true})
-	}
-
-	container.command.Mounts = mounts
-	return nil
-}
-
-func (container *Container) VolumeMounts() map[string]*Mount {
-	mounts := make(map[string]*Mount)
-
-	for mountToPath, path := range container.Volumes {
-		if v := container.daemon.volumes.Get(path); v != nil {
-			mounts[mountToPath] = &Mount{volume: v, container: container, MountToPath: mountToPath, Writable: container.VolumesRW[mountToPath]}
-		}
-	}
-
-	return mounts
+	return roModes[mode] || rwModes[mode]
 }
 
 func copyExistingContents(source, destination string) error {
@@ -369,13 +140,11 @@
 	if err != nil {
 		return err
 	}
-
 	if len(volList) > 0 {
 		srcList, err := ioutil.ReadDir(destination)
 		if err != nil {
 			return err
 		}
-
 		if len(srcList) == 0 {
 			// If the source volume is empty copy files from the root into the volume
 			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
@@ -383,21 +152,146 @@
 			}
 		}
 	}
-
 	return copyOwnership(source, destination)
 }
 
-// copyOwnership copies the permissions and uid:gid of the source file
-// into the destination file
-func copyOwnership(source, destination string) error {
-	stat, err := system.Stat(source)
+// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
+// It follows the next sequence to decide what to mount in each final destination:
+//
+// 1. Select the previously configured mount points for the containers, if any.
+// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
+	binds := map[string]bool{}
+	mountPoints := map[string]*mountPoint{}
+
+	// 1. Read already configured mount points.
+	for name, point := range container.MountPoints {
+		mountPoints[name] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := parseVolumesFrom(v)
+		if err != nil {
+			return err
+		}
+
+		c, err := daemon.Get(containerID)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range c.MountPoints {
+			cp := m
+			cp.RW = m.RW && mode != "ro"
+
+			if len(m.Source) == 0 {
+				v, err := createVolume(m.Name, m.Driver)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+
+			mountPoints[cp.Destination] = cp
+		}
+	}
+
+	// 3. Read bind mounts
+	for _, b := range hostConfig.Binds {
+		// #10618
+		bind, err := parseBindMount(b, container.MountLabel, container.Config)
+		if err != nil {
+			return err
+		}
+
+		if binds[bind.Destination] {
+			return fmt.Errorf("Duplicate bind mount %s", bind.Destination)
+		}
+
+		if len(bind.Name) > 0 && len(bind.Driver) > 0 {
+			// create the volume
+			v, err := createVolume(bind.Name, bind.Driver)
+			if err != nil {
+				return err
+			}
+			bind.Volume = v
+			bind.Source = v.Path()
+			// Since this is just a named volume and not a typical bind, set to shared mode `z`
+			if bind.Relabel == "" {
+				bind.Relabel = "z"
+			}
+		}
+
+		if err := label.Relabel(bind.Source, container.MountLabel, bind.Relabel); err != nil {
+			return err
+		}
+		binds[bind.Destination] = true
+		mountPoints[bind.Destination] = bind
+	}
+
+	container.Lock()
+	container.MountPoints = mountPoints
+	container.Unlock()
+
+	return nil
+}
+
+// verifyOldVolumesInfo ports volumes configured for the containers pre docker 1.7.
+// It reads the container configuration and creates valid mount points for the old volumes.
+func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
+	jsonPath, err := container.jsonPath()
 	if err != nil {
 		return err
 	}
-
-	if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil {
+	f, err := os.Open(jsonPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
 		return err
 	}
 
-	return os.Chmod(destination, os.FileMode(stat.Mode()))
+	type oldContVolCfg struct {
+		Volumes   map[string]string
+		VolumesRW map[string]bool
+	}
+
+	vols := oldContVolCfg{
+		Volumes:   make(map[string]string),
+		VolumesRW: make(map[string]bool),
+	}
+	if err := json.NewDecoder(f).Decode(&vols); err != nil {
+		return err
+	}
+
+	for destination, hostPath := range vols.Volumes {
+		vfsPath := filepath.Join(daemon.root, "vfs", "dir")
+
+		if strings.HasPrefix(hostPath, vfsPath) {
+			id := filepath.Base(hostPath)
+
+			rw := vols.VolumesRW != nil && vols.VolumesRW[destination]
+			container.addLocalMountPoint(id, destination, rw)
+		}
+	}
+
+	return container.ToDisk()
+}
+
+func createVolume(name, driverName string) (volume.Volume, error) {
+	vd, err := getVolumeDriver(driverName)
+	if err != nil {
+		return nil, err
+	}
+	return vd.Create(name)
+}
+
+func removeVolume(v volume.Volume) error {
+	vd, err := getVolumeDriver(v.DriverName())
+	if err != nil {
+		return nil
+	}
+	return vd.Remove(v)
 }
diff --git a/daemon/volumes_experimental.go b/daemon/volumes_experimental.go
new file mode 100644
index 0000000..c39b790
--- /dev/null
+++ b/daemon/volumes_experimental.go
@@ -0,0 +1,26 @@
+// +build experimental
+
+package daemon
+
+import (
+	"path/filepath"
+
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+)
+
+func getVolumeDriver(name string) (volume.Driver, error) {
+	if name == "" {
+		name = volume.DefaultDriverName
+	}
+	return volumedrivers.Lookup(name)
+}
+
+func parseVolumeSource(spec string, config *runconfig.Config) (string, string, error) {
+	if !filepath.IsAbs(spec) {
+		return spec, "", nil
+	}
+
+	return "", spec, nil
+}
diff --git a/daemon/volumes_experimental_unit_test.go b/daemon/volumes_experimental_unit_test.go
new file mode 100644
index 0000000..842e101
--- /dev/null
+++ b/daemon/volumes_experimental_unit_test.go
@@ -0,0 +1,87 @@
+// +build experimental
+
+package daemon
+
+import (
+	"testing"
+
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+)
+
+type fakeDriver struct{}
+
+func (fakeDriver) Name() string                              { return "fake" }
+func (fakeDriver) Create(name string) (volume.Volume, error) { return nil, nil }
+func (fakeDriver) Remove(v volume.Volume) error              { return nil }
+
+func TestGetVolumeDriver(t *testing.T) {
+	_, err := getVolumeDriver("missing")
+	if err == nil {
+		t.Fatal("Expected error, was nil")
+	}
+
+	volumedrivers.Register(fakeDriver{}, "fake")
+	d, err := getVolumeDriver("fake")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if d.Name() != "fake" {
+		t.Fatalf("Expected fake driver, got %s\n", d.Name())
+	}
+}
+
+func TestParseBindMount(t *testing.T) {
+	cases := []struct {
+		bind       string
+		driver     string
+		expDest    string
+		expSource  string
+		expName    string
+		expDriver  string
+		mountLabel string
+		expRW      bool
+		fail       bool
+	}{
+		{"/tmp:/tmp", "", "/tmp", "/tmp", "", "", "", true, false},
+		{"/tmp:/tmp:ro", "", "/tmp", "/tmp", "", "", "", false, false},
+		{"/tmp:/tmp:rw", "", "/tmp", "/tmp", "", "", "", true, false},
+		{"/tmp:/tmp:foo", "", "/tmp", "/tmp", "", "", "", false, true},
+		{"name:/tmp", "", "/tmp", "", "name", "local", "", true, false},
+		{"name:/tmp", "external", "/tmp", "", "name", "external", "", true, false},
+		{"name:/tmp:ro", "local", "/tmp", "", "name", "local", "", false, false},
+		{"local/name:/tmp:rw", "", "/tmp", "", "local/name", "local", "", true, false},
+	}
+
+	for _, c := range cases {
+		conf := &runconfig.Config{VolumeDriver: c.driver}
+		m, err := parseBindMount(c.bind, c.mountLabel, conf)
+		if c.fail {
+			if err == nil {
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
+			}
+			continue
+		}
+
+		if m.Destination != c.expDest {
+			t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
+		}
+
+		if m.Source != c.expSource {
+			t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
+		}
+
+		if m.Name != c.expName {
+			t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
+		}
+
+		if m.Driver != c.expDriver {
+			t.Fatalf("Expected driver %s, was %s, for spec %s\n", c.expDriver, m.Driver, c.bind)
+		}
+
+		if m.RW != c.expRW {
+			t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
+		}
+	}
+}
diff --git a/daemon/volumes_linux.go b/daemon/volumes_linux.go
new file mode 100644
index 0000000..8eea5e0
--- /dev/null
+++ b/daemon/volumes_linux.go
@@ -0,0 +1,70 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/docker/docker/daemon/execdriver"
+	"github.com/docker/docker/pkg/system"
+)
+
+// copyOwnership copies the permissions and uid:gid of the source file
+// into the destination file
+func copyOwnership(source, destination string) error {
+	stat, err := system.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil {
+		return err
+	}
+
+	return os.Chmod(destination, os.FileMode(stat.Mode()))
+}
+
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+	var mounts []execdriver.Mount
+	for _, m := range container.MountPoints {
+		path, err := m.Setup()
+		if err != nil {
+			return nil, err
+		}
+
+		mounts = append(mounts, execdriver.Mount{
+			Source:      path,
+			Destination: m.Destination,
+			Writable:    m.RW,
+		})
+	}
+
+	mounts = sortMounts(mounts)
+	return append(mounts, container.networkMounts()...), nil
+}
+
+func sortMounts(m []execdriver.Mount) []execdriver.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
+
+type mounts []execdriver.Mount
+
+func (m mounts) Len() int {
+	return len(m)
+}
+
+func (m mounts) Less(i, j int) bool {
+	return m.parts(i) < m.parts(j)
+}
+
+func (m mounts) Swap(i, j int) {
+	m[i], m[j] = m[j], m[i]
+}
+
+func (m mounts) parts(i int) int {
+	return len(strings.Split(filepath.Clean(m[i].Destination), string(os.PathSeparator)))
+}
diff --git a/daemon/volumes_stubs.go b/daemon/volumes_stubs.go
new file mode 100644
index 0000000..1d2d873
--- /dev/null
+++ b/daemon/volumes_stubs.go
@@ -0,0 +1,24 @@
+// +build !experimental
+
+package daemon
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+)
+
+func getVolumeDriver(_ string) (volume.Driver, error) {
+	return volumedrivers.Lookup(volume.DefaultDriverName)
+}
+
+func parseVolumeSource(spec string, _ *runconfig.Config) (string, string, error) {
+	if !filepath.IsAbs(spec) {
+		return "", "", fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", spec)
+	}
+
+	return "", spec, nil
+}
diff --git a/daemon/volumes_stubs_unit_test.go b/daemon/volumes_stubs_unit_test.go
new file mode 100644
index 0000000..5f100e5
--- /dev/null
+++ b/daemon/volumes_stubs_unit_test.go
@@ -0,0 +1,82 @@
+// +build !experimental
+
+package daemon
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/volume"
+	"github.com/docker/docker/volume/drivers"
+	"github.com/docker/docker/volume/local"
+)
+
+func TestGetVolumeDefaultDriver(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "volume-test-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+
+	l, err := local.New(tmp)
+	if err != nil {
+		t.Fatal(err)
+	}
+	volumedrivers.Register(l, volume.DefaultDriverName)
+	d, err := getVolumeDriver("missing")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if d.Name() != volume.DefaultDriverName {
+		t.Fatalf("Expected local driver, was %s\n", d.Name)
+	}
+}
+
+func TestParseBindMount(t *testing.T) {
+	cases := []struct {
+		bind       string
+		expDest    string
+		expSource  string
+		expName    string
+		mountLabel string
+		expRW      bool
+		fail       bool
+	}{
+		{"/tmp:/tmp", "/tmp", "/tmp", "", "", true, false},
+		{"/tmp:/tmp:ro", "/tmp", "/tmp", "", "", false, false},
+		{"/tmp:/tmp:rw", "/tmp", "/tmp", "", "", true, false},
+		{"/tmp:/tmp:foo", "/tmp", "/tmp", "", "", false, true},
+		{"name:/tmp", "", "", "", "", false, true},
+		{"local/name:/tmp:rw", "", "", "", "", true, true},
+	}
+
+	for _, c := range cases {
+		conf := &runconfig.Config{}
+		m, err := parseBindMount(c.bind, c.mountLabel, conf)
+		if c.fail {
+			if err == nil {
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
+			}
+			continue
+		}
+
+		if m.Destination != c.expDest {
+			t.Fatalf("Expected destination %s, was %s, for spec %s\n", c.expDest, m.Destination, c.bind)
+		}
+
+		if m.Source != c.expSource {
+			t.Fatalf("Expected source %s, was %s, for spec %s\n", c.expSource, m.Source, c.bind)
+		}
+
+		if m.Name != c.expName {
+			t.Fatalf("Expected name %s, was %s for spec %s\n", c.expName, m.Name, c.bind)
+		}
+
+		if m.RW != c.expRW {
+			t.Fatalf("Expected RW %v, was %v for spec %s\n", c.expRW, m.RW, c.bind)
+		}
+	}
+}
diff --git a/daemon/volumes_unit_test.go b/daemon/volumes_unit_test.go
new file mode 100644
index 0000000..b1e7f72
--- /dev/null
+++ b/daemon/volumes_unit_test.go
@@ -0,0 +1,35 @@
+package daemon
+
+import "testing"
+
+func TestParseVolumeFrom(t *testing.T) {
+	cases := []struct {
+		spec    string
+		expId   string
+		expMode string
+		fail    bool
+	}{
+		{"", "", "", true},
+		{"foobar", "foobar", "rw", false},
+		{"foobar:rw", "foobar", "rw", false},
+		{"foobar:ro", "foobar", "ro", false},
+		{"foobar:baz", "", "", true},
+	}
+
+	for _, c := range cases {
+		id, mode, err := parseVolumesFrom(c.spec)
+		if c.fail {
+			if err == nil {
+				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
+			}
+			continue
+		}
+
+		if id != c.expId {
+			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec)
+		}
+		if mode != c.expMode {
+			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
+		}
+	}
+}
diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go
new file mode 100644
index 0000000..c37ca22
--- /dev/null
+++ b/daemon/volumes_windows.go
@@ -0,0 +1,14 @@
+// +build windows
+
+package daemon
+
+import "github.com/docker/docker/daemon/execdriver"
+
+// Not supported on Windows
+func copyOwnership(source, destination string) error {
+	return nil
+}
+
+func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+	return nil, nil
+}
diff --git a/daemon/wait.go b/daemon/wait.go
index 7579467..1101b2f 100644
--- a/daemon/wait.go
+++ b/daemon/wait.go
@@ -1,21 +1,12 @@
 package daemon
 
-import (
-	"time"
+import "time"
 
-	"github.com/docker/docker/engine"
-)
-
-func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s", job.Name)
-	}
-	name := job.Args[0]
+func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Errorf("%s: %v", job.Name, err)
+		return -1, err
 	}
-	status, _ := container.WaitStop(-1 * time.Second)
-	job.Printf("%d\n", status)
-	return engine.StatusOK
+
+	return container.WaitStop(timeout)
 }
diff --git a/docker/daemon.go b/docker/daemon.go
index b2a985b..fc08bc9 100644
--- a/docker/daemon.go
+++ b/docker/daemon.go
@@ -7,19 +7,20 @@
 	"io"
 	"os"
 	"path/filepath"
-	"strings"
+	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
+	apiserver "github.com/docker/docker/api/server"
 	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builtins"
 	"github.com/docker/docker/daemon"
 	_ "github.com/docker/docker/daemon/execdriver/lxc"
 	_ "github.com/docker/docker/daemon/execdriver/native"
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/homedir"
 	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/pidfile"
 	"github.com/docker/docker/pkg/signal"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/timeutils"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/utils"
 )
@@ -32,6 +33,9 @@
 )
 
 func init() {
+	if daemonCfg.LogConfig.Config == nil {
+		daemonCfg.LogConfig.Config = make(map[string]string)
+	}
 	daemonCfg.InstallFlags()
 	registryCfg.InstallFlags()
 }
@@ -40,13 +44,13 @@
 	// Migrate trust key if exists at ~/.docker/key.json and owned by current user
 	oldPath := filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
 	newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
-	if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && utils.IsFileOwner(oldPath) {
+	if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) {
 		defer func() {
 			// Ensure old path is removed if no error occurred
 			if err == nil {
 				err = os.Remove(oldPath)
 			} else {
-				log.Warnf("Key migration failed, key file not removed at %s", oldPath)
+				logrus.Warnf("Key migration failed, key file not removed at %s", oldPath)
 			}
 		}()
 
@@ -70,125 +74,144 @@
 			return fmt.Errorf("error copying key: %s", err)
 		}
 
-		log.Infof("Migrated key from %s to %s", oldPath, newPath)
+		logrus.Infof("Migrated key from %s to %s", oldPath, newPath)
 	}
 
 	return nil
 }
 
 func mainDaemon() {
+	if utils.ExperimentalBuild() {
+		logrus.Warn("Running experimental build")
+	}
+
 	if flag.NArg() != 0 {
 		flag.Usage()
 		return
 	}
-	eng := engine.New()
-	signal.Trap(eng.Shutdown)
 
-	if err := migrateKey(); err != nil {
-		log.Fatal(err)
-	}
-	daemonCfg.TrustKeyPath = *flTrustKey
+	logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: timeutils.RFC3339NanoFixed})
 
-	// Load builtins
-	if err := builtins.Register(eng); err != nil {
-		log.Fatal(err)
-	}
-
-	// load registry service
-	if err := registry.NewService(registryCfg).Install(eng); err != nil {
-		log.Fatal(err)
-	}
-
-	// load the daemon in the background so we can immediately start
-	// the http api so that connections don't fail while the daemon
-	// is booting
-	daemonInitWait := make(chan error)
-	go func() {
-		d, err := daemon.NewDaemon(daemonCfg, eng)
+	var pfile *pidfile.PidFile
+	if daemonCfg.Pidfile != "" {
+		pf, err := pidfile.New(daemonCfg.Pidfile)
 		if err != nil {
-			daemonInitWait <- err
-			return
+			logrus.Fatalf("Error starting daemon: %v", err)
 		}
+		pfile = pf
+		defer func() {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}()
+	}
 
-		log.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s",
-			dockerversion.VERSION,
-			dockerversion.GITCOMMIT,
-			d.ExecutionDriver().Name(),
-			d.GraphDriver().String(),
-		)
+	serverConfig := &apiserver.ServerConfig{
+		Logging:     true,
+		EnableCors:  daemonCfg.EnableCors,
+		CorsHeaders: daemonCfg.CorsHeaders,
+		Version:     dockerversion.VERSION,
+		SocketGroup: daemonCfg.SocketGroup,
+		Tls:         *flTls,
+		TlsVerify:   *flTlsVerify,
+		TlsCa:       *flCa,
+		TlsCert:     *flCert,
+		TlsKey:      *flKey,
+	}
 
-		if err := d.Install(eng); err != nil {
-			daemonInitWait <- err
-			return
-		}
+	api := apiserver.New(serverConfig)
 
-		b := &builder.BuilderJob{eng, d}
-		b.Install()
-
-		// after the daemon is done setting up we can tell the api to start
-		// accepting connections
-		if err := eng.Job("acceptconnections").Run(); err != nil {
-			daemonInitWait <- err
-			return
-		}
-		daemonInitWait <- nil
-	}()
-
-	// Serve api
-	job := eng.Job("serveapi", flHosts...)
-	job.SetenvBool("Logging", true)
-	job.SetenvBool("EnableCors", daemonCfg.EnableCors)
-	job.Setenv("CorsHeaders", daemonCfg.CorsHeaders)
-	job.Setenv("Version", dockerversion.VERSION)
-	job.Setenv("SocketGroup", daemonCfg.SocketGroup)
-
-	job.SetenvBool("Tls", *flTls)
-	job.SetenvBool("TlsVerify", *flTlsVerify)
-	job.Setenv("TlsCa", *flCa)
-	job.Setenv("TlsCert", *flCert)
-	job.Setenv("TlsKey", *flKey)
-	job.SetenvBool("BufferRequests", true)
-
-	// The serve API job never exits unless an error occurs
+	// The serve API routine never exits unless an error occurs
 	// We need to start it as a goroutine and wait on it so
 	// daemon doesn't exit
 	serveAPIWait := make(chan error)
 	go func() {
-		if err := job.Run(); err != nil {
-			log.Errorf("ServeAPI error: %v", err)
+		if err := api.ServeApi(flHosts); err != nil {
+			logrus.Errorf("ServeAPI error: %v", err)
 			serveAPIWait <- err
 			return
 		}
 		serveAPIWait <- nil
 	}()
 
-	// Wait for the daemon startup goroutine to finish
-	// This makes sure we can actually cleanly shutdown the daemon
-	log.Debug("waiting for daemon to initialize")
-	errDaemon := <-daemonInitWait
-	if errDaemon != nil {
-		eng.Shutdown()
-		outStr := fmt.Sprintf("Shutting down daemon due to errors: %v", errDaemon)
-		if strings.Contains(errDaemon.Error(), "engine is shutdown") {
-			// if the error is "engine is shutdown", we've already reported (or
-			// will report below in API server errors) the error
-			outStr = "Shutting down daemon due to reported errors"
-		}
-		// we must "fatal" exit here as the API server may be happy to
-		// continue listening forever if the error had no impact to API
-		log.Fatal(outStr)
-	} else {
-		log.Info("Daemon has completed initialization")
+	if err := migrateKey(); err != nil {
+		logrus.Fatal(err)
 	}
+	daemonCfg.TrustKeyPath = *flTrustKey
+
+	registryService := registry.NewService(registryCfg)
+	d, err := daemon.NewDaemon(daemonCfg, registryService)
+	if err != nil {
+		if pfile != nil {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}
+		logrus.Fatalf("Error starting daemon: %v", err)
+	}
+
+	logrus.Info("Daemon has completed initialization")
+
+	logrus.WithFields(logrus.Fields{
+		"version":     dockerversion.VERSION,
+		"commit":      dockerversion.GITCOMMIT,
+		"execdriver":  d.ExecutionDriver().Name(),
+		"graphdriver": d.GraphDriver().String(),
+	}).Info("Docker daemon")
+
+	signal.Trap(func() {
+		api.Close()
+		<-serveAPIWait
+		shutdownDaemon(d, 15)
+		if pfile != nil {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}
+	})
+
+	// after the daemon is done setting up we can tell the api to start
+	// accepting connections with specified daemon
+	api.AcceptConnections(d)
 
 	// Daemon is fully initialized and handling API traffic
-	// Wait for serve API job to complete
+	// Wait for serve API to complete
 	errAPI := <-serveAPIWait
-	// If we have an error here it is unique to API (as daemonErr would have
-	// exited the daemon process above)
-	eng.Shutdown()
+	shutdownDaemon(d, 15)
 	if errAPI != nil {
-		log.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
+		if pfile != nil {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}
+		logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
 	}
+}
 
+// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
+// d.Shutdown() is waiting too long to kill container or worst it's
+// blocked there
+func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
+	ch := make(chan struct{})
+	go func() {
+		d.Shutdown()
+		close(ch)
+	}()
+	select {
+	case <-ch:
+		logrus.Debug("Clean shutdown succeded")
+	case <-time.After(timeout * time.Second):
+		logrus.Error("Force shutdown daemon")
+	}
+}
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool {
+	if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil {
+		if int(fileInfo.Uid()) == os.Getuid() {
+			return true
+		}
+	}
+	return false
 }
diff --git a/docker/docker.go b/docker/docker.go
index 3474244..fd40f4b 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -6,16 +6,16 @@
 	"fmt"
 	"io/ioutil"
 	"os"
+	"runtime"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/opts"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/docker/pkg/term"
-	"github.com/docker/docker/utils"
 )
 
 const (
@@ -44,31 +44,40 @@
 	}
 
 	if *flLogLevel != "" {
-		lvl, err := log.ParseLevel(*flLogLevel)
+		lvl, err := logrus.ParseLevel(*flLogLevel)
 		if err != nil {
-			log.Fatalf("Unable to parse logging level: %s", *flLogLevel)
+			fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", *flLogLevel)
+			os.Exit(1)
 		}
 		setLogLevel(lvl)
 	} else {
-		setLogLevel(log.InfoLevel)
+		setLogLevel(logrus.InfoLevel)
 	}
 
-	// -D, --debug, -l/--log-level=debug processing
-	// When/if -D is removed this block can be deleted
 	if *flDebug {
 		os.Setenv("DEBUG", "1")
-		setLogLevel(log.DebugLevel)
+		setLogLevel(logrus.DebugLevel)
 	}
 
 	if len(flHosts) == 0 {
 		defaultHost := os.Getenv("DOCKER_HOST")
 		if defaultHost == "" || *flDaemon {
-			// If we do not have a host, default to unix socket
-			defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
+			if runtime.GOOS != "windows" {
+				// If we do not have a host, default to unix socket
+				defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
+			} else {
+				// If we do not have a host, default to TCP socket on Windows
+				defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
+			}
 		}
-		defaultHost, err := api.ValidateHost(defaultHost)
+		defaultHost, err := opts.ValidateHost(defaultHost)
 		if err != nil {
-			log.Fatal(err)
+			if *flDaemon {
+				logrus.Fatal(err)
+			} else {
+				fmt.Fprint(os.Stderr, err)
+			}
+			os.Exit(1)
 		}
 		flHosts = append(flHosts, defaultHost)
 	}
@@ -85,7 +94,8 @@
 	}
 
 	if len(flHosts) > 1 {
-		log.Fatal("Please specify only one -H")
+		fmt.Fprintf(os.Stderr, "Please specify only one -H")
+		os.Exit(0)
 	}
 	protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
 
@@ -106,7 +116,8 @@
 		certPool := x509.NewCertPool()
 		file, err := ioutil.ReadFile(*flCa)
 		if err != nil {
-			log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
+			fmt.Fprintf(os.Stderr, "Couldn't read ca cert %s: %s\n", *flCa, err)
+			os.Exit(1)
 		}
 		certPool.AppendCertsFromPEM(file)
 		tlsConfig.RootCAs = certPool
@@ -121,7 +132,8 @@
 			*flTls = true
 			cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
 			if err != nil {
-				log.Fatalf("Couldn't load X509 key pair: %q. Make sure the key is encrypted", err)
+				fmt.Fprintf(os.Stderr, "Couldn't load X509 key pair: %q. Make sure the key is encrypted\n", err)
+				os.Exit(1)
 			}
 			tlsConfig.Certificates = []tls.Certificate{cert}
 		}
@@ -136,13 +148,15 @@
 	}
 
 	if err := cli.Cmd(flag.Args()...); err != nil {
-		if sterr, ok := err.(*utils.StatusError); ok {
+		if sterr, ok := err.(client.StatusError); ok {
 			if sterr.Status != "" {
-				log.Println(sterr.Status)
+				fmt.Fprintln(cli.Err(), sterr.Status)
+				os.Exit(1)
 			}
 			os.Exit(sterr.StatusCode)
 		}
-		log.Fatal(err)
+		fmt.Fprintln(cli.Err(), err)
+		os.Exit(1)
 	}
 }
 
diff --git a/docker/flags.go b/docker/flags.go
index 7f0c10d..cbdb6a8 100644
--- a/docker/flags.go
+++ b/docker/flags.go
@@ -5,15 +5,69 @@
 	"os"
 	"path/filepath"
 	"runtime"
+	"sort"
 
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/homedir"
 	flag "github.com/docker/docker/pkg/mflag"
 )
 
+type command struct {
+	name        string
+	description string
+}
+
+type byName []command
+
+func (a byName) Len() int           { return len(a) }
+func (a byName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byName) Less(i, j int) bool { return a[i].name < a[j].name }
+
 var (
 	dockerCertPath  = os.Getenv("DOCKER_CERT_PATH")
 	dockerTlsVerify = os.Getenv("DOCKER_TLS_VERIFY") != ""
+
+	dockerCommands = []command{
+		{"attach", "Attach to a running container"},
+		{"build", "Build an image from a Dockerfile"},
+		{"commit", "Create a new image from a container's changes"},
+		{"cp", "Copy files/folders from a container's filesystem to the host path"},
+		{"create", "Create a new container"},
+		{"diff", "Inspect changes on a container's filesystem"},
+		{"events", "Get real time events from the server"},
+		{"exec", "Run a command in a running container"},
+		{"export", "Stream the contents of a container as a tar archive"},
+		{"history", "Show the history of an image"},
+		{"images", "List images"},
+		{"import", "Create a new filesystem image from the contents of a tarball"},
+		{"info", "Display system-wide information"},
+		{"inspect", "Return low-level information on a container or image"},
+		{"kill", "Kill a running container"},
+		{"load", "Load an image from a tar archive"},
+		{"login", "Register or log in to a Docker registry server"},
+		{"logout", "Log out from a Docker registry server"},
+		{"logs", "Fetch the logs of a container"},
+		{"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
+		{"pause", "Pause all processes within a container"},
+		{"ps", "List containers"},
+		{"pull", "Pull an image or a repository from a Docker registry server"},
+		{"push", "Push an image or a repository to a Docker registry server"},
+		{"rename", "Rename an existing container"},
+		{"restart", "Restart a running container"},
+		{"rm", "Remove one or more containers"},
+		{"rmi", "Remove one or more images"},
+		{"run", "Run a command in a new container"},
+		{"save", "Save an image to a tar archive"},
+		{"search", "Search for an image on the Docker Hub"},
+		{"start", "Start a stopped container"},
+		{"stats", "Display a stream of a containers' resource usage statistics"},
+		{"stop", "Stop a running container"},
+		{"tag", "Tag an image into a repository"},
+		{"top", "Lookup the running processes of a container"},
+		{"unpause", "Unpause a paused container"},
+		{"version", "Show the Docker version information"},
+		{"wait", "Block until a container stops, then print its exit code"},
+	}
 )
 
 func init() {
@@ -75,49 +129,12 @@
 
 		help := "\nCommands:\n"
 
-		for _, command := range [][]string{
-			{"attach", "Attach to a running container"},
-			{"build", "Build an image from a Dockerfile"},
-			{"commit", "Create a new image from a container's changes"},
-			{"cp", "Copy files/folders from a container's filesystem to the host path"},
-			{"create", "Create a new container"},
-			{"diff", "Inspect changes on a container's filesystem"},
-			{"events", "Get real time events from the server"},
-			{"exec", "Run a command in a running container"},
-			{"export", "Stream the contents of a container as a tar archive"},
-			{"history", "Show the history of an image"},
-			{"images", "List images"},
-			{"import", "Create a new filesystem image from the contents of a tarball"},
-			{"info", "Display system-wide information"},
-			{"inspect", "Return low-level information on a container or image"},
-			{"kill", "Kill a running container"},
-			{"load", "Load an image from a tar archive"},
-			{"login", "Register or log in to a Docker registry server"},
-			{"logout", "Log out from a Docker registry server"},
-			{"logs", "Fetch the logs of a container"},
-			{"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
-			{"pause", "Pause all processes within a container"},
-			{"ps", "List containers"},
-			{"pull", "Pull an image or a repository from a Docker registry server"},
-			{"push", "Push an image or a repository to a Docker registry server"},
-			{"rename", "Rename an existing container"},
-			{"restart", "Restart a running container"},
-			{"rm", "Remove one or more containers"},
-			{"rmi", "Remove one or more images"},
-			{"run", "Run a command in a new container"},
-			{"save", "Save an image to a tar archive"},
-			{"search", "Search for an image on the Docker Hub"},
-			{"start", "Start a stopped container"},
-			{"stats", "Display a stream of a containers' resource usage statistics"},
-			{"stop", "Stop a running container"},
-			{"tag", "Tag an image into a repository"},
-			{"top", "Lookup the running processes of a container"},
-			{"unpause", "Unpause a paused container"},
-			{"version", "Show the Docker version information"},
-			{"wait", "Block until a container stops, then print its exit code"},
-		} {
-			help += fmt.Sprintf("    %-10.10s%s\n", command[0], command[1])
+		sort.Sort(byName(dockerCommands))
+
+		for _, cmd := range dockerCommands {
+			help += fmt.Sprintf("    %-10.10s%s\n", cmd.name, cmd.description)
 		}
+
 		help += "\nRun 'docker COMMAND --help' for more information on a command."
 		fmt.Fprintf(os.Stdout, "%s\n", help)
 	}
diff --git a/docker/log.go b/docker/log.go
index 0dd9a70..7b43b56 100644
--- a/docker/log.go
+++ b/docker/log.go
@@ -1,14 +1,14 @@
 package main
 
 import (
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"io"
 )
 
-func setLogLevel(lvl log.Level) {
-	log.SetLevel(lvl)
+func setLogLevel(lvl logrus.Level) {
+	logrus.SetLevel(lvl)
 }
 
 func initLogging(stderr io.Writer) {
-	log.SetOutput(stderr)
+	logrus.SetOutput(stderr)
 }
diff --git a/docs/Dockerfile b/docs/Dockerfile
index d5ffae4..bda3ccd 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -6,11 +6,11 @@
 
 # This section ensures we pull the correct version of each
 # sub project
-ENV COMPOSE_BRANCH 1.2.0
+ENV COMPOSE_BRANCH release
 ENV SWARM_BRANCH v0.2.0
-ENV MACHINE_BRANCH master
-ENV DISTRIB_BRANCH master
-
+ENV MACHINE_BRANCH docs
+ENV DISTRIB_BRANCH docs
+ENV KITEMATIC_BRANCH master
 
 
 # TODO: need the full repo source to get the git version info
@@ -28,81 +28,136 @@
 #COPY ./image/spec/v1.md /docs/sources/reference/image-spec-v1.md
 
 # TODO: don't do this - look at merging the yml file in build.sh
-COPY ./mkdocs.yml mkdocs.yml
-COPY ./s3_website.json s3_website.json
-COPY ./release.sh release.sh
+COPY ./mkdocs.yml ./s3_website.json ./release.sh ./
 
-
+#######################
 # Docker Distribution
-# 
+########################
+
 #ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/mkdocs.yml /docs/mkdocs-distribution.yml
 
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/notifications.png /docs/sources/registry/images/notifications.png
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/registry.png /docs/sources/registry/images/registry.png
+ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/notifications.png \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/registry.png \
+  /docs/sources/registry/images/
 
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/overview.md /docs/sources/registry/overview.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/overview.md
+ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/index.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/deploying.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/configuration.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storagedrivers.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/notifications.md \
+  /docs/sources/registry/
 
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/deploying.md /docs/sources/registry/deploying.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/deploying.md
-
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/configuration.md /docs/sources/registry/configuration.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/configuration.md
-
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storagedrivers.md /docs/sources/registry/storagedrivers.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/storagedrivers.md
-
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/notifications.md /docs/sources/registry/notifications.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/notifications.md
-
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/api.md /docs/sources/registry/spec/api.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/spec/api.md
-
-ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/json.md /docs/sources/registry/spec/json.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/spec/json.md
+ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/api.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/json.md \
+  /docs/sources/registry/spec/
+  
+ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/s3.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/azure.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/filesystem.md \
+    https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/inmemory.md \
+  /docs/sources/registry/storage-drivers/
 
 ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/auth/token.md /docs/sources/registry/spec/auth/token.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/spec/auth/token.md
 
+RUN sed -i.old '1s;^;no_version_dropdown: true;' \
+  /docs/sources/registry/*.md \
+  /docs/sources/registry/spec/*.md \
+  /docs/sources/registry/spec/auth/*.md \
+  /docs/sources/registry/storage-drivers/*.md 
+
+RUN sed -i.old  -e '/^<!--GITHUB/g' -e '/^IGNORES-->/g'\
+  /docs/sources/registry/*.md \
+  /docs/sources/registry/spec/*.md \
+  /docs/sources/registry/spec/auth/*.md \
+  /docs/sources/registry/storage-drivers/*.md 
+
+#######################
 # Docker Swarm
+#######################
+
 #ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/docs/mkdocs.yml /docs/mkdocs-swarm.yml
 ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/docs/index.md /docs/sources/swarm/index.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/index.md
-ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/discovery/README.md /docs/sources/swarm/discovery.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/discovery.md
-ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/api/README.md /docs/sources/swarm/API.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/API.md
-ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/filter/README.md /docs/sources/swarm/scheduler/filter.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/scheduler/filter.md
-ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/strategy/README.md /docs/sources/swarm/scheduler/strategy.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/scheduler/strategy.md
 
+ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/discovery/README.md /docs/sources/swarm/discovery.md
+
+ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/api/README.md /docs/sources/swarm/API.md
+
+ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/filter/README.md /docs/sources/swarm/scheduler/filter.md
+
+ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/strategy/README.md /docs/sources/swarm/scheduler/strategy.md
+
+RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/*.md /docs/sources/swarm/scheduler/*.md
+
+#######################
 # Docker Machine
+#######################
 #ADD https://raw.githubusercontent.com/docker/machine/${MACHINE_BRANCH}/docs/mkdocs.yml /docs/mkdocs-machine.yml
+
 ADD https://raw.githubusercontent.com/docker/machine/${MACHINE_BRANCH}/docs/index.md /docs/sources/machine/index.md
 RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/machine/index.md
 
+#######################
 # Docker Compose
-#ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/mkdocs.yml /docs/mkdocs-compose.yml
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/index.md /docs/sources/compose/index.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/index.md
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/install.md /docs/sources/compose/install.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/install.md
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/cli.md /docs/sources/compose/cli.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/cli.md
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/yml.md /docs/sources/compose/yml.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/yml.md
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/env.md /docs/sources/compose/env.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/env.md
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/completion.md /docs/sources/compose/completion.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/completion.md
+#######################
 
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/django.md /docs/sources/compose/django.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/django.md
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/rails.md /docs/sources/compose/rails.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/rails.md
-ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/wordpress.md /docs/sources/compose/wordpress.md
-RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/wordpress.md
+#ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/mkdocs.yml /docs/mkdocs-compose.yml
+
+ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/index.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/install.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/cli.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/yml.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/env.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/completion.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/django.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/rails.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/wordpress.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/extends.md \
+  https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/production.md \
+  /docs/sources/compose/
+
+RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/*.md
+
+#######################
+# Kitematic
+#######################
+ADD https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/faq.md \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/index.md \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/known-issues.md \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/minecraft-server.md \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/nginx-web-server.md \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/rethinkdb-dev-database.md \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/userguide.md \
+  /docs/sources/kitematic/
+RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/kitematic/*.md
+ADD https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/browse-images.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/change-folder.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/cli-access-button.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/cli-redis-container.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/cli-terminal.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/containers.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/installing.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-add-server.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-create.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-data-volume.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-login.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-map.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-port.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-restart.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/minecraft-server-address.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-2048-files.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-2048.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-create.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-data-folder.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-data-volume.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-hello-world.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-preview.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/nginx-serving-2048.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethink-container.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethink-create.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethink-ports.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/rethinkdb-preview.png \
+  https://raw.githubusercontent.com/kitematic/kitematic/${KITEMATIC_BRANCH}/docs/assets/volumes-dir.png \
+  /docs/sources/kitematic/assets/
 
 # Then build everything together, ready for mkdocs
-RUN /docs/build.sh
\ No newline at end of file
+RUN /docs/build.sh
diff --git a/docs/README.md b/docs/README.md
index 5feb496..8ff25ad 100755
--- a/docs/README.md
+++ b/docs/README.md
@@ -3,7 +3,7 @@
 The source for Docker documentation is in this directory under `sources/`. Our
 documentation uses extended Markdown, as implemented by
 [MkDocs](http://mkdocs.org).  The current release of the Docker documentation
-resides on [http://docs.docker.com](http://docs.docker.com).
+resides on [https://docs.docker.com](https://docs.docker.com).
 
 ## Understanding the documentation branches and processes
 
@@ -11,7 +11,7 @@
 
 | Branch   | Description                    | URL (published via commit-hook)                                              |
 |----------|--------------------------------|------------------------------------------------------------------------------|
-| `docs`   | Official release documentation | [http://docs.docker.com](http://docs.docker.com)                             |
+| `docs`   | Official release documentation | [https://docs.docker.com](https://docs.docker.com)                             |
 | `master` | Merged but unreleased development work    | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
 
 Additions and updates to upcoming releases are made in a feature branch off of
@@ -280,3 +280,24 @@
 aws cloudfront  create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.1/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}'
 ```
 
+### Generate the man pages for Mac OSX
+
+When using Docker on Mac OSX the man pages will be missing by default. You can manually generate them by following these steps:
+
+1. Checkout the docker source. You must clone into your `/Users` directory because Boot2Docker can only share this path
+   with the docker containers.
+
+        $ git clone https://github.com/docker/docker.git
+		
+2. Build the docker image.
+   
+        $ cd docker/docs/man
+        $ docker build -t docker/md2man .
+
+3. Build the man pages.
+
+        $ docker run -v /Users/<path-to-git-dir>/docker/docs/man:/docs:rw -w /docs -i docker/md2man /docs/md2man-all.sh
+
+4. Copy the generated man pages to `/usr/share/man`
+
+        $ cp -R man* /usr/share/man/
diff --git a/docs/man/Dockerfile.5.md b/docs/man/Dockerfile.5.md
index 7f88488..c64757c 100644
--- a/docs/man/Dockerfile.5.md
+++ b/docs/man/Dockerfile.5.md
@@ -31,7 +31,7 @@
 
 # USAGE
 
-  sudo docker build .
+  docker build .
 
   -- Runs the steps and commits them, building a final image.
   The path to the source repository defines where to find the context of the
@@ -41,7 +41,7 @@
   daemon.
 
   ```
-  sudo docker build -t repository/tag .
+  docker build -t repository/tag .
   ```
 
   -- specifies a repository and tag at which to save the new image if the build
@@ -69,8 +69,8 @@
   multiple images. Make a note of the last image ID output by the commit before
   each new **FROM** command.
 
-  -- If no tag is given to the **FROM** instruction, latest is assumed. If the
-  used tag does not exist, an error is returned.
+  -- If no tag is given to the **FROM** instruction, Docker applies the 
+  `latest` tag. If the used tag does not exist, an error is returned.
 
 **MAINTAINER**
   -- **MAINTAINER** sets the Author field for the generated images.
@@ -273,10 +273,17 @@
 
 **USER**
   -- `USER daemon`
-  The **USER** instruction sets the username or UID that is used when running the
-  image.
+  Sets the username or UID used for running subsequent commands.
 
-**WRKDIR**
+  The **USER** instruction can optionally be used to set the group or GID. The
+  followings examples are all valid:
+  USER [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+  Until the **USER** instruction is set, instructions will be run as root. The USER
+  instruction can be used any number of times in a Dockerfile, and will only affect
+  subsequent commands.
+
+**WORKDIR**
   -- `WORKDIR /path/to/workdir`
   The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**,
   **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can
diff --git a/docs/man/README.md b/docs/man/README.md
index 402178a..e25a925 100644
--- a/docs/man/README.md
+++ b/docs/man/README.md
@@ -30,4 +30,4 @@
 the man pages inside the `docker/docs/man/man1` directory using
 Docker volumes. For more information on Docker volumes see the man page for
 `docker run` and also look at the article [Sharing Directories via Volumes]
-(http://docs.docker.com/use/working_with_volumes/).
+(https://docs.docker.com/use/working_with_volumes/).
diff --git a/docs/man/docker-build.1.md b/docs/man/docker-build.1.md
index fe6250f..7a5ceab 100644
--- a/docs/man/docker-build.1.md
+++ b/docs/man/docker-build.1.md
@@ -17,7 +17,11 @@
 [**-m**|**--memory**[=*MEMORY*]]
 [**--memory-swap**[=*MEMORY-SWAP*]]
 [**-c**|**--cpu-shares**[=*0*]]
+[**--cpu-period**[=*0*]]
+[**--cpu-quota**[=*0*]]
 [**--cpuset-cpus**[=*CPUSET-CPUS*]]
+[**--cpuset-mems**[=*CPUSET-MEMS*]]
+[**--cgroup-parent**[=*CGROUP-PARENT*]]
 
 PATH | URL | -
 
@@ -62,6 +66,77 @@
 **-t**, **--tag**=""
    Repository name (and optionally a tag) to be applied to the resulting image in case of success
 
+**-m**, **--memory**=*MEMORY*
+  Memory limit
+
+**--memory-swap**=*MEMORY-SWAP*
+  Total memory (memory + swap), '-1' to disable swap.
+
+**-c**, **--cpu-shares**=*0*
+  CPU shares (relative weight).
+
+  By default, all containers get the same proportion of CPU cycles. You can
+  change this proportion by adjusting the container's CPU share weighting
+  relative to the weighting of all other running containers.
+
+  To modify the proportion from the default of 1024, use the **-c** or
+  **--cpu-shares** flag to set the weighting to 2 or higher.
+
+  The proportion is only applied when CPU-intensive processes are running.
+  When tasks in one container are idle, the other containers can use the
+  left-over CPU time. The actual amount of CPU time used varies depending on
+  the number of containers running on the system.
+
+  For example, consider three containers, one has a cpu-share of 1024 and
+  two others have a cpu-share setting of 512. When processes in all three
+  containers attempt to use 100% of CPU, the first container would receive
+  50% of the total CPU time. If you add a fourth container with a cpu-share
+  of 1024, the first container only gets 33% of the CPU. The remaining containers
+  receive 16.5%, 16.5% and 33% of the CPU.
+
+  On a multi-core system, the shares of CPU time are distributed across the CPU
+  cores. Even if a container is limited to less than 100% of CPU time, it can
+  use 100% of each individual CPU core.
+
+  For example, consider a system with more than three cores. If you start one
+  container **{C0}** with **-c=512** running one process, and another container
+  **{C1}** with **-c=1024** running two processes, this can result in the following
+  division of CPU shares:
+
+      PID    container    CPU    CPU share
+      100    {C0}         0      100% of CPU0
+      101    {C1}         1      100% of CPU1
+      102    {C1}         2      100% of CPU2
+
+**--cpu-period**=*0*
+  Limit the CPU CFS (Completely Fair Scheduler) period.
+
+  Limit the container's CPU usage. This flag causes the kernel to restrict the
+  container's CPU usage to the period you specify.
+
+**--cpu-quota**=*0*
+  Limit the CPU CFS (Completely Fair Scheduler) quota. 
+
+  By default, containers run with the full CPU resource. This flag causes the
+kernel to restrict the container's CPU usage to the quota you specify.
+
+**--cpuset-cpus**=*CPUSET-CPUS*
+  CPUs in which to allow execution (0-3, 0,1).
+
+**--cpuset-mems**=*CPUSET-MEMS*
+  Memory nodes (MEMs) in which to allow execution (-1-3, 0,1). Only effective on
+  NUMA systems.
+
+  For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
+to ensure the processes in your Docker container only use memory from the first
+two memory nodes.
+
+**--cgroup-parent**=*CGROUP-PARENT*
+  Path to `cgroups` under which the container's `cgroup` are created.
+
+  If the path is not absolute, the path is considered relative to the `cgroups` path of the init process.
+Cgroups are created if they do not already exist.
+
 # EXAMPLES
 
 ## Building an image using a Dockerfile located inside the current directory
diff --git a/docs/man/docker-commit.1.md b/docs/man/docker-commit.1.md
index e345919..5a29068 100644
--- a/docs/man/docker-commit.1.md
+++ b/docs/man/docker-commit.1.md
@@ -22,7 +22,7 @@
 
 **-c** , **--change**=[]
    Apply specified Dockerfile instructions while committing the image
-   Supported Dockerfile instructions: ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY
+   Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
 
 **--help**
   Print usage statement
@@ -38,7 +38,7 @@
 ## Creating a new image from an existing container
 An existing Fedora based container has had Apache installed while running
 in interactive mode with the bash shell. Apache is also running. To
-create a new image run docker ps to find the container's ID and then run:
+create a new image run `docker ps` to find the container's ID and then run:
 
     # docker commit -m="Added Apache to Fedora base image" \
       -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20
@@ -46,7 +46,7 @@
 ## Apply specified Dockerfile instructions while committing the image
 If an existing container was created without the DEBUG environment
 variable set to "true", you can create a new image based on that
-container by first getting the container's ID with docker ps and
+container by first getting the container's ID with `docker ps` and
 then running:
 
     # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image
diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md
index 1a0da1b..26b6711 100644
--- a/docs/man/docker-create.1.md
+++ b/docs/man/docker-create.1.md
@@ -8,11 +8,15 @@
 **docker create**
 [**-a**|**--attach**[=*[]*]]
 [**--add-host**[=*[]*]]
+[**--blkio-weight**[=*[BLKIO-WEIGHT]*]]
 [**-c**|**--cpu-shares**[=*0*]]
 [**--cap-add**[=*[]*]]
 [**--cap-drop**[=*[]*]]
 [**--cidfile**[=*CIDFILE*]]
+[**--cpu-period**[=*0*]]
 [**--cpuset-cpus**[=*CPUSET-CPUS*]]
+[**--cpuset-mems**[=*CPUSET-MEMS*]]
+[**--cpu-quota**[=*0*]]
 [**--device**[=*[]*]]
 [**--dns-search**[=*[]*]]
 [**--dns**[=*[]*]]
@@ -34,9 +38,11 @@
 [**--mac-address**[=*MAC-ADDRESS*]]
 [**--name**[=*NAME*]]
 [**--net**[=*"bridge"*]]
+[**--oom-kill-disable**[=*false*]]
 [**-P**|**--publish-all**[=*false*]]
 [**-p**|**--publish**[=*[]*]]
 [**--pid**[=*[]*]]
+[**--uts**[=*[]*]]
 [**--privileged**[=*false*]]
 [**--read-only**[=*false*]]
 [**--restart**[=*RESTART*]]
@@ -56,6 +62,9 @@
 **--add-host**=[]
    Add a custom host-to-IP mapping (host:ip)
 
+**--blkio-weight**=0
+   Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+
 **-c**, **--cpu-shares**=0
    CPU shares (relative weight)
 
@@ -71,9 +80,22 @@
 **--cgroup-parent**=""
    Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
 
+**--cpu-peroid**=0
+    Limit the CPU CFS (Completely Fair Scheduler) period
+
 **--cpuset-cpus**=""
    CPUs in which to allow execution (0-3, 0,1)
 
+**--cpuset-mems**=""
+   Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+
+   If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
+then processes in your Docker container will only use memory from the first
+two memory nodes.
+
+**-cpu-quota**=0
+   Limit the CPU CFS (Completely Fair Scheduler) quota
+
 **--device**=[]
    Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)
 
@@ -116,12 +138,13 @@
    Read labels from a file. Delimit each label with an EOL.
 
 **--link**=[]
-   Add link to another container in the form of <name or id>:alias
+   Add link to another container in the form of <name or id>:alias or just
+   <name or id> in which case the alias will match the name.
 
 **--lxc-conf**=[]
    (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
 
-**--log-driver**="|*json-file*|*syslog*|*none*"
+**--log-driver**="|*json-file*|*syslog*|*journald*|*none*"
   Logging driver for container. Default is defined by daemon `--log-driver` flag.
   **Warning**: `docker logs` command works only for `json-file` logging driver.
 
@@ -138,7 +161,7 @@
    Total memory limit (memory + swap)
 
    Set `-1` to disable swap (format: <number><optional unit>, where unit = b, k, m or g).
-This value should always larger than **-m**, so you should alway use this with **-m**.
+This value should always larger than **-m**, so you should always use this with **-m**.
 
 **--mac-address**=""
    Container MAC address (e.g. 92:d0:c6:0a:29:33)
@@ -153,6 +176,9 @@
                                'container:<name|id>': reuses another container network stack
                                'host': use the host network stack inside the container.  Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
 
+**--oom-kill-disable**=*true*|*false*
+	Whether to disable OOM Killer for the container or not.
+
 **-P**, **--publish-all**=*true*|*false*
    Publish all exposed ports to random ports on the host interfaces. The default is *false*.
 
@@ -168,6 +194,11 @@
      **host**: use the host's PID namespace inside the container.
      Note: the host mode gives the container full access to local PID and is therefore considered insecure.
 
+**--uts**=host
+   Set the UTS mode for the container
+     **host**: use the host's UTS namespace inside the container.
+     Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
+
 **--privileged**=*true*|*false*
    Give extended privileges to this container. The default is *false*.
 
diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md
index e755441..c1de7b5 100644
--- a/docs/man/docker-exec.1.md
+++ b/docs/man/docker-exec.1.md
@@ -10,6 +10,7 @@
 [**--help**]
 [**-i**|**--interactive**[=*false*]]
 [**-t**|**--tty**[=*false*]]
+[**-u**|**--user**[=*USER*]]
 CONTAINER COMMAND [ARG...]
 
 # DESCRIPTION
@@ -35,6 +36,14 @@
 **-t**, **--tty**=*true*|*false*
    Allocate a pseudo-TTY. The default is *false*.
 
+**-u**, **--user**=""
+   Sets the username or UID used and optionally the groupname or GID for the specified command.
+
+   The followings examples are all valid:
+   --user [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+   Without this argument the command will be run as root in the container.
+
 The **-t** option is incompatible with a redirection of the docker client
 standard input.
 
diff --git a/docs/man/docker-history.1.md b/docs/man/docker-history.1.md
index 47350f8..268e378 100644
--- a/docs/man/docker-history.1.md
+++ b/docs/man/docker-history.1.md
@@ -19,6 +19,9 @@
 **--help**
   Print usage statement
 
+**-H**. **--human**=*true*|*false*
+    Print sizes and dates in human readable format. The default is *true*.
+
 **--no-trunc**=*true*|*false*
    Don't truncate output. The default is *false*.
 
@@ -26,11 +29,21 @@
    Only show numeric IDs. The default is *false*.
 
 # EXAMPLES
-    $ sudo docker history fedora
-    IMAGE          CREATED          CREATED BY                                      SIZE
+    $ docker history fedora
+    IMAGE          CREATED          CREATED BY                                      SIZE                COMMENT
     105182bb5e8b   5 days ago       /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d   372.7 MB
     73bd853d2ea5   13 days ago      /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar   0 B
-    511136ea3c5a   10 months ago                                                    0 B
+    511136ea3c5a   10 months ago                                                    0 B                 Imported from -
+
+## Display comments in the image history
+The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history.
+
+    $ sudo docker history docker:scm
+    IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
+    2ac9d1098bf1        3 months ago        /bin/bash                                       241.4 MB            Added Apache to Fedora base image
+    88b42ffd1f7c        5 months ago        /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7   373.7 MB            
+    c69cab00d6ef        5 months ago        /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar   0 B                 
+    511136ea3c5a        19 months ago                                                       0 B                 Imported from -
 
 # HISTORY
 April 2014, Originally compiled by William Henry (whenry at redhat dot com)
diff --git a/docs/man/docker-images.1.md b/docs/man/docker-images.1.md
index c5151f1..16dd864 100644
--- a/docs/man/docker-images.1.md
+++ b/docs/man/docker-images.1.md
@@ -66,6 +66,11 @@
 
     docker images -a
 
+Previously, the docker images command supported the --tree and --dot arguments,
+which displayed different visualizations of the image data. Docker core removed
+this functionality in the 1.7 version. If you liked this functionality, you can
+still find it in the third-party dockviz tool: https://github.com/justone/dockviz.
+
 ## Listing only the shortened image IDs
 
 Listing just the shortened image IDs. This can be useful for some automated
diff --git a/docs/man/docker-import.1.md b/docs/man/docker-import.1.md
index 6b3899b..b45bf5d 100644
--- a/docs/man/docker-import.1.md
+++ b/docs/man/docker-import.1.md
@@ -13,7 +13,7 @@
 # OPTIONS
 **-c**, **--change**=[]
    Apply specified Dockerfile instructions while importing the image
-   Supported Dockerfile instructions: `ADD`|`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`FROM`|`MAINTAINER`|`RUN`|`USER`|`LABEL`|`VOLUME`|`WORKDIR`|`COPY`
+   Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
 
 # DESCRIPTION
 Create a new filesystem image from the contents of a tarball (`.tar`,
diff --git a/docs/man/docker-info.1.md b/docs/man/docker-info.1.md
index 346df86..a3bbd79 100644
--- a/docs/man/docker-info.1.md
+++ b/docs/man/docker-info.1.md
@@ -37,6 +37,7 @@
      Root Dir: /var/lib/docker/aufs
      Dirs: 80
     Execution Driver: native-0.2
+    Logging Driver: json-file
     Kernel Version: 3.13.0-24-generic
     Operating System: Ubuntu 14.04 LTS
     CPUs: 1
diff --git a/docs/man/docker-inspect.1.md b/docs/man/docker-inspect.1.md
index 85f6730..6f3cf51 100644
--- a/docs/man/docker-inspect.1.md
+++ b/docs/man/docker-inspect.1.md
@@ -19,80 +19,120 @@
 
 # OPTIONS
 **--help**
-  Print usage statement
+    Print usage statement
 
 **-f**, **--format**=""
-   Format the output using the given go template.
+    Format the output using the given go template.
 
 # EXAMPLES
 
 ## Getting information on a container
 
-To get information on a container use it's ID or instance name:
+To get information on a container use its ID or instance name:
 
-    #docker inspect 1eb5fabf5a03
+    $ docker inspect 1eb5fabf5a03
     [{
-       "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b",
-       "Created": "2014-04-04T21:33:52.02361335Z",
-       "Path": "/usr/sbin/nginx",
-       "Args": [],
-       "Config": {
-            "Hostname": "1eb5fabf5a03",
-            "Domainname": "",
-            "User": "",
-            "Memory": 0,
-            "MemorySwap": 0,
-            "CpuShares": 0,
+        "AppArmorProfile": "",
+        "Args": [],
+        "Config": {
+            "AttachStderr": false,
             "AttachStdin": false,
             "AttachStdout": false,
-            "AttachStderr": false,
-            "PortSpecs": null,
-            "ExposedPorts": {
-                "80/tcp": {}
-        },
-	    "Tty": true,
-            "OpenStdin": false,
-            "StdinOnce": false,
-            "Env": [
-               "HOME=/",
-	       "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-            ],
             "Cmd": [
                 "/usr/sbin/nginx"
             ],
-            "Dns": null,
-            "DnsSearch": null,
-            "Image": "summit/nginx",
-            "Volumes": null,
-            "VolumesFrom": "",
-            "WorkingDir": "",
+            "Domainname": "",
             "Entrypoint": null,
+            "Env": [
+                "HOME=/",
+                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            ],
+            "ExposedPorts": {
+                "80/tcp": {}
+            },
+            "Hostname": "1eb5fabf5a03",
+            "Image": "summit/nginx",
+            "Labels": {
+                "com.example.vendor": "Acme",
+                "com.example.license": "GPL",
+                "com.example.version": "1.0"
+            },
+            "MacAddress": "",
             "NetworkDisabled": false,
             "OnBuild": null,
-            "Context": {
-               "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650",
-	       "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650"
-	    }
+            "OpenStdin": false,
+            "PortSpecs": null,
+            "StdinOnce": false,
+            "Tty": true,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": "",
         },
-        "State": {
-            "Running": true,
-            "Pid": 858,
-            "ExitCode": 0,
-            "StartedAt": "2014-04-04T21:33:54.16259207Z",
-            "FinishedAt": "0001-01-01T00:00:00Z",
-            "Ghost": false
-        },
+        "Created": "2014-04-04T21:33:52.02361335Z",
+        "Driver": "devicemapper",
+        "ExecDriver": "native-0.1",
+        "ExecIDs": null,
+        "HostConfig": {
+            "Binds": null,
+            "CapAdd": null,
+            "CapDrop": null,
+            "CgroupParent": "",
+            "ContainerIDFile": "",
+            "CpuShares": 512,
+            "CpusetCpus": "0,1",
+            "CpusetMems": "",
+            "Devices": [],
+            "Dns": null,
+            "DnsSearch": null,
+            "ExtraHosts": null,
+            "IpcMode": "",
+            "Links": null,
+            "LogConfig": {
+                "Config": null,
+                "Type": "json-file"
+            },
+            "LxcConf": null,
+            "Memory": 16777216,
+            "MemorySwap": -1,
+            "NetworkMode": "",
+            "PidMode": "",
+            "PortBindings": {
+                "80/tcp": [
+                    {
+                        "HostIp": "0.0.0.0",
+                        "HostPort": "80"
+                    }
+                ]
+            },
+            "Privileged": false,
+            "PublishAllPorts": false,
+            "ReadonlyRootfs": false,
+            "RestartPolicy": {
+                "MaximumRetryCount": 0,
+                "Name": ""
+            },
+            "SecurityOpt": null,
+            "Ulimits": null,
+            "VolumesFrom": null
+        }
+        "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname",
+        "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts",
+        "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b",
         "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6",
-        "Labels": {
-            "com.example.vendor": "Acme",
-            "com.example.license": "GPL",
-            "com.example.version": "1.0"
-        },
+        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+        "MountLabel": "",
+        "Name": "/ecstatic_ptolemy",
         "NetworkSettings": {
+            "Bridge": "docker0",
+            "Gateway": "172.17.42.1",
+            "GlobalIPv6Address": "",
+            "GlobalIPv6PrefixLen": 0,
             "IPAddress": "172.17.0.2",
             "IPPrefixLen": 16,
-            "Gateway": "172.17.42.1",
-            "Bridge": "docker0",
+            "IPv6Gateway": "",
+            "LinkLocalIPv6Address": "",
+            "LinkLocalIPv6PrefixLen": 0,
+            "MacAddress": "",
             "PortMapping": null,
             "Ports": {
                 "80/tcp": [
@@ -103,41 +143,31 @@
                 ]
             }
         },
+        "Path": "/usr/sbin/nginx",
+        "ProcessLabel": "",
         "ResolvConfPath": "/etc/resolv.conf",
-        "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname",
-        "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts",
-        "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
-        "Name": "/ecstatic_ptolemy",
-        "Driver": "devicemapper",
-        "ExecDriver": "native-0.1",
+        "RestartCount": 0,
+        "State": {
+            "Dead": false,
+            "Error": "",
+            "ExitCode": 0,
+            "FinishedAt": "0001-01-01T00:00:00Z",
+            "OOMKilled": false,
+            "Paused": false,
+            "Pid": 858,
+            "Restarting": false,
+            "Running": true,
+            "StartedAt": "2014-04-04T21:33:54.16259207Z",
+        },
         "Volumes": {},
         "VolumesRW": {},
-        "HostConfig": {
-        "Binds": null,
-            "ContainerIDFile": "",
-            "LxcConf": [],
-            "Privileged": false,
-            "PortBindings": {
-                "80/tcp": [
-                    {
-                        "HostIp": "0.0.0.0",
-                        "HostPort": "80"
-                    }
-                ]
-            },
-            "Links": null,
-            "PublishAllPorts": false,
-            "DriverOptions": {
-                "lxc": null
-            },
-            "CliAddress": ""
-        }
+    }
 
 ## Getting the IP address of a container instance
 
 To get the IP address of a container use:
 
-    # docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03
+    $ docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03
     172.17.0.2
 
 ## Listing all port bindings
@@ -145,95 +175,96 @@
 One can loop over arrays and maps in the results to produce simple text
 output:
 
-    # docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \
-     {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03
+    $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \
+      {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03
+      80/tcp -> 80
 
-    80/tcp -> 80
+You can get more information about how to write a go template from:
+http://golang.org/pkg/text/template/.
 
 ## Getting information on an image
 
 Use an image's ID or name (e.g., repository/name[:tag]) to get information
- on it.
+on it.
 
-    # docker inspect 58394af37342
+    $ docker inspect fc1203419df2
     [{
-        "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9",
-        "parent": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
-        "created": "2014-02-03T16:10:40.500814677Z",
-        "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5",
-        "container_config": {
-            "Hostname": "88807319f25e",
-            "Domainname": "",
-            "User": "",
-            "Memory": 0,
-            "MemorySwap": 0,
-            "CpuShares": 0,
+        "Architecture": "amd64",
+        "Author": "",
+        "Comment": "",
+        "Config": {
+            "AttachStderr": false,
             "AttachStdin": false,
             "AttachStdout": false,
-            "AttachStderr": false,
-            "PortSpecs": null,
-            "ExposedPorts": null,
-            "Tty": false,
-            "OpenStdin": false,
-            "StdinOnce": false,
-            "Env": [
-                "HOME=/",
-                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            "Cmd": [
+                "make",
+                "direct-test"
             ],
+            "Domainname": "",
+            "Entrypoint": [
+                "/dind"
+            ],
+            "Env": [
+                "PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+            ],
+            "ExposedPorts": null,
+            "Hostname": "242978536a06",
+            "Image": "c2b774c744afc5bea603b5e6c5218539e506649326de3ea0135182f299d0519a",
+            "Labels": {},
+            "MacAddress": "",
+            "NetworkDisabled": false,
+            "OnBuild": [],
+            "OpenStdin": false,
+            "PortSpecs": null,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": "/go/src/github.com/docker/libcontainer"
+        },
+        "Container": "1c00417f3812a96d3ebc29e7fdee69f3d586d703ab89c8233fd4678d50707b39",
+        "ContainerConfig": {
+            "AttachStderr": false,
+            "AttachStdin": false,
+            "AttachStdout": false,
             "Cmd": [
                 "/bin/sh",
                 "-c",
-		 "#(nop) ADD fedora-20-dummy.tar.xz in /"
+                "#(nop) CMD [\"make\" \"direct-test\"]"
             ],
-            "Dns": null,
-            "DnsSearch": null,
-            "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
-            "Volumes": null,
-            "VolumesFrom": "",
-            "WorkingDir": "",
-            "Entrypoint": null,
-            "NetworkDisabled": false,
-            "OnBuild": null,
-            "Context": null
-        },
-        "docker_version": "0.6.3",
-        "author": "I P Babble \u003clsm5@ipbabble.com\u003e - ./buildcontainers.sh",
-        "config": {
-            "Hostname": "88807319f25e",
             "Domainname": "",
-            "User": "",
-            "Memory": 0,
-            "MemorySwap": 0,
-            "CpuShares": 0,
-            "AttachStdin": false,
-            "AttachStdout": false,
-            "AttachStderr": false,
-            "PortSpecs": null,
-            "ExposedPorts": null,
-            "Tty": false,
-            "OpenStdin": false,
-            "StdinOnce": false,
-            "Env": [
-                "HOME=/",
-		        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+            "Entrypoint": [
+                "/dind"
             ],
-            "Cmd": null,
-            "Dns": null,
-            "DnsSearch": null,
-            "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
-            "Volumes": null,
-            "VolumesFrom": "",
-            "WorkingDir": "",
-            "Entrypoint": null,
+            "Env": [
+                "PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+            ],
+            "ExposedPorts": null,
+            "Hostname": "242978536a06",
+            "Image": "c2b774c744afc5bea603b5e6c5218539e506649326de3ea0135182f299d0519a",
+            "Labels": {},
+            "MacAddress": "",
             "NetworkDisabled": false,
-            "OnBuild": null,
-            "Context": null
+            "OnBuild": [],
+            "OpenStdin": false,
+            "PortSpecs": null,
+            "StdinOnce": false,
+            "Tty": false,
+            "User": "",
+            "Volumes": null,
+            "WorkingDir": "/go/src/github.com/docker/libcontainer"
         },
-	"architecture": "x86_64",
-	"Size": 385520098
+        "Created": "2015-04-07T05:34:39.079489206Z",
+        "DockerVersion": "1.5.0-dev",
+        "Id": "fc1203419df26ca82cad1dd04c709cb1b8a8a947bd5bcbdfbef8241a76f031db",
+        "Os": "linux",
+        "Parent": "c2b774c744afc5bea603b5e6c5218539e506649326de3ea0135182f299d0519a",
+        "Size": 0,
+        "VirtualSize": 613136466
     }]
 
 # HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+April 2014, originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+April 2015, updated by Qiang Huang <h.huangqiang@huawei.com>
diff --git a/docs/man/docker-load.1.md b/docs/man/docker-load.1.md
index 52eaa37..c045443 100644
--- a/docs/man/docker-load.1.md
+++ b/docs/man/docker-load.1.md
@@ -24,11 +24,11 @@
 
 # EXAMPLES
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
     busybox             latest              769b9341d937        7 weeks ago         2.489 MB
-    $ sudo docker load --input fedora.tar
-    $ sudo docker images
+    $ docker load --input fedora.tar
+    $ docker images
     REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
     busybox             latest              769b9341d937        7 weeks ago         2.489 MB
     fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
diff --git a/docs/man/docker-login.1.md b/docs/man/docker-login.1.md
index f73df77..87ad31b 100644
--- a/docs/man/docker-login.1.md
+++ b/docs/man/docker-login.1.md
@@ -13,7 +13,7 @@
 [SERVER]
 
 # DESCRIPTION
-Register or log in to a Docker Registry Service located on the specified
+Register or log in to a Docker Registry located on the specified
 `SERVER`.  You can specify a URL or a `hostname` for the `SERVER` value. If you
 do not specify a `SERVER`, the command uses Docker's public registry located at
 `https://registry-1.docker.io/` by default.  To get a username/password for Docker's public registry, create an account on Docker Hub.
diff --git a/docs/man/docker-logout.1.md b/docs/man/docker-logout.1.md
index d464f00..3726fd6 100644
--- a/docs/man/docker-logout.1.md
+++ b/docs/man/docker-logout.1.md
@@ -2,14 +2,14 @@
 % Docker Community
 % JUNE 2014
 # NAME
-docker-logout - Log out from a Docker Registry Service.
+docker-logout - Log out from a Docker Registry.
 
 # SYNOPSIS
 **docker logout**
 [SERVER]
 
 # DESCRIPTION
-Log out of a Docker Registry Service located on the specified `SERVER`. You can
+Log out of a Docker Registry located on the specified `SERVER`. You can
 specify a URL or a `hostname` for the `SERVER` value. If you do not specify a
 `SERVER`, the command attempts to log you out of Docker's public registry
 located at `https://registry-1.docker.io/` by default.  
diff --git a/docs/man/docker-logs.1.md b/docs/man/docker-logs.1.md
index 01a15f5..e2cacea 100644
--- a/docs/man/docker-logs.1.md
+++ b/docs/man/docker-logs.1.md
@@ -8,6 +8,7 @@
 **docker logs**
 [**-f**|**--follow**[=*false*]]
 [**--help**]
+[**--since**[=*SINCE*]]
 [**-t**|**--timestamps**[=*false*]]
 [**--tail**[=*"all"*]]
 CONTAINER
@@ -31,6 +32,9 @@
 **-f**, **--follow**=*true*|*false*
    Follow log output. The default is *false*.
 
+**--since**=""
+   Show logs since timestamp
+
 **-t**, **--timestamps**=*true*|*false*
    Show timestamps. The default is *false*.
 
@@ -42,3 +46,4 @@
 based on docker.com source material and internal work.
 June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
 July 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
+April 2015, updated by Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
diff --git a/docs/man/docker-pull.1.md b/docs/man/docker-pull.1.md
index d7c1d59..30a949e 100644
--- a/docs/man/docker-pull.1.md
+++ b/docs/man/docker-pull.1.md
@@ -8,7 +8,7 @@
 **docker pull**
 [**-a**|**--all-tags**[=*false*]]
 [**--help**] 
-NAME[:TAG]
+NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG]
 
 # DESCRIPTION
 
@@ -31,7 +31,7 @@
 # Note that if the  image is previously downloaded then the status would be
 # 'Status: Image is up to date for fedora'
 
-    $ sudo docker pull fedora
+    $ docker pull fedora
     Pulling repository fedora
     ad57ef8d78d7: Download complete
     105182bb5e8b: Download complete
@@ -40,7 +40,7 @@
 
     Status: Downloaded newer image for fedora
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY   TAG         IMAGE ID        CREATED      VIRTUAL SIZE
     fedora       rawhide     ad57ef8d78d7    5 days ago   359.3 MB
     fedora       20          105182bb5e8b    5 days ago   372.7 MB
@@ -51,7 +51,7 @@
 # Note that if the  image is previously downloaded then the status would be
 # 'Status: Image is up to date for registry.hub.docker.com/fedora:20'
 
-    $ sudo docker pull registry.hub.docker.com/fedora:20
+    $ docker pull registry.hub.docker.com/fedora:20
     Pulling repository fedora
     3f2fed40e4b0: Download complete 
     511136ea3c5a: Download complete 
@@ -59,7 +59,7 @@
 
     Status: Downloaded newer image for registry.hub.docker.com/fedora:20
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY   TAG         IMAGE ID        CREATED      VIRTUAL SIZE
     fedora       20          3f2fed40e4b0    4 days ago   372.7 MB
 
diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md
index 1831237..1544c35 100644
--- a/docs/man/docker-run.1.md
+++ b/docs/man/docker-run.1.md
@@ -8,12 +8,16 @@
 **docker run**
 [**-a**|**--attach**[=*[]*]]
 [**--add-host**[=*[]*]]
+[**--blkio-weight**[=*[BLKIO-WEIGHT]*]]
 [**-c**|**--cpu-shares**[=*0*]]
 [**--cap-add**[=*[]*]]
 [**--cap-drop**[=*[]*]]
 [**--cidfile**[=*CIDFILE*]]
+[**--cpu-period**[=*0*]]
 [**--cpuset-cpus**[=*CPUSET-CPUS*]]
+[**--cpuset-mems**[=*CPUSET-MEMS*]]
 [**-d**|**--detach**[=*false*]]
+[**--cpu-quota**[=*0*]]
 [**--device**[=*[]*]]
 [**--dns-search**[=*[]*]]
 [**--dns**[=*[]*]]
@@ -35,9 +39,11 @@
 [**--mac-address**[=*MAC-ADDRESS*]]
 [**--name**[=*NAME*]]
 [**--net**[=*"bridge"*]]
+[**--oom-kill-disable**[=*false*]]
 [**-P**|**--publish-all**[=*false*]]
 [**-p**|**--publish**[=*[]*]]
 [**--pid**[=*[]*]]
+[**--uts**[=*[]*]]
 [**--privileged**[=*false*]]
 [**--read-only**[=*false*]]
 [**--restart**[=*RESTART*]]
@@ -83,6 +89,9 @@
    Add a line to /etc/hosts. The format is hostname:ip.  The **--add-host**
 option can be set multiple times.
 
+**--blkio-weight**=0
+   Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+
 **-c**, **--cpu-shares**=0
    CPU shares (relative weight)
 
@@ -101,7 +110,7 @@
 For example, consider three containers, one has a cpu-share of 1024 and
 two others have a cpu-share setting of 512. When processes in all three
 containers attempt to use 100% of CPU, the first container would receive
-50% of the total CPU time. If you add a fouth container with a cpu-share
+50% of the total CPU time. If you add a fourth container with a cpu-share
 of 1024, the first container only gets 33% of the CPU. The remaining containers
 receive 16.5%, 16.5% and 33% of the CPU.
 
@@ -131,9 +140,28 @@
 **--cidfile**=""
    Write the container ID to the file
 
+**--cpu-period**=0
+   Limit the CPU CFS (Completely Fair Scheduler) period
+
+   Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify.
+
 **--cpuset-cpus**=""
    CPUs in which to allow execution (0-3, 0,1)
 
+**--cpuset-mems**=""
+   Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+
+   If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
+then processes in your Docker container will only use memory from the first
+two memory nodes.
+
+**--cpu-quota**=0
+   Limit the CPU CFS (Completely Fair Scheduler) quota
+
+   Limit the container's CPU usage. By default, containers run with the full
+CPU resource. This flag tell the kernel to restrict the container's CPU usage
+to the quota you specify.
+
 **-d**, **--detach**=*true*|*false*
    Detached mode: run the container in the background and print the new container ID. The default is *false*.
 
@@ -211,7 +239,8 @@
    Read in a line delimited file of labels
 
 **--link**=[]
-   Add link to another container in the form of <name or id>:alias
+   Add link to another container in the form of <name or id>:alias or just <name or id>
+in which case the alias will match the name
 
    If the operator
 uses **--link** when starting the new client container, then the client
@@ -222,7 +251,7 @@
 **--lxc-conf**=[]
    (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
 
-**--log-driver**="|*json-file*|*syslog*|*none*"
+**--log-driver**="|*json-file*|*syslog*|*journald*|*none*"
   Logging driver for container. Default is defined by daemon `--log-driver` flag.
   **Warning**: `docker logs` command works only for `json-file` logging driver.
 
@@ -239,7 +268,7 @@
    Total memory limit (memory + swap)
 
    Set `-1` to disable swap (format: <number><optional unit>, where unit = b, k, m or g).
-This value should always larger than **-m**, so you should alway use this with **-m**.
+This value should always larger than **-m**, so you should always use this with **-m**.
 
 **--mac-address**=""
    Container MAC address (e.g. 92:d0:c6:0a:29:33)
@@ -269,6 +298,9 @@
                                'container:<name|id>': reuses another container network stack
                                'host': use the host network stack inside the container.  Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
 
+**--oom-kill-disable**=*true*|*false*
+   Whether to disable OOM Killer for the container or not.
+
 **-P**, **--publish-all**=*true*|*false*
    Publish all exposed ports to random ports on the host interfaces. The default is *false*.
 
@@ -292,6 +324,11 @@
      **host**: use the host's PID namespace inside the container.
      Note: the host mode gives the container full access to local PID and is therefore considered insecure.
 
+**--uts**=host
+   Set the UTS mode for the container
+     **host**: use the host's UTS namespace inside the container.
+     Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
+
 **--privileged**=*true*|*false*
    Give extended privileges to this container. The default is *false*.
 
@@ -341,7 +378,12 @@
 standard input.
 
 **-u**, **--user**=""
-   Username or UID
+   Sets the username or UID used and optionally the groupname or GID for the specified command.
+
+   The followings examples are all valid:
+   --user [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+   Without this argument the command will be run as root in the container.
 
 **-v**, **--volume**=[]
    Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)
@@ -354,6 +396,21 @@
 read-only or read-write mode, respectively. By default, the volumes are mounted
 read-write. See examples.
 
+Labeling systems like SELinux require proper labels be placed on volume content
+mounted into a container, otherwise the secuirty system might prevent the
+processes running inside the container from using the content. By default,
+volumes are not relabeled.
+
+Two suffixes :z or :Z can be added to the volume mount. These suffixes tell
+Docker to relabel file objects on the shared volumes. The 'z' option tells
+Docker that the volume content will be shared between containers. Docker will
+label the content with a shared content label. Shared volumes labels allow all
+containers to read/write content. The 'Z' option tells Docker to label the
+content with a private unshared label. Private volumes can only be used by the
+current container.
+
+Note: Multiple Volume options can be added separated by a ","
+
 **--volumes-from**=[]
    Mount volumes from the specified container(s)
 
@@ -411,7 +468,7 @@
 
 ## Sharing IPC between containers
 
-Using shm_server.c available here: http://www.cs.cf.ac.uk/Dave/C/node27.html
+Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html
 
 Testing `--ipc=host` mode:
 
@@ -428,7 +485,7 @@
 Now run a regular container, and it correctly does NOT see the shared memory segment from the host:
 
 ```
- $ sudo docker run -it shm ipcs -m
+ $ docker run -it shm ipcs -m
 
  ------ Shared Memory Segments --------	
  key        shmid      owner      perms      bytes      nattch     status      
@@ -437,7 +494,7 @@
 Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd:
 
  ```
- $ sudo docker run -it --ipc=host shm ipcs -m
+ $ docker run -it --ipc=host shm ipcs -m
 
  ------ Shared Memory Segments --------
  key        shmid      owner      perms      bytes      nattch     status      
@@ -447,7 +504,7 @@
 
 Start a container with a program to create a shared memory segment:
 ```
- sudo docker run -it shm bash
+ $ docker run -it shm bash
  $ sudo shm/shm_server &
  $ sudo ipcs -m
 
@@ -457,7 +514,7 @@
 ```
 Create a 2nd container correctly shows no shared memory segment from 1st container:
 ```
- $ sudo docker run shm ipcs -m
+ $ docker run shm ipcs -m
 
  ------ Shared Memory Segments --------
  key        shmid      owner      perms      bytes      nattch     status      
@@ -466,7 +523,7 @@
 Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first:
 
 ```
- $ sudo docker run -it --ipc=container:ed735b2264ac shm ipcs -m
+ $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m
  $ sudo ipcs -m
 
  ------ Shared Memory Segments --------
diff --git a/docs/man/docker-save.1.md b/docs/man/docker-save.1.md
index 91be06c..5f336ff 100644
--- a/docs/man/docker-save.1.md
+++ b/docs/man/docker-save.1.md
@@ -28,8 +28,8 @@
 Save all fedora repository images to a fedora-all.tar and save the latest
 fedora image to a fedora-latest.tar:
 
-    $ sudo docker save fedora > fedora-all.tar
-    $ sudo docker save --output=fedora-latest.tar fedora:latest
+    $ docker save fedora > fedora-all.tar
+    $ docker save --output=fedora-latest.tar fedora:latest
     $ ls -sh fedora-all.tar
     721M fedora-all.tar
     $ ls -sh fedora-latest.tar
diff --git a/docs/man/docker-search.1.md b/docs/man/docker-search.1.md
index 3c8e0a7..6316008 100644
--- a/docs/man/docker-search.1.md
+++ b/docs/man/docker-search.1.md
@@ -40,7 +40,7 @@
 Search a registry for the term 'fedora' and only display those images
 ranked 3 or higher:
 
-    $ sudo docker search -s 3 fedora
+    $ docker search -s 3 fedora
     NAME                  DESCRIPTION                                    STARS OFFICIAL  AUTOMATED
     mattdm/fedora         A basic Fedora image corresponding roughly...  50
     fedora                (Semi) Official Fedora base image.             38
@@ -52,7 +52,7 @@
 Search Docker Hub for the term 'fedora' and only display automated images
 ranked 1 or higher:
 
-    $ sudo docker search -s 1 -t fedora
+    $ docker search -s 1 -t fedora
     NAME               DESCRIPTION                                     STARS OFFICIAL  AUTOMATED
     goldmann/wildfly   A WildFly application server running on a ...   3               [OK]
     tutum/fedora-20    Fedora 20 image with SSH access. For the r...   1               [OK]
diff --git a/docs/man/docker-stats.1.md b/docs/man/docker-stats.1.md
index 493e402..4b48588 100644
--- a/docs/man/docker-stats.1.md
+++ b/docs/man/docker-stats.1.md
@@ -17,12 +17,15 @@
 **--help**
   Print usage statement
 
+**--no-stream**="false"
+  Disable streaming stats and only pull the first result
+
 # EXAMPLES
 
 Run **docker stats** with multiple containers.
 
-    $ sudo docker stats redis1 redis2
+    $ docker stats redis1 redis2
     CONTAINER           CPU %               MEM USAGE/LIMIT     MEM %               NET I/O
-    redis1              0.07%               796 KiB/64 MiB      1.21%               788 B/648 B
-    redis2              0.07%               2.746 MiB/64 MiB    4.29%               1.266 KiB/648 B
+    redis1              0.07%               796 KB/64 MB        1.21%               788 B/648 B
+    redis2              0.07%               2.746 MB/64 MB      4.29%               1.266 KB/648 B
 
diff --git a/docs/man/docker-top.1.md b/docs/man/docker-top.1.md
index be2bed2..c3bbf88 100644
--- a/docs/man/docker-top.1.md
+++ b/docs/man/docker-top.1.md
@@ -22,7 +22,7 @@
 
 Run **docker top** with the ps option of -x:
 
-    $ sudo docker top 8601afda2b -x
+    $ docker top 8601afda2b -x
     PID      TTY       STAT       TIME         COMMAND
     16623    ?         Ss         0:00         sleep 99999
 
diff --git a/docs/man/docker-wait.1.md b/docs/man/docker-wait.1.md
index a1e2aa2..5f07bac 100644
--- a/docs/man/docker-wait.1.md
+++ b/docs/man/docker-wait.1.md
@@ -19,9 +19,9 @@
 
 # EXAMPLES
 
-    $ sudo docker run -d fedora sleep 99
+    $ docker run -d fedora sleep 99
     079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622
-    $ sudo docker wait 079b83f558a2bc
+    $ docker wait 079b83f558a2bc
     0
 
 # HISTORY
diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md
index bcb9d25..884f181 100644
--- a/docs/man/docker.1.md
+++ b/docs/man/docker.1.md
@@ -41,12 +41,24 @@
 **-d**, **--daemon**=*true*|*false*
   Enable daemon mode. Default is false.
 
+**--default-gateway**=""
+  IPv4 address of the container default gateway; this address must be part of the bridge subnet (which is defined by \-b or \--bip)
+
+**--default-gateway-v6**=""
+  IPv6 address of the container default gateway
+
 **--dns**=""
   Force Docker to use specific DNS servers
 
 **-e**, **--exec-driver**=""
   Force Docker to use specific exec driver. Default is `native`.
 
+**--exec-opt**=[]
+  Set exec driver options. See EXEC DRIVER OPTIONS.
+
+**--exec-root**=""
+  Path to use as the root of the Docker execdriver. Default is `/var/run/docker`.
+
 **--fixed-cidr**=""
   IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip)
 
@@ -89,8 +101,8 @@
 **--label**="[]"
   Set key=value labels to the daemon (displayed in `docker info`)
 
-**--log-driver**="*json-file*|*syslog*|*none*"
-  Container's logging driver. Default is `default`.
+**--log-driver**="*json-file*|*syslog*|*journald*|*none*"
+  Default driver for container logs. Default is `json-file`.
   **Warning**: `docker logs` command works only for `json-file` logging driver.
 
 **--mtu**=VALUE
@@ -105,6 +117,9 @@
 **-s**, **--storage-driver**=""
   Force the Docker runtime to use a specific storage driver.
 
+**--selinux-enabled**=*true*|*false*
+  Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
+
 **--storage-opt**=[]
   Set storage driver options. See STORAGE DRIVER OPTIONS.
 
@@ -115,127 +130,165 @@
   Use TLS and verify the remote (daemon: verify client, client: verify daemon).
   Default is false.
 
+**--userland-proxy**=*true*|*false*
+    Rely on a userland proxy implementation for inter-container and outside-to-container loopback communications. Default is true.
+
 **-v**, **--version**=*true*|*false*
   Print version information and quit. Default is false.
 
-**--selinux-enabled**=*true*|*false*
-  Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver.
-
 # COMMANDS
-**docker-attach(1)**
+**attach**
   Attach to a running container
+  See **docker-attach(1)** for full documentation on the **attach** command.
 
-**docker-build(1)**
+**build**
   Build an image from a Dockerfile
+  See **docker-build(1)** for full documentation on the **build** command.
 
-**docker-commit(1)**
+**commit**
   Create a new image from a container's changes
+  See **docker-commit(1)** for full documentation on the **commit** command.
 
-**docker-cp(1)**
+**cp**
   Copy files/folders from a container's filesystem to the host
+  See **docker-cp(1)** for full documentation on the **cp** command.
 
-**docker-create(1)**
+**create**
   Create a new container
+  See **docker-create(1)** for full documentation on the **create** command.
 
-**docker-diff(1)**
+**diff**
   Inspect changes on a container's filesystem
+  See **docker-diff(1)** for full documentation on the **diff** command.
 
-**docker-events(1)**
+**events**
   Get real time events from the server
+  See **docker-events(1)** for full documentation on the **events** command.
 
-**docker-exec(1)**
+**exec**
   Run a command in a running container
+  See **docker-exec(1)** for full documentation on the **exec** command.
 
-**docker-export(1)**
+**export**
   Stream the contents of a container as a tar archive
+  See **docker-export(1)** for full documentation on the **export** command.
 
-**docker-history(1)**
+**history**
   Show the history of an image
+  See **docker-history(1)** for full documentation on the **history** command.
 
-**docker-images(1)**
+**images**
   List images
+  See **docker-images(1)** for full documentation on the **images** command.
 
-**docker-import(1)**
+**import**
   Create a new filesystem image from the contents of a tarball
+  See **docker-import(1)** for full documentation on the **import** command.
 
-**docker-info(1)**
+**info**
   Display system-wide information
+  See **docker-info(1)** for full documentation on the **info** command.
 
-**docker-inspect(1)**
+**inspect**
   Return low-level information on a container or image
+  See **docker-inspect(1)** for full documentation on the **inspect** command.
 
-**docker-kill(1)**
+**kill**
   Kill a running container (which includes the wrapper process and everything
 inside it)
+  See **docker-kill(1)** for full documentation on the **kill** command.
 
-**docker-load(1)**
+**load**
   Load an image from a tar archive
+  See **docker-load(1)** for full documentation on the **load** command.
 
-**docker-login(1)**
-  Register or login to a Docker Registry Service
+**login**
+  Register or login to a Docker Registry
+  See **docker-login(1)** for full documentation on the **login** command.
 
-**docker-logout(1)**
-  Log the user out of a Docker Registry Service
+**logout**
+  Log the user out of a Docker Registry
+  See **docker-logout(1)** for full documentation on the **logout** command.
 
-**docker-logs(1)**
+**logs**
   Fetch the logs of a container
+  See **docker-logs(1)** for full documentation on the **logs** command.
 
-**docker-pause(1)**
+**pause**
   Pause all processes within a container
+  See **docker-pause(1)** for full documentation on the **pause** command.
 
-**docker-port(1)**
+**port**
   Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
+  See **docker-port(1)** for full documentation on the **port** command.
 
-**docker-ps(1)**
+**ps**
   List containers
+  See **docker-ps(1)** for full documentation on the **ps** command.
 
-**docker-pull(1)**
-  Pull an image or a repository from a Docker Registry Service
+**pull**
+  Pull an image or a repository from a Docker Registry
+  See **docker-pull(1)** for full documentation on the **pull** command.
 
-**docker-push(1)**
-  Push an image or a repository to a Docker Registry Service
+**push**
+  Push an image or a repository to a Docker Registry
+  See **docker-push(1)** for full documentation on the **push** command.
 
-**docker-restart(1)**
+**restart**
   Restart a running container
+  See **docker-restart(1)** for full documentation on the **restart** command.
 
-**docker-rm(1)**
+**rm**
   Remove one or more containers
+  See **docker-rm(1)** for full documentation on the **rm** command.
 
-**docker-rmi(1)**
+**rmi**
   Remove one or more images
+  See **docker-rmi(1)** for full documentation on the **rmi** command.
 
-**docker-run(1)**
+**run**
   Run a command in a new container
+  See **docker-run(1)** for full documentation on the **run** command.
 
-**docker-save(1)**
+**save**
   Save an image to a tar archive
+  See **docker-save(1)** for full documentation on the **save** command.
 
-**docker-search(1)**
+**search**
   Search for an image in the Docker index
+  See **docker-search(1)** for full documentation on the **search** command.
 
-**docker-start(1)**
+**start**
   Start a stopped container
+  See **docker-start(1)** for full documentation on the **start** command.
 
-**docker-stats(1)**
+**stats**
   Display a live stream of one or more containers' resource usage statistics
+  See **docker-stats(1)** for full documentation on the **stats** command.
 
-**docker-stop(1)**
+**stop**
   Stop a running container
+  See **docker-stop(1)** for full documentation on the **stop** command.
 
-**docker-tag(1)**
+**tag**
   Tag an image into a repository
+  See **docker-tag(1)** for full documentation on the **tag** command.
 
-**docker-top(1)**
+**top**
   Lookup the running processes of a container
+  See **docker-top(1)** for full documentation on the **top** command.
 
-**docker-unpause(1)**
+**unpause**
   Unpause all processes within a container
+  See **docker-unpause(1)** for full documentation on the **unpause** command.
 
-**docker-version(1)**
+**version**
   Show the Docker version information
+  See **docker-version(1)** for full documentation on the **version** command.
 
-**docker-wait(1)**
+**wait**
   Block until a container stops, then print its exit code
+  See **docker-wait(1)** for full documentation on the **wait** command.
 
 # STORAGE DRIVER OPTIONS
 
@@ -313,6 +366,18 @@
       --storage-opt dm.metadatadev=/dev/vdc \
       --storage-opt dm.basesize=20G
 
+# EXEC DRIVER OPTIONS
+
+Use the **--exec-opt** flags to specify options to the exec-driver. The only
+driver that accepts this flag is the *native* (libcontainer) driver. As a
+result, you must also specify **-s=**native for this option to have effect. The 
+following is the only *native* option:
+
+#### native.cgroupdriver
+Specifies the management of the container's `cgroups`. You can specify 
+`cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the 
+system uses `cgroupfs`.
+
 #### Client
 For specific client examples please see the man page for the specific Docker
 command. For example:
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 30f2d11..73018a5 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -25,15 +25,24 @@
 
 # Introduction:
 - ['index.md', 'About', 'Docker']
-- ['release-notes.md', 'About', 'Release Notes']
-- ['introduction/index.md', '**HIDDEN**']
 - ['introduction/understanding-docker.md', 'About', 'Understanding Docker']
+- ['release-notes.md', 'About', 'Release notes']
+# Experimental
+- ['experimental/experimental.md', 'About', 'Experimental Features']
+- ['experimental/plugin_api.md', '**HIDDEN**']
+- ['experimental/plugins_volume.md', '**HIDDEN**']
+- ['experimental/plugins.md', '**HIDDEN**']
+- ['reference/glossary.md', 'About', 'Glossary']
+- ['introduction/index.md', '**HIDDEN**']
+
 
 # Installation:
 - ['installation/index.md', '**HIDDEN**']
 - ['installation/ubuntulinux.md', 'Installation', 'Ubuntu']
 - ['installation/mac.md', 'Installation', 'Mac OS X']
+- ['kitematic/index.md', 'Installation', 'Kitematic on OS X']
 - ['installation/windows.md', 'Installation', 'Microsoft Windows']
+- ['installation/testing-windows-docker-client.md', 'Installation', 'Building and testing the Windows Docker client']
 - ['installation/amazon.md', 'Installation', 'Amazon EC2']
 - ['installation/archlinux.md', 'Installation', 'Arch Linux']
 - ['installation/binaries.md', 'Installation', 'Binaries']
@@ -54,11 +63,11 @@
 - ['compose/install.md', 'Installation', 'Docker Compose']
 
 # User Guide:
-- ['userguide/index.md', 'User Guide', 'The Docker User Guide' ]
-- ['userguide/dockerhub.md', 'User Guide', 'Getting Started with Docker Hub' ]
-- ['userguide/dockerizing.md', 'User Guide', 'Dockerizing Applications' ]
-- ['userguide/usingdocker.md', 'User Guide', 'Working with Containers' ]
-- ['userguide/dockerimages.md', 'User Guide', 'Working with Docker Images' ]
+- ['userguide/index.md', 'User Guide', 'The Docker user guide' ]
+- ['userguide/dockerhub.md', 'User Guide', 'Getting started with Docker Hub' ]
+- ['userguide/dockerizing.md', 'User Guide', 'Dockerizing applications' ]
+- ['userguide/usingdocker.md', 'User Guide', 'Working with containers' ]
+- ['userguide/dockerimages.md', 'User Guide', 'Working with Docker images' ]
 - ['userguide/dockerlinks.md', 'User Guide', 'Linking containers together' ]
 - ['userguide/dockervolumes.md', 'User Guide', 'Managing data in containers' ]
 - ['userguide/labels-custom-metadata.md', 'User Guide', 'Apply custom metadata' ]
@@ -66,20 +75,29 @@
 - ['userguide/level1.md', '**HIDDEN**' ]
 - ['userguide/level2.md', '**HIDDEN**' ]
 - ['compose/index.md', 'User Guide', 'Docker Compose' ]
+- ['compose/production.md', 'User Guide', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp; Use Compose in production' ]
+- ['compose/extends.md', 'User Guide', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp; Extend Compose services' ]
 - ['machine/index.md', 'User Guide', 'Docker Machine' ]
 - ['swarm/index.md', 'User Guide', 'Docker Swarm' ]
+- ['kitematic/userguide.md', 'User Guide', 'Kitematic']
 
 # Docker Hub docs:
 - ['docker-hub/index.md', 'Docker Hub', 'Docker Hub' ]
 - ['docker-hub/accounts.md', 'Docker Hub', 'Accounts']
-- ['docker-hub/repos.md', 'Docker Hub', 'Repositories']
+- ['docker-hub/userguide.md', 'Docker Hub', 'User Guide']
+- ['docker-hub/repos.md', 'Docker Hub', 'Your Repositories']
 - ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds']
-- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repo Guidelines']
+- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repositories']
 
-# Docker Hub Enterprise
-#- ['docker-hub-enterprise/index.md', '**HIDDEN**' ]
-#- ['docker-hub-enterprise/install-config.md', 'Docker Hub Enterprise', 'Installation and Configuration' ]
-#- ['docker-hub-enterprise/usage.md', 'Docker Hub Enterprise', 'User Guide' ]
+# Docker Hub Enterprise:
+- ['docker-hub-enterprise/index.md', 'Docker Hub Enterprise', 'Overview' ]
+- ['docker-hub-enterprise/quick-start.md', 'Docker Hub Enterprise', 'Quick Start: Basic Workflow' ]
+- ['docker-hub-enterprise/userguide.md', 'Docker Hub Enterprise', 'User Guide' ]
+- ['docker-hub-enterprise/adminguide.md', 'Docker Hub Enterprise', 'Admin Guide' ]
+- ['docker-hub-enterprise/install.md', 'Docker Hub Enterprise', '&nbsp;&nbsp;Installation' ]
+- ['docker-hub-enterprise/configuration.md', 'Docker Hub Enterprise', '&nbsp;&nbsp;Configuration options' ]
+- ['docker-hub-enterprise/support.md', 'Docker Hub Enterprise', 'Support' ]
+- ['docker-hub-enterprise/release-notes.md', 'Docker Hub Enterprise', 'Release notes' ]
 
 # Examples:
 - ['examples/index.md', '**HIDDEN**']
@@ -94,6 +112,9 @@
 - ['compose/django.md', 'Examples', 'Getting started with Compose and Django']
 - ['compose/rails.md', 'Examples', 'Getting started with Compose and Rails']
 - ['compose/wordpress.md', 'Examples', 'Getting started with Compose and Wordpress']
+- ['kitematic/minecraft-server.md', 'Examples', 'Kitematic: Minecraft server']
+- ['kitematic/nginx-web-server.md', 'Examples', 'Kitematic: Ngnix web server']
+- ['kitematic/rethinkdb-dev-database.md', 'Examples', 'Kitematic: RethinkDB development database']
 
 # Articles
 - ['articles/index.md', '**HIDDEN**']
@@ -107,6 +128,7 @@
 - ['articles/dockerfile_best-practices.md', 'Articles', 'Best practices for writing Dockerfiles']
 - ['articles/certificates.md', 'Articles', 'Using certificates for repository client verification']
 - ['articles/using_supervisord.md', 'Articles', 'Using Supervisor']
+- ['articles/configuring.md', 'Articles', 'Configuring Docker']
 - ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine']
 - ['articles/puppet.md', 'Articles', 'Using Puppet']
 - ['articles/chef.md', 'Articles', 'Using Chef']
@@ -122,7 +144,8 @@
 - ['reference/commandline/cli.md', 'Reference', 'Docker command line']
 - ['reference/builder.md', 'Reference', 'Dockerfile']
 - ['faq.md', 'Reference', 'FAQ']
-- ['reference/run.md', 'Reference', 'Run Reference']
+- ['reference/run.md', 'Reference', 'Run reference']
+- ['reference/logging/journald.md', '**HIDDEN**']
 - ['compose/cli.md', 'Reference', 'Compose command line']
 - ['compose/yml.md', 'Reference', 'Compose yml']
 - ['compose/env.md', 'Reference', 'Compose ENV variables']
@@ -132,7 +155,7 @@
 - ['swarm/scheduler/filter.md', 'Reference', 'Swarm filters']
 - ['swarm/API.md', 'Reference', 'Swarm API']
 - ['reference/api/index.md', '**HIDDEN**']
-- ['registry/overview.md', 'Reference', 'Docker Registry 2.0']
+- ['registry/index.md', 'Reference', 'Docker Registry 2.0']
 - ['registry/deploying.md', 'Reference', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp; Deploy a registry' ]
 - ['registry/configuration.md', 'Reference', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp; Configure a registry' ]
 - ['registry/storagedrivers.md', 'Reference', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp; Storage driver model' ]
@@ -142,11 +165,12 @@
 - ['registry/spec/auth/token.md', 'Reference', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp; Authenticate via central service' ]
 - ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry 1.0']
 - ['reference/api/registry_api.md', 'Reference', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp;Docker Registry API v1']
-- ['reference/api/registry_api_client_libraries.md', 'Reference', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp;Docker Registry 1.0 API Client Libraries']
+- ['reference/api/registry_api_client_libraries.md', 'Reference', '&nbsp;&nbsp;&nbsp;&nbsp;&blacksquare;&nbsp;Docker Registry 1.0 API client libraries']
 #- ['reference/image-spec-v1.md', 'Reference', 'Docker Image Specification v1.0.0']
 - ['reference/api/docker-io_api.md', 'Reference', 'Docker Hub API']
 #- ['reference/image-spec-v1.md', 'Reference', 'Docker Image Specification v1.0.0']
 - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
+- ['reference/api/docker_remote_api_v1.19.md', 'Reference', 'Docker Remote API v1.19']
 - ['reference/api/docker_remote_api_v1.18.md', 'Reference', 'Docker Remote API v1.18']
 - ['reference/api/docker_remote_api_v1.17.md', 'Reference', 'Docker Remote API v1.17']
 - ['reference/api/docker_remote_api_v1.16.md', 'Reference', 'Docker Remote API v1.16']
@@ -166,8 +190,16 @@
 - ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**']
 - ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**']
 - ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
-- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
-- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API']
+- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API client libraries']
+- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub accounts API']
+- ['kitematic/faq.md', 'Reference', 'Kitematic: FAQ']
+- ['kitematic/known-issues.md', 'Reference', 'Kitematic: Known issues']
+
+# Hidden registry files
+- ['registry/storage-drivers/azure.md', '**HIDDEN**' ]
+- ['registry/storage-drivers/filesystem.md', '**HIDDEN**' ]
+- ['registry/storage-drivers/inmemory.md', '**HIDDEN**' ]
+- ['registry/storage-drivers/s3.md', '**HIDDEN**' ]
 
 - ['jsearch.md', '**HIDDEN**']
 
@@ -184,18 +216,19 @@
 
 # Project:
 - ['project/index.md', '**HIDDEN**']
-- ['project/who-written-for.md', 'Contributor Guide', 'README first']
-- ['project/software-required.md', 'Contributor Guide', 'Get required software'] 
-- ['project/set-up-git.md', 'Contributor Guide', 'Configure Git for contributing'] 
-- ['project/set-up-dev-env.md', 'Contributor Guide', 'Work with a development container'] 
-- ['project/test-and-docs.md', 'Contributor Guide', 'Run tests and test documentation']
-- ['project/make-a-contribution.md', 'Contributor Guide', 'Understand contribution workflow']
-- ['project/find-an-issue.md', 'Contributor Guide', 'Find an issue'] 
-- ['project/work-issue.md', 'Contributor Guide', 'Work on an issue'] 
-- ['project/create-pr.md', 'Contributor Guide', 'Create a pull request'] 
-- ['project/review-pr.md', 'Contributor Guide', 'Participate in the PR review'] 
-- ['project/advanced-contributing.md', 'Contributor Guide', 'Advanced contributing']
-- ['project/get-help.md', 'Contributor Guide', 'Where to get help']
-- ['project/coding-style.md', 'Contributor Guide', 'Coding style guide']
-- ['project/doc-style.md', 'Contributor Guide', 'Documentation style guide']
+- ['project/who-written-for.md', 'Contributor', 'README first']
+- ['project/software-required.md', 'Contributor', 'Get required software for Linux or OS X'] 
+- ['project/software-req-win.md', 'Contributor', 'Get required software for Windows']
+- ['project/set-up-git.md', 'Contributor', 'Configure Git for contributing'] 
+- ['project/set-up-dev-env.md', 'Contributor', 'Work with a development container'] 
+- ['project/test-and-docs.md', 'Contributor', 'Run tests and test documentation']
+- ['project/make-a-contribution.md', 'Contributor', 'Understand contribution workflow']
+- ['project/find-an-issue.md', 'Contributor', 'Find an issue'] 
+- ['project/work-issue.md', 'Contributor', 'Work on an issue'] 
+- ['project/create-pr.md', 'Contributor', 'Create a pull request'] 
+- ['project/review-pr.md', 'Contributor', 'Participate in the PR review'] 
+- ['project/advanced-contributing.md', 'Contributor', 'Advanced contributing']
+- ['project/get-help.md', 'Contributor', 'Where to get help']
+- ['project/coding-style.md', 'Contributor', 'Coding style guide']
+- ['project/doc-style.md', 'Contributor', 'Documentation style guide']
 
diff --git a/docs/release.sh b/docs/release.sh
index 74e8085..d01bc02 100755
--- a/docs/release.sh
+++ b/docs/release.sh
@@ -5,7 +5,7 @@
 
 usage() {
 	cat >&2 <<'EOF'
-To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file 
+To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file
 (with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
 and set the AWS_S3_BUCKET env var to the name of your bucket.
 
diff --git a/docs/s3_website.json b/docs/s3_website.json
index 0ce0b8c..b2479bc 100644
--- a/docs/s3_website.json
+++ b/docs/s3_website.json
@@ -17,6 +17,9 @@
     { "Condition": { "KeyPrefixEquals": "docker-hub/invite.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/invite.png" } },
     { "Condition": { "KeyPrefixEquals": "docker-hub/orgs.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/orgs.png" } },
     { "Condition": { "KeyPrefixEquals": "docker-hub/repos.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/repos.png" } },
+    { "Condition": { "KeyPrefixEquals": "installation/images/linux_docker_host.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "installation/images/linux_docker_host.svg" } },
+    { "Condition": { "KeyPrefixEquals": "installation/images/osx_docker_host.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "installation/images/osx_docker_host.svg" } },
+    { "Condition": { "KeyPrefixEquals": "installation/images/win_docker_host.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "installation/images/win_docker_host.svg" } },
     { "Condition": { "KeyPrefixEquals": "examples/hello_world/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } },
     { "Condition": { "KeyPrefixEquals": "examples/python_web_app/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } },
     { "Condition": { "KeyPrefixEquals": "use/working_with_volumes/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockervolumes/" } },
@@ -40,7 +43,8 @@
     { "Condition": { "KeyPrefixEquals": "contributing/contributing/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/who-written-for/" } },
     { "Condition": { "KeyPrefixEquals": "contributing/devenvironment/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/set-up-prereqs/" } },
     { "Condition": { "KeyPrefixEquals": "contributing/docs_style-guide/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/doc-style/" } },
-    { "Condition": { "KeyPrefixEquals": "registry/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "registry/overview/" } }
+    { "Condition": { "KeyPrefixEquals": "registry/overview/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "registry/" } }
+
   ]
 }
 
diff --git a/docs/sources/article-img/architecture.svg b/docs/sources/article-img/architecture.svg
index 607cc3c..afe563a 100644
--- a/docs/sources/article-img/architecture.svg
+++ b/docs/sources/article-img/architecture.svg
@@ -1,3 +1,2597 @@
-<?xml version="1.0"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
-<svg xmlns="http://www.w3.org/2000/svg" xmlns:xl="http://www.w3.org/1999/xlink" version="1.1" viewBox="0 0 358 351" width="358pt" height="351pt"><metadata xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>2014-04-15 00:37Z</dc:date><!-- Produced by OmniGraffle Professional 5.4.4 --></metadata><defs><font-face font-family="Helvetica" font-size="18" units-per-em="1000" underline-position="-75.683594" underline-thickness="49.316406" slope="0" x-height="532.22656" cap-height="719.72656" ascent="770.01953" descent="-229.98047" font-weight="bold"><font-face-src><font-face-name name="Helvetica-Bold"/></font-face-src></font-face><font-face font-family="Helvetica" font-size="12" panose-1="2 11 4 3 2 2 2 2 2 4" units-per-em="1000" underline-position="-75" underline-thickness="50" slope="0" x-height="524" cap-height="718" ascent="770.00427" descent="-229.99573" font-weight="300"><font-face-src><font-face-name name="Helvetica-Light"/></font-face-src></font-face><font-face font-family="Source Sans Pro" font-size="13" panose-1="2 11 6 3 3 4 3 2 2 4" units-per-em="1000" underline-position="-136" underline-thickness="94.00001" slope="0" x-height="504.00003" cap-height="667.00006" ascent="984.002" descent="-273.00058" font-weight="bold"><font-face-src><font-face-name name="SourceSansPro-Semibold"/></font-face-src></font-face><marker orient="auto" overflow="visible" markerUnits="strokeWidth" id="FilledArrow_Marker" viewBox="-1 -3 7 6" markerWidth="7" markerHeight="6" color="black"><g><path d="M 4.8000002 0 L 0 -1.8000001 L 0 1.8000001 Z" fill="currentColor" stroke="currentColor" stroke-width="1"/></g></marker><marker orient="auto" overflow="visible" markerUnits="strokeWidth" id="FilledArrow_Marker_2" viewBox="-6 -3 7 6" markerWidth="7" markerHeight="6" color="black"><g><path d="M -4.8000002 0 L 0 1.8000001 L 0 -1.8000001 Z" fill="currentColor" stroke="currentColor" stroke-width="1"/></g></marker></defs><g stroke="none" stroke-opacity="1" stroke-dasharray="none" fill="none" fill-opacity="1"><title>Canvas 1</title><g><title>Layer 1</title><path d="M 226 0 L 349 0 C 353.97056 30435919e-23 358 4.0294373 358 9 L 358 241 C 358 245.97056 353.97056 250 349 250 L 226 250 C 221.02944 250 217 245.97056 217 241 L 217 9 C 217 4.0294373 221.02944 -30435919e-23 226 0 Z" fill="#80bab7"/><path d="M 226 0 L 349 0 C 353.97056 30435919e-23 358 4.0294373 358 9 L 358 241 C 358 245.97056 353.97056 250 349 250 L 226 250 C 221.02944 250 217 245.97056 217 241 L 217 9 C 217 4.0294373 221.02944 -30435919e-23 226 0 Z" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="1"/><text transform="translate(224.5 13)" fill="#1a2429"><tspan font-family="Helvetica" font-size="18" font-weight="bold" fill="#1a2429" x="43.000488" y="18" textLength="39.999023">Host</tspan></text><path d="M 243 83 L 332 83 C 336.97056 83 341 87.029437 341 92 L 341 105 C 341 109.97056 336.97056 114 332 114 L 243 114 C 238.02944 114 234 109.97056 234 105 L 234 92 C 234 87.029437 238.02944 83 243 83 Z" fill="#e0e0e0"/><path d="M 243 83 L 332 83 C 336.97056 83 341 87.029437 341 92 L 341 105 C 341 109.97056 336.97056 114 332 114 L 243 114 C 238.02944 114 234 109.97056 234 105 L 234 92 C 234 87.029437 238.02944 83 243 83 Z" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="1"/><text transform="translate(239 91.5)" fill="#1a2429"><tspan font-family="Helvetica" font-size="12" font-weight="300" fill="#1a2429" x="17.486" y="11" textLength="62.028">Container 1</tspan></text><path d="M 243 122.5 L 332 122.5 C 336.97056 122.5 341 126.52944 341 131.5 L 341 144.5 C 341 149.47056 336.97056 153.5 332 153.5 L 243 153.5 C 238.02944 153.5 234 149.47056 234 144.5 L 234 131.5 C 234 126.52944 238.02944 122.5 243 122.5 Z" fill="#e0e0e0"/><path d="M 243 122.5 L 332 122.5 C 336.97056 122.5 341 126.52944 341 131.5 L 341 144.5 C 341 149.47056 336.97056 153.5 332 153.5 L 243 153.5 C 238.02944 153.5 234 149.47056 234 144.5 L 234 131.5 C 234 126.52944 238.02944 122.5 243 122.5 Z" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="1"/><text transform="translate(239 131)" fill="#1a2429"><tspan font-family="Helvetica" font-size="12" font-weight="300" fill="#1a2429" x="17.486" y="11" textLength="62.028">Container 2</tspan></text><path d="M 243 162.5 L 332 162.5 C 336.97056 162.5 341 166.52944 341 171.5 L 341 184.5 C 341 189.47056 336.97056 193.5 332 193.5 L 243 193.5 C 238.02944 193.5 234 189.47056 234 184.5 L 234 171.5 C 234 166.52944 238.02944 162.5 243 162.5 Z" fill="#e0e0e0"/><path d="M 243 162.5 L 332 162.5 C 336.97056 162.5 341 166.52944 341 171.5 L 341 184.5 C 341 189.47056 336.97056 193.5 332 193.5 L 243 193.5 C 238.02944 193.5 234 189.47056 234 184.5 L 234 171.5 C 234 166.52944 238.02944 162.5 243 162.5 Z" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="1"/><text transform="translate(239 171)" fill="#1a2429"><tspan font-family="Helvetica" font-size="12" font-weight="300" fill="#1a2429" x="17.486" y="11" textLength="62.028">Container 3</tspan></text><path d="M 243 202.5 L 332 202.5 C 336.97056 202.5 341 206.52944 341 211.5 L 341 224.5 C 341 229.47056 336.97056 233.5 332 233.5 L 243 233.5 C 238.02944 233.5 234 229.47056 234 224.5 L 234 211.5 C 234 206.52944 238.02944 202.5 243 202.5 Z" fill="#e0e0e0"/><path d="M 243 202.5 L 332 202.5 C 336.97056 202.5 341 206.52944 341 211.5 L 341 224.5 C 341 229.47056 336.97056 233.5 332 233.5 L 243 233.5 C 238.02944 233.5 234 229.47056 234 224.5 L 234 211.5 C 234 206.52944 238.02944 202.5 243 202.5 Z" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="1"/><text transform="translate(239 211)" fill="#1a2429"><tspan font-family="Helvetica" font-size="12" font-weight="300" fill="#1a2429" x="15.818" y="11" textLength="65.364">Container ...</tspan></text><text transform="translate(0 83.625)" fill="#fa6b1c"><tspan font-family="Helvetica" font-size="18" font-weight="bold" fill="#fa6b1c" x=".48339844" y="18" textLength="116.0332">Docker Client</tspan></text><text transform="translate(27 114.375)" fill="black"><tspan font-family="Source Sans Pro" font-size="13" font-weight="bold" x=".1635" y="13" textLength="27.261">dock</tspan><tspan font-family="Source Sans Pro" font-size="13" font-weight="bold" x="27.1255" y="13" textLength="35.710999">er pull</tspan></text><text transform="translate(28 136.875)" fill="black"><tspan font-family="Source Sans Pro" font-size="13" font-weight="bold" x=".288" y="13" textLength="27.261">dock</tspan><tspan font-family="Source Sans Pro" font-size="13" font-weight="bold" x="27.25" y="13" textLength="33.462002">er run</tspan></text><text transform="translate(32.5 159.375)" fill="black"><tspan font-family="Source Sans Pro" font-size="13" font-weight="bold" x=".104" y="13" textLength="27.261">dock</tspan><tspan font-family="Source Sans Pro" font-size="13" font-weight="bold" x="27.066" y="13" textLength="24.83">er ...</tspan></text><line x1="151.9" y1="133" x2="178.1" y2="133" marker-end="url(#FilledArrow_Marker)" marker-start="url(#FilledArrow_Marker_2)" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="2"/><text transform="translate(230.5 329)" fill="#fa6b1c"><tspan font-family="Helvetica" font-size="18" font-weight="bold" fill="#fa6b1c" x=".47753906" y="18" textLength="113.04492">Docker Index</tspan></text><line x1="287" y1="302.6" x2="287" y2="276.4" marker-end="url(#FilledArrow_Marker)" marker-start="url(#FilledArrow_Marker_2)" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="2"/><path d="M 243 42.5 L 332 42.5 C 336.97056 42.5 341 46.529437 341 51.5 L 341 64.5 C 341 69.470563 336.97056 73.5 332 73.5 L 243 73.5 C 238.02944 73.5 234 69.470563 234 64.5 L 234 51.5 C 234 46.529437 238.02944 42.5 243 42.5 Z" fill="#e0e0e0"/><path d="M 243 42.5 L 332 42.5 C 336.97056 42.5 341 46.529437 341 51.5 L 341 64.5 C 341 69.470563 336.97056 73.5 332 73.5 L 243 73.5 C 238.02944 73.5 234 69.470563 234 64.5 L 234 51.5 C 234 46.529437 238.02944 42.5 243 42.5 Z" stroke="black" stroke-linecap="round" stroke-linejoin="round" stroke-width="1"/><text transform="translate(239 51)" fill="#1a2429"><tspan font-family="Helvetica" font-size="12" font-weight="300" fill="#1a2429" x="4.82" y="11" textLength="87.36">Docker Daemon</tspan></text></g></g></svg>
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:xlink="http://www.w3.org/1999/xlink"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   version="1.1"
+   viewBox="0 0 756.39868 395.51889"
+   width="756.39868pt"
+   height="395.51889pt"
+   id="svg12514"
+   inkscape:version="0.91 r13725"
+   sodipodi:docname="architecture.svg">
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1928"
+     inkscape:window-height="1038"
+     id="namedview12622"
+     showgrid="false"
+     showguides="true"
+     inkscape:guide-bbox="true"
+     inkscape:snap-global="false"
+     inkscape:zoom="1"
+     inkscape:cx="321.34217"
+     inkscape:cy="385.06379"
+     inkscape:window-x="4"
+     inkscape:window-y="0"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="layer5"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0" />
+  <metadata
+     id="metadata12516">
+    <dc:date>2014-04-15 00:37Z</dc:date>
+    <!-- Produced by OmniGraffle Professional 5.4.4 -->
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs12518">
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker20462"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow2Lend">
+      <path
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         id="path20464"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="Arrow2Lend"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path5368"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:isstock="true"
+       style="overflow:visible"
+       id="marker16247"
+       refX="0"
+       refY="0"
+       orient="auto"
+       inkscape:stockid="Arrow2Lend">
+      <path
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         id="path16249"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker16171"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         id="path16173"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker16101"
+       style="overflow:visible"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         id="path16103"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+         inkscape:connector-curvature="0" />
+    </marker>
+    <font-face
+       font-family="Helvetica"
+       font-size="18"
+       units-per-em="1000"
+       underline-position="-75.683594"
+       underline-thickness="49.316406"
+       slope="0"
+       x-height="532.22656"
+       cap-height="719.72656"
+       ascent="770.01953"
+       descent="-229.98047"
+       font-weight="bold"
+       id="font-face12520"
+       stemv="0"
+       stemh="0"
+       accent-height="0"
+       ideographic="0"
+       alphabetic="0"
+       mathematical="0"
+       hanging="0"
+       v-ideographic="0"
+       v-alphabetic="0"
+       v-mathematical="0"
+       v-hanging="0"
+       strikethrough-position="0"
+       strikethrough-thickness="0"
+       overline-position="0"
+       overline-thickness="0">
+      <font-face-src>
+        <font-face-name
+           name="Helvetica-Bold" />
+      </font-face-src>
+    </font-face>
+    <font-face
+       font-family="Helvetica"
+       font-size="12"
+       panose-1="2 11 4 3 2 2 2 2 2 4"
+       units-per-em="1000"
+       underline-position="-75"
+       underline-thickness="50"
+       slope="0"
+       x-height="524"
+       cap-height="718"
+       ascent="770.00427"
+       descent="-229.99573"
+       font-weight="300"
+       id="font-face12522"
+       stemv="0"
+       stemh="0"
+       accent-height="0"
+       ideographic="0"
+       alphabetic="0"
+       mathematical="0"
+       hanging="0"
+       v-ideographic="0"
+       v-alphabetic="0"
+       v-mathematical="0"
+       v-hanging="0"
+       strikethrough-position="0"
+       strikethrough-thickness="0"
+       overline-position="0"
+       overline-thickness="0">
+      <font-face-src>
+        <font-face-name
+           name="Helvetica-Light" />
+      </font-face-src>
+    </font-face>
+    <font-face
+       font-family="Source Sans Pro"
+       font-size="13"
+       panose-1="2 11 6 3 3 4 3 2 2 4"
+       units-per-em="1000"
+       underline-position="-136"
+       underline-thickness="94.00001"
+       slope="0"
+       x-height="504.00003"
+       cap-height="667.00006"
+       ascent="984.002"
+       descent="-273.00058"
+       font-weight="bold"
+       id="font-face12524"
+       stemv="0"
+       stemh="0"
+       accent-height="0"
+       ideographic="0"
+       alphabetic="0"
+       mathematical="0"
+       hanging="0"
+       v-ideographic="0"
+       v-alphabetic="0"
+       v-mathematical="0"
+       v-hanging="0"
+       strikethrough-position="0"
+       strikethrough-thickness="0"
+       overline-position="0"
+       overline-thickness="0">
+      <font-face-src>
+        <font-face-name
+           name="SourceSansPro-Semibold" />
+      </font-face-src>
+    </font-face>
+    <marker
+       orient="auto"
+       overflow="visible"
+       markerUnits="strokeWidth"
+       id="FilledArrow_Marker"
+       viewBox="-1 -3 7 6"
+       markerWidth="7"
+       markerHeight="6"
+       style="color:#000000;overflow:visible">
+      <g
+         id="g12527">
+        <path
+           d="M 4.8000002,0 0,-1.8000001 0,1.8000001 Z"
+           id="path12529"
+           inkscape:connector-curvature="0"
+           style="fill:currentColor;stroke:currentColor;stroke-width:1" />
+      </g>
+    </marker>
+    <marker
+       orient="auto"
+       overflow="visible"
+       markerUnits="strokeWidth"
+       id="FilledArrow_Marker_2"
+       viewBox="-6 -3 7 6"
+       markerWidth="7"
+       markerHeight="6"
+       style="color:#000000;overflow:visible">
+      <g
+         id="g12532">
+        <path
+           d="M -4.8000002,0 0,1.8000001 0,-1.8000001 Z"
+           id="path12534"
+           inkscape:connector-curvature="0"
+           style="fill:currentColor;stroke:currentColor;stroke-width:1" />
+      </g>
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker8148"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path8150"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0"
+       refX="0"
+       id="marker6352"
+       style="overflow:visible"
+       inkscape:isstock="true">
+      <path
+         inkscape:connector-curvature="0"
+         id="path6354"
+         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+         d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+         transform="matrix(-1.1,0,0,-1.1,-1.1,0)" />
+    </marker>
+    <inkscape:perspective
+       id="perspective4302"
+       inkscape:persp3d-origin="177.16534 : 22.440946 : 1"
+       inkscape:vp_z="354.33069 : 33.661419 : 1"
+       inkscape:vp_y="0 : 1000 : 0"
+       inkscape:vp_x="0 : 33.661419 : 1"
+       sodipodi:type="inkscape:persp3d" />
+    <clipPath
+       id="clipPath4429"
+       clipPathUnits="userSpaceOnUse">
+      <g
+         id="g4431"
+         transform="matrix(0.9947658,0,0,1,1.7860989,0)">
+        <path
+           id="path4433"
+           d="M 322.54143,2800.9375 L 318.10638,2806.6673 L 313.67143,2800.9375 L 313.67143,2757.7946 L 294.73171,2757.7946 L 318.10638,2732.2579 L 341.23763,2757.7946 L 322.54143,2757.7946 L 322.54143,2800.9375 z"
+           style="fill:#eea623;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;marker:none;marker-start:none;marker-mid:none;marker-end:none;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;visibility:visible;display:inline;overflow:visible"
+           sodipodi:nodetypes="ccccccccc" />
+      </g>
+    </clipPath>
+  </defs>
+  <g
+     inkscape:groupmode="layer"
+     id="layer5"
+     inkscape:label="mac"
+     transform="translate(-73.287812,21.708751)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot16329"
+       style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:40px;line-height:125%;font-family:Monaco;-inkscape-font-specification:Monaco;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       transform="scale(0.8,0.8)"><flowRegion
+         id="flowRegion16331" /><flowPara
+         id="flowPara16335"></flowPara></flowRoot>    <flowRoot
+       xml:space="preserve"
+       id="flowRoot16337"
+       style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:40px;line-height:125%;font-family:Monaco;-inkscape-font-specification:Monaco;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       transform="scale(0.8,0.8)"><flowRegion
+         id="flowRegion16339"><rect
+           id="rect16341"
+           width="260"
+           height="135"
+           x="-586"
+           y="51.75"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco" /></flowRegion><flowPara
+         id="flowPara16343"></flowPara></flowRoot>    <rect
+       ry="0"
+       rx="0"
+       y="9.0645523"
+       x="276.25946"
+       height="363.91373"
+       width="327.51764"
+       id="rect4394"
+       style="display:inline;opacity:1;fill:#ade5f9;fill-opacity:1;stroke:#005976;stroke-width:1.66371417;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+    <g
+       id="g18942"
+       transform="translate(654.52608,-84.68013)">
+      <rect
+         style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         id="rect4345"
+         width="109.46352"
+         height="24.437973"
+         x="-378.16013"
+         y="80.70974"
+         ry="6.3192472"
+         rx="6.3192477" />
+      <g
+         id="flowRoot4337"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         transform="matrix(0.3883991,0,0,0.37995118,-446.94762,70.615007)">
+        <path
+           id="path7740"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 187.88477,72.829407 0,-30.3125 7.1875,0 q 4.96093,0 7.36328,1.738282 2.42187,1.71875 3.63281,5.214843 1.21094,3.496094 1.21094,8.28125 0,4.53125 -1.28907,7.96875 -1.26953,3.417969 -3.61328,5.273438 -2.34375,1.835937 -7.30468,1.835937 l -7.1875,0 z m 3.94531,-3.339843 2.63672,0 q 4.0039,0 5.70312,-1.425782 1.71875,-1.425781 2.44141,-3.945312 0.74219,-2.539063 0.74219,-6.992188 0,-4.355468 -0.85938,-6.699218 -0.85937,-2.34375 -2.46094,-3.59375 -1.60156,-1.269532 -5.2539,-1.269532 l -2.94922,0 0,23.925782 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7742"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 220.91211,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7744"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 254.23242,72.477845 q -3.39844,1.269531 -6.09375,1.269531 -3.55469,0 -6.54297,-1.894531 -2.98828,-1.914063 -4.64843,-5.664063 -1.66016,-3.769531 -1.66016,-8.515625 0,-4.707031 1.64062,-8.457031 1.64063,-3.75 4.62891,-5.683594 2.98828,-1.933593 6.58203,-1.933593 2.69531,0 6.09375,1.269531 l 0,3.59375 q -3.14453,-1.816406 -6.25,-1.816406 -2.34375,0 -4.29687,1.601562 -1.9336,1.601563 -3.125,4.707031 -1.19141,3.085938 -1.19141,6.71875 0,3.691407 1.23047,6.816407 1.23047,3.125 3.14453,4.6875 1.91406,1.542968 4.23828,1.542968 3.08594,0 6.25,-1.816406 l 0,3.574219 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7746"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 260.0332,72.829407 0,-30.3125 3.94532,0 0,14.863282 11.26953,-14.863282 4.17968,0 -10.8789,14.394532 12.40234,15.917968 -4.9414,0 -12.03125,-15.449218 0,15.449218 -3.94532,0 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7748"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 301.2832,69.489564 0,3.339843 -16.36718,0 0,-30.3125 16.05468,0 0,3.046875 -12.10937,0 0,10.292969 10.89844,0 0,3.027344 -10.89844,0 0,10.605469 12.42187,0 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7750"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 311.86914,59.958314 0,12.871093 -3.94531,0 0,-30.3125 8.10547,0 q 3.96484,0 5.83984,0.820313 1.875,0.800781 2.89063,2.441406 1.03515,1.640625 1.03515,3.535156 0,1.757813 -0.68359,3.535157 -0.6836,1.777343 -1.89453,3.183593 -1.19141,1.386719 -3.45703,2.636719 l 8.76953,14.160156 -4.6875,0 -8.02735,-12.871093 -3.94531,0 z m 0,-3.046875 5.52734,0 q 1.85547,-1.015625 2.75391,-2.070313 0.91797,-1.054687 1.38672,-2.246094 0.46875,-1.191406 0.46875,-2.382812 0,-2.128906 -1.5625,-3.378906 -1.54297,-1.269532 -5.625,-1.269532 l -2.94922,0 0,11.347657 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7752"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 330.38477,75.856751 0,-3.027344 21.21093,0 0,3.027344 -21.21093,0 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7754"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 355.52148,72.829407 0,-30.3125 3.94532,0 0,13.046875 11.21093,0 0,-13.046875 3.92579,0 0,30.3125 -3.92579,0 0,-14.238281 -11.21093,0 0,14.238281 -3.94532,0 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7756"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 389.03711,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7758"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 404.17383,72.184876 0,-3.710937 q 4.6875,2.226562 8.51562,2.226562 1.60157,0 3.06641,-0.605469 1.46484,-0.625 2.20703,-1.777343 0.76172,-1.152344 0.76172,-2.5 0,-1.582032 -1.03516,-2.949219 -1.03515,-1.386719 -3.88672,-3.046875 l -1.99218,-1.152344 -2.01172,-1.152344 q -5.3125,-3.144531 -5.3125,-7.8125 0,-3.417968 2.36328,-5.742187 2.38281,-2.34375 7.42187,-2.34375 3.24219,0 6.26954,0.9375 l 0,3.378906 q -3.33985,-1.289062 -6.50391,-1.289062 -2.51953,0 -4.08203,1.328125 -1.54297,1.328125 -1.54297,3.203125 0,1.855468 1.19141,3.085937 1.1914,1.230469 3.04687,2.265625 l 1.52344,0.917969 1.89453,1.152344 1.60156,0.9375 q 4.98047,3.046875 4.98047,7.65625 0,3.515625 -2.55859,6.035156 -2.5586,2.519531 -8.20313,2.519531 -1.79687,0 -3.4375,-0.3125 -1.62109,-0.292969 -4.27734,-1.25 z"
+           inkscape:connector-curvature="0" />
+        <path
+           id="path7760"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           d="m 435.11133,72.829407 0,-26.972656 -9.08203,0 0,-3.339844 22.1289,0 0,3.339844 -9.10156,0 0,26.972656 -3.94531,0 z"
+           inkscape:connector-curvature="0" />
+      </g>
+    </g>
+    <rect
+       style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#005976;stroke-width:1.66371417;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect15559-9"
+       width="121.27443"
+       height="178.65872"
+       x="463.27411"
+       y="101.135"
+       rx="0"
+       ry="0" />
+    <rect
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect15561-8"
+       width="66.263512"
+       height="24.437988"
+       x="463.38062"
+       y="88.100029"
+       ry="6.3192472"
+       rx="6.3192477" />
+    <g
+       transform="translate(885.1556,-110.61633)"
+       style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot16093-5">
+      <path
+         d="m -413.08839,205.13537 0,11.424 1.52,0 0,-11.424 -1.52,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23126" />
+      <path
+         d="m -409.22014,208.28737 0,8.272 1.36,0 0,-5.152 q 0,-0.24 0.112,-0.592 0.128,-0.368 0.384,-0.704 0.272,-0.336 0.688,-0.576 0.432,-0.24 1.024,-0.24 0.464,0 0.752,0.144 0.304,0.128 0.48,0.384 0.176,0.24 0.24,0.576 0.08,0.336 0.08,0.736 l 0,5.424 1.36,0 0,-5.152 q 0,-0.96 0.576,-1.536 0.576,-0.576 1.584,-0.576 0.496,0 0.8,0.144 0.32,0.144 0.496,0.4 0.176,0.24 0.24,0.576 0.064,0.336 0.064,0.72 l 0,5.424 1.36,0 0,-6.064 q 0,-0.64 -0.208,-1.088 -0.192,-0.464 -0.56,-0.752 -0.352,-0.288 -0.864,-0.416 -0.496,-0.144 -1.12,-0.144 -0.816,0 -1.504,0.368 -0.672,0.368 -1.088,1.04 -0.256,-0.768 -0.88,-1.088 -0.624,-0.32 -1.392,-0.32 -1.744,0 -2.672,1.408 l -0.032,0 0,-1.216 -1.28,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23128" />
+      <path
+         d="m -388.23589,216.52737 q -0.352,0.208 -0.976,0.208 -0.528,0 -0.848,-0.288 -0.304,-0.304 -0.304,-0.976 -0.56,0.672 -1.312,0.976 -0.736,0.288 -1.6,0.288 -0.56,0 -1.072,-0.128 -0.496,-0.128 -0.864,-0.4 -0.368,-0.272 -0.592,-0.704 -0.208,-0.448 -0.208,-1.072 0,-0.704 0.24,-1.152 0.24,-0.448 0.624,-0.72 0.4,-0.288 0.896,-0.432 0.512,-0.144 1.04,-0.24 0.56,-0.112 1.056,-0.16 0.512,-0.064 0.896,-0.16 0.384,-0.112 0.608,-0.304 0.224,-0.208 0.224,-0.592 0,-0.448 -0.176,-0.72 -0.16,-0.272 -0.432,-0.416 -0.256,-0.144 -0.592,-0.192 -0.32,-0.048 -0.64,-0.048 -0.864,0 -1.44,0.336 -0.576,0.32 -0.624,1.232 l -1.36,0 q 0.032,-0.768 0.32,-1.296 0.288,-0.528 0.768,-0.848 0.48,-0.336 1.088,-0.48 0.624,-0.144 1.328,-0.144 0.56,0 1.104,0.08 0.56,0.08 1.008,0.336 0.448,0.24 0.72,0.688 0.272,0.448 0.272,1.168 l 0,4.256 q 0,0.48 0.048,0.704 0.064,0.224 0.384,0.224 0.176,0 0.416,-0.08 l 0,1.056 z m -2.208,-4.24 q -0.256,0.192 -0.672,0.288 -0.416,0.08 -0.88,0.144 -0.448,0.048 -0.912,0.128 -0.464,0.064 -0.832,0.224 -0.368,0.16 -0.608,0.464 -0.224,0.288 -0.224,0.8 0,0.336 0.128,0.576 0.144,0.224 0.352,0.368 0.224,0.144 0.512,0.208 0.288,0.064 0.608,0.064 0.672,0 1.152,-0.176 0.48,-0.192 0.784,-0.464 0.304,-0.288 0.448,-0.608 0.144,-0.336 0.144,-0.624 l 0,-1.392 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23130" />
+      <path
+         d="m -379.83414,215.85537 0,-7.568 -1.28,0 0,1.184 -0.016,0 q -0.368,-0.688 -1.024,-1.024 -0.656,-0.352 -1.44,-0.352 -1.072,0 -1.808,0.416 -0.736,0.4 -1.184,1.04 -0.448,0.624 -0.64,1.408 -0.192,0.768 -0.192,1.504 0,0.848 0.224,1.616 0.24,0.752 0.704,1.328 0.464,0.56 1.152,0.896 0.688,0.336 1.616,0.336 0.8,0 1.504,-0.352 0.72,-0.368 1.072,-1.136 l 0.032,0 0,0.544 q 0,0.688 -0.144,1.264 -0.128,0.576 -0.432,0.976 -0.304,0.416 -0.768,0.64 -0.464,0.24 -1.136,0.24 -0.336,0 -0.704,-0.08 -0.368,-0.064 -0.688,-0.224 -0.304,-0.16 -0.528,-0.416 -0.208,-0.256 -0.224,-0.624 l -1.36,0 q 0.032,0.672 0.352,1.136 0.32,0.464 0.8,0.752 0.496,0.288 1.088,0.416 0.608,0.128 1.184,0.128 1.984,0 2.912,-1.008 0.928,-1.008 0.928,-3.04 z m -3.808,-0.4 q -0.672,0 -1.12,-0.272 -0.448,-0.288 -0.72,-0.736 -0.272,-0.464 -0.384,-1.024 -0.112,-0.56 -0.112,-1.12 0,-0.592 0.128,-1.136 0.144,-0.544 0.432,-0.96 0.304,-0.416 0.768,-0.656 0.464,-0.256 1.12,-0.256 0.64,0 1.088,0.256 0.448,0.256 0.72,0.688 0.288,0.416 0.416,0.944 0.128,0.528 0.128,1.072 0,0.576 -0.144,1.152 -0.128,0.576 -0.416,1.04 -0.288,0.448 -0.768,0.736 -0.464,0.272 -1.136,0.272 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23132" />
+      <path
+         d="m -372.06989,211.63137 -4.752,0 q 0.032,-0.48 0.208,-0.896 0.176,-0.432 0.48,-0.752 0.304,-0.32 0.72,-0.496 0.432,-0.192 0.96,-0.192 0.512,0 0.928,0.192 0.432,0.176 0.736,0.496 0.32,0.304 0.496,0.736 0.192,0.432 0.224,0.912 z m 1.312,2.304 -1.344,0 q -0.176,0.816 -0.736,1.216 -0.544,0.4 -1.408,0.4 -0.672,0 -1.168,-0.224 -0.496,-0.224 -0.816,-0.592 -0.32,-0.384 -0.464,-0.864 -0.144,-0.496 -0.128,-1.04 l 6.192,0 q 0.032,-0.752 -0.144,-1.584 -0.16,-0.832 -0.608,-1.536 -0.432,-0.704 -1.168,-1.152 -0.72,-0.464 -1.824,-0.464 -0.848,0 -1.568,0.32 -0.704,0.32 -1.232,0.896 -0.512,0.576 -0.8,1.36 -0.288,0.784 -0.288,1.728 0.032,0.944 0.272,1.744 0.256,0.8 0.752,1.376 0.496,0.576 1.216,0.896 0.736,0.32 1.728,0.32 1.408,0 2.336,-0.704 0.928,-0.704 1.2,-2.096 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23134" />
+      <path
+         d="m -368.38814,213.95137 -1.36,0 q 0.032,0.768 0.32,1.312 0.288,0.528 0.768,0.864 0.48,0.32 1.104,0.464 0.624,0.144 1.312,0.144 0.624,0 1.248,-0.128 0.64,-0.112 1.136,-0.416 0.512,-0.304 0.816,-0.8 0.32,-0.496 0.32,-1.248 0,-0.592 -0.24,-0.992 -0.224,-0.4 -0.608,-0.656 -0.368,-0.272 -0.864,-0.432 -0.48,-0.16 -0.992,-0.272 -0.48,-0.112 -0.96,-0.208 -0.48,-0.112 -0.864,-0.256 -0.384,-0.16 -0.64,-0.384 -0.24,-0.24 -0.24,-0.592 0,-0.32 0.16,-0.512 0.16,-0.208 0.416,-0.32 0.256,-0.128 0.56,-0.176 0.32,-0.048 0.624,-0.048 0.336,0 0.656,0.08 0.336,0.064 0.608,0.224 0.272,0.16 0.448,0.432 0.176,0.256 0.208,0.656 l 1.36,0 q -0.048,-0.752 -0.32,-1.248 -0.272,-0.512 -0.736,-0.8 -0.448,-0.304 -1.04,-0.416 -0.592,-0.128 -1.296,-0.128 -0.544,0 -1.104,0.144 -0.544,0.128 -0.992,0.416 -0.432,0.272 -0.72,0.72 -0.272,0.448 -0.272,1.072 0,0.8 0.4,1.248 0.4,0.448 0.992,0.704 0.608,0.24 1.312,0.384 0.704,0.128 1.296,0.304 0.608,0.16 1.008,0.432 0.4,0.272 0.4,0.8 0,0.384 -0.192,0.64 -0.192,0.24 -0.496,0.368 -0.288,0.128 -0.64,0.176 -0.352,0.048 -0.672,0.048 -0.416,0 -0.816,-0.08 -0.384,-0.08 -0.704,-0.256 -0.304,-0.192 -0.496,-0.496 -0.192,-0.32 -0.208,-0.768 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23136" />
+    </g>
+    <rect
+       ry="0"
+       rx="0"
+       y="101.135"
+       x="295.48798"
+       height="244.25873"
+       width="124.47442"
+       id="rect16947"
+       style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;stroke:#005976;stroke-width:1.66371417;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+    <rect
+       rx="6.3192477"
+       ry="6.3192472"
+       y="88.100029"
+       x="295.59448"
+       height="24.437988"
+       width="90.263519"
+       id="rect16949"
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+    <g
+       transform="translate(717.3695,-110.61634)"
+       style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot16951">
+      <path
+         d="m -405.12039,208.57537 1.52,0 q -0.128,-0.912 -0.544,-1.6 -0.416,-0.704 -1.04,-1.168 -0.624,-0.464 -1.424,-0.704 -0.8,-0.24 -1.696,-0.24 -1.312,0 -2.336,0.48 -1.008,0.464 -1.696,1.28 -0.672,0.816 -1.024,1.92 -0.352,1.088 -0.352,2.336 0,1.248 0.32,2.336 0.336,1.088 0.992,1.888 0.656,0.8 1.648,1.264 0.992,0.448 2.32,0.448 2.192,0 3.456,-1.2 1.264,-1.2 1.488,-3.36 l -1.52,0 q -0.048,0.704 -0.288,1.312 -0.24,0.608 -0.672,1.056 -0.416,0.432 -1.008,0.688 -0.576,0.24 -1.328,0.24 -1.024,0 -1.76,-0.384 -0.736,-0.384 -1.216,-1.024 -0.464,-0.656 -0.688,-1.52 -0.224,-0.88 -0.224,-1.856 0,-0.896 0.224,-1.728 0.224,-0.832 0.688,-1.472 0.48,-0.656 1.2,-1.04 0.736,-0.384 1.76,-0.384 1.2,0 2.064,0.608 0.88,0.608 1.136,1.824 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23139" />
+      <path
+         d="m -400.82189,212.43137 q 0,-0.752 0.192,-1.328 0.208,-0.592 0.56,-0.992 0.352,-0.4 0.816,-0.608 0.48,-0.208 1.008,-0.208 0.528,0 0.992,0.208 0.48,0.208 0.832,0.608 0.352,0.4 0.544,0.992 0.208,0.576 0.208,1.328 0,0.752 -0.208,1.344 -0.192,0.576 -0.544,0.976 -0.352,0.384 -0.832,0.592 -0.464,0.208 -0.992,0.208 -0.528,0 -1.008,-0.208 -0.464,-0.208 -0.816,-0.592 -0.352,-0.4 -0.56,-0.976 -0.192,-0.592 -0.192,-1.344 z m -1.44,0 q 0,0.912 0.256,1.696 0.256,0.784 0.768,1.376 0.512,0.576 1.264,0.912 0.752,0.32 1.728,0.32 0.992,0 1.728,-0.32 0.752,-0.336 1.264,-0.912 0.512,-0.592 0.768,-1.376 0.256,-0.784 0.256,-1.696 0,-0.912 -0.256,-1.696 -0.256,-0.8 -0.768,-1.376 -0.512,-0.592 -1.264,-0.928 -0.736,-0.336 -1.728,-0.336 -0.976,0 -1.728,0.336 -0.752,0.336 -1.264,0.928 -0.512,0.576 -0.768,1.376 -0.256,0.784 -0.256,1.696 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23141" />
+      <path
+         d="m -392.65764,208.28737 0,8.272 1.36,0 0,-4.672 q 0,-0.56 0.144,-1.024 0.16,-0.48 0.464,-0.832 0.304,-0.352 0.752,-0.544 0.464,-0.192 1.088,-0.192 0.784,0 1.232,0.448 0.448,0.448 0.448,1.216 l 0,5.6 1.36,0 0,-5.44 q 0,-0.672 -0.144,-1.216 -0.128,-0.56 -0.464,-0.96 -0.336,-0.4 -0.88,-0.624 -0.544,-0.224 -1.36,-0.224 -1.84,0 -2.688,1.504 l -0.032,0 0,-1.312 -1.28,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23143" />
+      <path
+         d="m -381.86339,208.28737 0,-2.48 -1.36,0 0,2.48 -1.408,0 0,1.2 1.408,0 0,5.264 q 0,0.576 0.112,0.928 0.112,0.352 0.336,0.544 0.24,0.192 0.608,0.272 0.384,0.064 0.912,0.064 l 1.04,0 0,-1.2 -0.624,0 q -0.32,0 -0.528,-0.016 -0.192,-0.032 -0.304,-0.112 -0.112,-0.08 -0.16,-0.224 -0.032,-0.144 -0.032,-0.384 l 0,-5.136 1.648,0 0,-1.2 -1.648,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23145" />
+      <path
+         d="m -371.36089,216.52737 q -0.352,0.208 -0.976,0.208 -0.528,0 -0.848,-0.288 -0.304,-0.304 -0.304,-0.976 -0.56,0.672 -1.312,0.976 -0.736,0.288 -1.6,0.288 -0.56,0 -1.072,-0.128 -0.496,-0.128 -0.864,-0.4 -0.368,-0.272 -0.592,-0.704 -0.208,-0.448 -0.208,-1.072 0,-0.704 0.24,-1.152 0.24,-0.448 0.624,-0.72 0.4,-0.288 0.896,-0.432 0.512,-0.144 1.04,-0.24 0.56,-0.112 1.056,-0.16 0.512,-0.064 0.896,-0.16 0.384,-0.112 0.608,-0.304 0.224,-0.208 0.224,-0.592 0,-0.448 -0.176,-0.72 -0.16,-0.272 -0.432,-0.416 -0.256,-0.144 -0.592,-0.192 -0.32,-0.048 -0.64,-0.048 -0.864,0 -1.44,0.336 -0.576,0.32 -0.624,1.232 l -1.36,0 q 0.032,-0.768 0.32,-1.296 0.288,-0.528 0.768,-0.848 0.48,-0.336 1.088,-0.48 0.624,-0.144 1.328,-0.144 0.56,0 1.104,0.08 0.56,0.08 1.008,0.336 0.448,0.24 0.72,0.688 0.272,0.448 0.272,1.168 l 0,4.256 q 0,0.48 0.048,0.704 0.064,0.224 0.384,0.224 0.176,0 0.416,-0.08 l 0,1.056 z m -2.208,-4.24 q -0.256,0.192 -0.672,0.288 -0.416,0.08 -0.88,0.144 -0.448,0.048 -0.912,0.128 -0.464,0.064 -0.832,0.224 -0.368,0.16 -0.608,0.464 -0.224,0.288 -0.224,0.8 0,0.336 0.128,0.576 0.144,0.224 0.352,0.368 0.224,0.144 0.512,0.208 0.288,0.064 0.608,0.064 0.672,0 1.152,-0.176 0.48,-0.192 0.784,-0.464 0.304,-0.288 0.448,-0.608 0.144,-0.336 0.144,-0.624 l 0,-1.392 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23147" />
+      <path
+         d="m -368.65514,206.79937 0,-1.664 -1.36,0 0,1.664 1.36,0 z m -1.36,1.488 0,8.272 1.36,0 0,-8.272 -1.36,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23149" />
+      <path
+         d="m -366.53264,208.28737 0,8.272 1.36,0 0,-4.672 q 0,-0.56 0.144,-1.024 0.16,-0.48 0.464,-0.832 0.304,-0.352 0.752,-0.544 0.464,-0.192 1.088,-0.192 0.784,0 1.232,0.448 0.448,0.448 0.448,1.216 l 0,5.6 1.36,0 0,-5.44 q 0,-0.672 -0.144,-1.216 -0.128,-0.56 -0.464,-0.96 -0.336,-0.4 -0.88,-0.624 -0.544,-0.224 -1.36,-0.224 -1.84,0 -2.688,1.504 l -0.032,0 0,-1.312 -1.28,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23151" />
+      <path
+         d="m -351.88239,211.63137 -4.752,0 q 0.032,-0.48 0.208,-0.896 0.176,-0.432 0.48,-0.752 0.304,-0.32 0.72,-0.496 0.432,-0.192 0.96,-0.192 0.512,0 0.928,0.192 0.432,0.176 0.736,0.496 0.32,0.304 0.496,0.736 0.192,0.432 0.224,0.912 z m 1.312,2.304 -1.344,0 q -0.176,0.816 -0.736,1.216 -0.544,0.4 -1.408,0.4 -0.672,0 -1.168,-0.224 -0.496,-0.224 -0.816,-0.592 -0.32,-0.384 -0.464,-0.864 -0.144,-0.496 -0.128,-1.04 l 6.192,0 q 0.032,-0.752 -0.144,-1.584 -0.16,-0.832 -0.608,-1.536 -0.432,-0.704 -1.168,-1.152 -0.72,-0.464 -1.824,-0.464 -0.848,0 -1.568,0.32 -0.704,0.32 -1.232,0.896 -0.512,0.576 -0.8,1.36 -0.288,0.784 -0.288,1.728 0.032,0.944 0.272,1.744 0.256,0.8 0.752,1.376 0.496,0.576 1.216,0.896 0.736,0.32 1.728,0.32 1.408,0 2.336,-0.704 0.928,-0.704 1.2,-2.096 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23153" />
+      <path
+         d="m -349.08064,208.28737 0,8.272 1.36,0 0,-3.68 q 0,-0.8 0.16,-1.408 0.16,-0.624 0.512,-1.056 0.352,-0.432 0.928,-0.656 0.576,-0.224 1.392,-0.224 l 0,-1.44 q -1.104,-0.032 -1.824,0.448 -0.72,0.48 -1.216,1.488 l -0.032,0 0,-1.744 -1.28,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23155" />
+      <path
+         d="m -342.85689,213.95137 -1.36,0 q 0.032,0.768 0.32,1.312 0.288,0.528 0.768,0.864 0.48,0.32 1.104,0.464 0.624,0.144 1.312,0.144 0.624,0 1.248,-0.128 0.64,-0.112 1.136,-0.416 0.512,-0.304 0.816,-0.8 0.32,-0.496 0.32,-1.248 0,-0.592 -0.24,-0.992 -0.224,-0.4 -0.608,-0.656 -0.368,-0.272 -0.864,-0.432 -0.48,-0.16 -0.992,-0.272 -0.48,-0.112 -0.96,-0.208 -0.48,-0.112 -0.864,-0.256 -0.384,-0.16 -0.64,-0.384 -0.24,-0.24 -0.24,-0.592 0,-0.32 0.16,-0.512 0.16,-0.208 0.416,-0.32 0.256,-0.128 0.56,-0.176 0.32,-0.048 0.624,-0.048 0.336,0 0.656,0.08 0.336,0.064 0.608,0.224 0.272,0.16 0.448,0.432 0.176,0.256 0.208,0.656 l 1.36,0 q -0.048,-0.752 -0.32,-1.248 -0.272,-0.512 -0.736,-0.8 -0.448,-0.304 -1.04,-0.416 -0.592,-0.128 -1.296,-0.128 -0.544,0 -1.104,0.144 -0.544,0.128 -0.992,0.416 -0.432,0.272 -0.72,0.72 -0.272,0.448 -0.272,1.072 0,0.8 0.4,1.248 0.4,0.448 0.992,0.704 0.608,0.24 1.312,0.384 0.704,0.128 1.296,0.304 0.608,0.16 1.008,0.432 0.4,0.272 0.4,0.8 0,0.384 -0.192,0.64 -0.192,0.24 -0.496,0.368 -0.288,0.128 -0.64,0.176 -0.352,0.048 -0.672,0.048 -0.416,0 -0.816,-0.08 -0.384,-0.08 -0.704,-0.256 -0.304,-0.192 -0.496,-0.496 -0.192,-0.32 -0.208,-0.768 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23157" />
+    </g>
+    <g
+       id="g5083"
+       transform="matrix(0.47747005,0,0,0.50284965,330.19832,124.13704)">
+      <g
+         id="Isolation_Mode-6">
+        <polygon
+           id="polygon4984"
+           points="0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 32.038,0.183 0,19.314 "
+           style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd" />
+        <polygon
+           id="polygon4986"
+           points="92.65,94.222 5,67.66 5,23.314 92.65,45.131 "
+           style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd" />
+        <g
+           id="g4988">
+          <g
+             id="g4990">
+            <polygon
+               id="polygon4992"
+               points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon4994"
+               points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon4996"
+               points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon4998"
+               points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5000"
+               points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5002"
+               points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5004"
+               points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5006"
+               points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5008"
+               points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5010"
+               points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5012"
+               points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5014"
+               points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5016"
+               points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+          </g>
+          <g
+             id="g5018">
+            <polygon
+               id="polygon5020"
+               points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5022"
+               points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5024"
+               points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5026"
+               points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5028"
+               points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5030"
+               points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5032"
+               points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5034"
+               points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5036"
+               points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5038"
+               points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5040"
+               points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5042"
+               points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5044"
+               points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+               style="fill:#394d54" />
+          </g>
+        </g>
+        <polygon
+           id="polygon5046"
+           points="117.005,75.212 92.65,94.398 92.65,46.469 117.005,29.184 "
+           style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+        <polygon
+           id="polygon5048"
+           points="117.005,27.429 92.65,45.131 5,23.314 32.538,6.683 "
+           style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd" />
+        <rect
+           id="rect5050"
+           height="92.299004"
+           width="3.5250001"
+           transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+           y="-12.046"
+           x="46.075001"
+           style="fill:#394d54" />
+        <rect
+           id="rect5052"
+           height="34.868999"
+           width="3.5250001"
+           transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+           y="18.122"
+           x="103.715"
+           style="fill:#394d54" />
+        <rect
+           id="rect5054"
+           height="51.174"
+           width="3.523"
+           transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+           y="44.498001"
+           x="90.234001"
+           style="fill:#394d54" />
+      </g>
+      <g
+         display="none"
+         id="Layer_2-0"
+         style="display:none">
+        <line
+           id="line5063"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-49.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5065"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-46.612"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5067"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-4.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5069"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-67.064003"
+           x1="-52.994999"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5071"
+           y2="-49.472"
+           x2="-76.915001"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5073"
+           y2="-71.179001"
+           x2="-165"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5075"
+           y2="-0.271"
+           x2="-77.349998"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+      </g>
+    </g>
+    <g
+       id="g5225"
+       transform="matrix(0.47747005,0,0,0.50284965,330.19832,175.51094)">
+      <g
+         id="g5227">
+        <polygon
+           id="polygon5229"
+           points="122.005,22.429 32.038,0.183 0,19.314 0,72.66 92.65,101.222 122.005,77.57 "
+           style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd" />
+        <polygon
+           id="polygon5231"
+           points="92.65,45.131 92.65,94.222 5,67.66 5,23.314 "
+           style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd" />
+        <g
+           id="g5233">
+          <g
+             id="g5235">
+            <polygon
+               id="polygon5237"
+               points="7.917,66.177 11.364,66.906 11.364,30.067 7.917,29.184 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5239"
+               points="14.134,68.013 17.598,68.765 17.598,31.68 14.134,30.801 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5241"
+               points="20.353,69.848 23.831,70.624 23.831,33.292 20.353,32.418 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5243"
+               points="26.569,71.684 30.063,72.483 30.063,34.905 26.569,34.035 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5245"
+               points="32.789,73.519 36.298,74.341 36.298,36.518 32.789,35.652 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5247"
+               points="39.007,75.354 42.532,76.201 42.532,38.13 39.007,37.27 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5249"
+               points="45.224,77.19 48.765,78.06 48.765,39.743 45.224,38.887 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5251"
+               points="51.442,79.025 54.999,79.919 54.999,41.356 51.442,40.504 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5253"
+               points="57.661,80.86 61.231,81.777 61.231,42.968 57.661,42.122 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5255"
+               points="63.878,82.697 67.466,83.637 67.466,44.581 63.878,43.737 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5257"
+               points="70.097,84.532 73.699,85.496 73.699,46.193 70.097,45.355 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5259"
+               points="76.313,86.367 79.933,87.354 79.933,47.806 76.313,46.973 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5261"
+               points="82.532,48.589 82.532,88.203 86.165,89.213 86.165,49.418 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+          </g>
+          <g
+             id="g5263">
+            <polygon
+               id="polygon5265"
+               points="7.917,66.177 11.364,66.906 11.364,30.067 7.917,29.184 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5267"
+               points="14.134,68.013 17.598,68.765 17.598,31.68 14.134,30.801 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5269"
+               points="20.353,69.848 23.831,70.624 23.831,33.292 20.353,32.418 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5271"
+               points="26.569,71.684 30.063,72.483 30.063,34.905 26.569,34.035 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5273"
+               points="32.789,73.519 36.298,74.341 36.298,36.518 32.789,35.652 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5275"
+               points="39.007,75.354 42.532,76.201 42.532,38.13 39.007,37.27 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5277"
+               points="45.224,77.19 48.765,78.06 48.765,39.743 45.224,38.887 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5279"
+               points="51.442,79.025 54.999,79.919 54.999,41.356 51.442,40.504 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5281"
+               points="57.661,80.86 61.231,81.777 61.231,42.968 57.661,42.122 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5283"
+               points="63.878,82.697 67.466,83.637 67.466,44.581 63.878,43.737 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5285"
+               points="70.097,84.532 73.699,85.496 73.699,46.193 70.097,45.355 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5287"
+               points="76.313,86.367 79.933,87.354 79.933,47.806 76.313,46.973 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5289"
+               points="82.532,48.589 82.532,88.203 86.165,89.213 86.165,49.418 "
+               style="fill:#394d54" />
+          </g>
+        </g>
+        <polygon
+           id="polygon5291"
+           points="117.005,29.184 117.005,75.212 92.65,94.398 92.65,46.469 "
+           style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+        <polygon
+           id="polygon5293"
+           points="32.538,6.683 117.005,27.429 92.65,45.131 5,23.314 "
+           style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd" />
+        <rect
+           id="rect5295"
+           height="92.299004"
+           width="3.5250001"
+           transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+           y="-12.046"
+           x="46.075001"
+           style="fill:#394d54" />
+        <rect
+           id="rect5297"
+           height="34.868999"
+           width="3.5250001"
+           transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+           y="18.122"
+           x="103.715"
+           style="fill:#394d54" />
+        <rect
+           id="rect5299"
+           height="51.174"
+           width="3.523"
+           transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+           y="44.498001"
+           x="90.234001"
+           style="fill:#394d54" />
+      </g>
+      <g
+         display="none"
+         id="g5301"
+         style="display:none">
+        <line
+           id="line5303"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-49.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5305"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-46.612"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5307"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-4.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5309"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-67.064003"
+           x1="-52.994999"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5311"
+           y2="-49.472"
+           x2="-76.915001"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5313"
+           y2="-71.179001"
+           x2="-165"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5315"
+           y2="-0.271"
+           x2="-77.349998"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+      </g>
+    </g>
+    <g
+       transform="matrix(0.47747005,0,0,0.50284965,330.19832,226.88483)"
+       id="g17899">
+      <g
+         id="g17901">
+        <polygon
+           style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+           points="122.005,77.57 122.005,22.429 32.038,0.183 0,19.314 0,72.66 92.65,101.222 "
+           id="polygon17903" />
+        <polygon
+           style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd"
+           points="5,23.314 92.65,45.131 92.65,94.222 5,67.66 "
+           id="polygon17905" />
+        <g
+           id="g17907">
+          <g
+             id="g17909">
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+               id="polygon17911" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+               id="polygon17913" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+               id="polygon17915" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+               id="polygon17917" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+               id="polygon17919" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+               id="polygon17921" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+               id="polygon17923" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+               id="polygon17925" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+               id="polygon17927" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+               id="polygon17929" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+               id="polygon17931" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+               id="polygon17933" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+               id="polygon17935" />
+          </g>
+          <g
+             id="g17937">
+            <polygon
+               style="fill:#394d54"
+               points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+               id="polygon17939" />
+            <polygon
+               style="fill:#394d54"
+               points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+               id="polygon17941" />
+            <polygon
+               style="fill:#394d54"
+               points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+               id="polygon17943" />
+            <polygon
+               style="fill:#394d54"
+               points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+               id="polygon17945" />
+            <polygon
+               style="fill:#394d54"
+               points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+               id="polygon17947" />
+            <polygon
+               style="fill:#394d54"
+               points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+               id="polygon17949" />
+            <polygon
+               style="fill:#394d54"
+               points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+               id="polygon17951" />
+            <polygon
+               style="fill:#394d54"
+               points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+               id="polygon17953" />
+            <polygon
+               style="fill:#394d54"
+               points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+               id="polygon17955" />
+            <polygon
+               style="fill:#394d54"
+               points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+               id="polygon17957" />
+            <polygon
+               style="fill:#394d54"
+               points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+               id="polygon17959" />
+            <polygon
+               style="fill:#394d54"
+               points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+               id="polygon17961" />
+            <polygon
+               style="fill:#394d54"
+               points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+               id="polygon17963" />
+          </g>
+        </g>
+        <polygon
+           style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+           points="92.65,46.469 117.005,29.184 117.005,75.212 92.65,94.398 "
+           id="polygon17965" />
+        <polygon
+           style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd"
+           points="5,23.314 32.538,6.683 117.005,27.429 92.65,45.131 "
+           id="polygon17967" />
+        <rect
+           style="fill:#394d54"
+           x="46.075001"
+           y="-12.046"
+           transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+           width="3.5250001"
+           height="92.299004"
+           id="rect17969" />
+        <rect
+           style="fill:#394d54"
+           x="103.715"
+           y="18.122"
+           transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+           width="3.5250001"
+           height="34.868999"
+           id="rect17971" />
+        <rect
+           style="fill:#394d54"
+           x="90.234001"
+           y="44.498001"
+           transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+           width="3.523"
+           height="51.174"
+           id="rect17973" />
+      </g>
+      <g
+         style="display:none"
+         id="g17975"
+         display="none">
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-49.362"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line17977" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-46.612"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line17979" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-4.362"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line17981" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-52.994999"
+           y1="-67.064003"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line17983" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-76.915001"
+           y2="-49.472"
+           id="line17985" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-165"
+           y2="-71.179001"
+           id="line17987" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-77.349998"
+           y2="-0.271"
+           id="line17989" />
+      </g>
+    </g>
+    <g
+       transform="matrix(0.47747005,0,0,0.50284965,330.19832,278.25873)"
+       id="g17991">
+      <g
+         id="g17993">
+        <polygon
+           style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+           points="32.038,0.183 0,19.314 0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 "
+           id="polygon17995" />
+        <polygon
+           style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd"
+           points="92.65,94.222 5,67.66 5,23.314 92.65,45.131 "
+           id="polygon17997" />
+        <g
+           id="g17999">
+          <g
+             id="g18001">
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+               id="polygon18003" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+               id="polygon18005" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+               id="polygon18007" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+               id="polygon18009" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+               id="polygon18011" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+               id="polygon18013" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+               id="polygon18015" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+               id="polygon18017" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+               id="polygon18019" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+               id="polygon18021" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+               id="polygon18023" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+               id="polygon18025" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+               id="polygon18027" />
+          </g>
+          <g
+             id="g18029">
+            <polygon
+               style="fill:#394d54"
+               points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+               id="polygon18031" />
+            <polygon
+               style="fill:#394d54"
+               points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+               id="polygon18033" />
+            <polygon
+               style="fill:#394d54"
+               points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+               id="polygon18035" />
+            <polygon
+               style="fill:#394d54"
+               points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+               id="polygon18037" />
+            <polygon
+               style="fill:#394d54"
+               points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+               id="polygon18039" />
+            <polygon
+               style="fill:#394d54"
+               points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+               id="polygon18041" />
+            <polygon
+               style="fill:#394d54"
+               points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+               id="polygon18043" />
+            <polygon
+               style="fill:#394d54"
+               points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+               id="polygon18045" />
+            <polygon
+               style="fill:#394d54"
+               points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+               id="polygon18047" />
+            <polygon
+               style="fill:#394d54"
+               points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+               id="polygon18049" />
+            <polygon
+               style="fill:#394d54"
+               points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+               id="polygon18051" />
+            <polygon
+               style="fill:#394d54"
+               points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+               id="polygon18053" />
+            <polygon
+               style="fill:#394d54"
+               points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+               id="polygon18055" />
+          </g>
+        </g>
+        <polygon
+           style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+           points="117.005,75.212 92.65,94.398 92.65,46.469 117.005,29.184 "
+           id="polygon18057" />
+        <polygon
+           style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd"
+           points="117.005,27.429 92.65,45.131 5,23.314 32.538,6.683 "
+           id="polygon18059" />
+        <rect
+           style="fill:#394d54"
+           x="46.075001"
+           y="-12.046"
+           transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+           width="3.5250001"
+           height="92.299004"
+           id="rect18061" />
+        <rect
+           style="fill:#394d54"
+           x="103.715"
+           y="18.122"
+           transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+           width="3.5250001"
+           height="34.868999"
+           id="rect18063" />
+        <rect
+           style="fill:#394d54"
+           x="90.234001"
+           y="44.498001"
+           transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+           width="3.523"
+           height="51.174"
+           id="rect18065" />
+      </g>
+      <g
+         style="display:none"
+         id="g18067"
+         display="none">
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-49.362"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line18069" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-46.612"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line18071" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-4.362"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line18073" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-52.994999"
+           y1="-67.064003"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line18075" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-76.915001"
+           y2="-49.472"
+           id="line18077" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-165"
+           y2="-71.179001"
+           id="line18079" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-77.349998"
+           y2="-0.271"
+           id="line18081" />
+      </g>
+    </g>
+    <g
+       transform="matrix(0.30953328,-0.23033205,0,0.30953328,188.6113,392.68265)"
+       id="g18083">
+      <path
+         style="fill:#394d54;fill-opacity:1"
+         d="m 620.36719,177.47266 c -11.84776,-0.50977 -19.91248,14.44883 -12.98047,24.06836 6.07955,10.39639 23.35314,8.80448 27.42969,-2.52735 4.38855,-9.96271 -3.54659,-21.81733 -14.44922,-21.54101 z"
+         id="circle18140"
+         inkscape:connector-curvature="0" />
+      <circle
+         id="circle18085"
+         r="13.5728"
+         cy="193.04353"
+         cx="620.4353"
+         style="fill:#dd4814" />
+      <path
+         id="path18087"
+         d="m 611.96969,191.41233 c -0.9016,0 -1.6288,0.7296 -1.6288,1.6312 0,0.8984 0.7272,1.6296 1.6288,1.6296 0.9024,0 1.6312,-0.7312 1.6312,-1.6296 0,-0.9016 -0.7288,-1.6312 -1.6312,-1.6312 z m 11.644,7.4128 c -0.7808,0.4472 -1.0464,1.4456 -0.5976,2.2232 0.4504,0.7816 1.4472,1.0472 2.2272,0.5984 0.78,-0.4496 1.048,-1.4472 0.5976,-2.2264 -0.4504,-0.78 -1.448,-1.0472 -2.2272,-0.5952 z m -8.1,-5.7816 c 0,-1.6128 0.7984,-3.0376 2.0232,-3.8984 l -1.1904,-1.9984 c -1.4272,0.9528 -2.4896,2.4112 -2.9312,4.1184 0.5168,0.42 0.844,1.0592 0.844,1.7784 0,0.7144 -0.3264,1.3528 -0.844,1.7752 0.4416,1.7064 1.504,3.164 2.9312,4.1168 l 1.1904,-1.9968 c -1.2248,-0.8608 -2.0232,-2.284 -2.0232,-3.8952 z m 4.7616,-4.7656 c 2.4896,0 4.5304,1.908 4.7448,4.344 l 2.324,-0.0376 c -0.1152,-1.7952 -0.8992,-3.408 -2.1048,-4.5936 -0.6208,0.2328 -1.3376,0.2 -1.956,-0.1592 -0.6184,-0.3576 -1.008,-0.9592 -1.1152,-1.616 -0.6032,-0.1656 -1.2376,-0.2576 -1.8928,-0.2576 -1.1264,0 -2.1928,0.2656 -3.1376,0.7344 l 1.132,2.0296 c 0.6096,-0.2848 1.2904,-0.444 2.0056,-0.444 z m 0,9.5264 c -0.716,0 -1.396,-0.1576 -2.0056,-0.4424 l -1.132,2.0296 c 0.9448,0.4704 2.0112,0.7328 3.1376,0.7328 0.6552,0 1.2896,-0.0888 1.8928,-0.256 0.1072,-0.656 0.496,-1.2576 1.1152,-1.6168 0.62,-0.3576 1.3352,-0.392 1.956,-0.156 1.2056,-1.1872 1.9896,-2.8 2.1048,-4.5968 l -2.324,-0.0328 c -0.2144,2.432 -2.2552,4.3384 -4.7448,4.3384 z m 3.3384,-10.5432 c 0.7792,0.4488 1.7768,0.1832 2.2256,-0.5984 0.4504,-0.7768 0.1832,-1.7752 -0.596,-2.2248 -0.7808,-0.4496 -1.7768,-0.1832 -2.2272,0.5968 -0.4488,0.7792 -0.1832,1.776 0.5976,2.2264 z"
+         inkscape:connector-curvature="0"
+         style="fill:#ffffff" />
+    </g>
+    <g
+       id="g18146"
+       transform="matrix(0.30953328,-0.23033205,0,0.30953328,188.6113,238.59168)">
+      <path
+         inkscape:connector-curvature="0"
+         id="path18148"
+         d="m 620.36719,177.47266 c -11.84776,-0.50977 -19.91248,14.44883 -12.98047,24.06836 6.07955,10.39639 23.35314,8.80448 27.42969,-2.52735 4.38855,-9.96271 -3.54659,-21.81733 -14.44922,-21.54101 z"
+         style="fill:#394d54;fill-opacity:1" />
+      <circle
+         style="fill:#dd4814"
+         cx="620.4353"
+         cy="193.04353"
+         r="13.5728"
+         id="circle18150" />
+      <path
+         style="fill:#ffffff"
+         inkscape:connector-curvature="0"
+         d="m 611.96969,191.41233 c -0.9016,0 -1.6288,0.7296 -1.6288,1.6312 0,0.8984 0.7272,1.6296 1.6288,1.6296 0.9024,0 1.6312,-0.7312 1.6312,-1.6296 0,-0.9016 -0.7288,-1.6312 -1.6312,-1.6312 z m 11.644,7.4128 c -0.7808,0.4472 -1.0464,1.4456 -0.5976,2.2232 0.4504,0.7816 1.4472,1.0472 2.2272,0.5984 0.78,-0.4496 1.048,-1.4472 0.5976,-2.2264 -0.4504,-0.78 -1.448,-1.0472 -2.2272,-0.5952 z m -8.1,-5.7816 c 0,-1.6128 0.7984,-3.0376 2.0232,-3.8984 l -1.1904,-1.9984 c -1.4272,0.9528 -2.4896,2.4112 -2.9312,4.1184 0.5168,0.42 0.844,1.0592 0.844,1.7784 0,0.7144 -0.3264,1.3528 -0.844,1.7752 0.4416,1.7064 1.504,3.164 2.9312,4.1168 l 1.1904,-1.9968 c -1.2248,-0.8608 -2.0232,-2.284 -2.0232,-3.8952 z m 4.7616,-4.7656 c 2.4896,0 4.5304,1.908 4.7448,4.344 l 2.324,-0.0376 c -0.1152,-1.7952 -0.8992,-3.408 -2.1048,-4.5936 -0.6208,0.2328 -1.3376,0.2 -1.956,-0.1592 -0.6184,-0.3576 -1.008,-0.9592 -1.1152,-1.616 -0.6032,-0.1656 -1.2376,-0.2576 -1.8928,-0.2576 -1.1264,0 -2.1928,0.2656 -3.1376,0.7344 l 1.132,2.0296 c 0.6096,-0.2848 1.2904,-0.444 2.0056,-0.444 z m 0,9.5264 c -0.716,0 -1.396,-0.1576 -2.0056,-0.4424 l -1.132,2.0296 c 0.9448,0.4704 2.0112,0.7328 3.1376,0.7328 0.6552,0 1.2896,-0.0888 1.8928,-0.256 0.1072,-0.656 0.496,-1.2576 1.1152,-1.6168 0.62,-0.3576 1.3352,-0.392 1.956,-0.156 1.2056,-1.1872 1.9896,-2.8 2.1048,-4.5968 l -2.324,-0.0328 c -0.2144,2.432 -2.2552,4.3384 -4.7448,4.3384 z m 3.3384,-10.5432 c 0.7792,0.4488 1.7768,0.1832 2.2256,-0.5984 0.4504,-0.7768 0.1832,-1.7752 -0.596,-2.2248 -0.7808,-0.4496 -1.7768,-0.1832 -2.2272,0.5968 -0.4488,0.7792 -0.1832,1.776 0.5976,2.2264 z"
+         id="path18152" />
+    </g>
+    <rect
+       style="display:inline;opacity:1;fill:#ade5f9;fill-opacity:1;stroke:#005976;stroke-width:1.47231948;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect15559"
+       width="172.45595"
+       height="163.06149"
+       x="74.023972"
+       y="8.968852"
+       rx="0"
+       ry="0" />
+    <rect
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect15561"
+       width="55.863518"
+       height="24.437973"
+       x="74.226181"
+       y="-3.9703875"
+       ry="6.3192472"
+       rx="6.3192477" />
+    <g
+       transform="translate(496.0012,-202.68678)"
+       style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot16093">
+      <path
+         d="m -405.12039,208.57537 1.52,0 q -0.128,-0.912 -0.544,-1.6 -0.416,-0.704 -1.04,-1.168 -0.624,-0.464 -1.424,-0.704 -0.8,-0.24 -1.696,-0.24 -1.312,0 -2.336,0.48 -1.008,0.464 -1.696,1.28 -0.672,0.816 -1.024,1.92 -0.352,1.088 -0.352,2.336 0,1.248 0.32,2.336 0.336,1.088 0.992,1.888 0.656,0.8 1.648,1.264 0.992,0.448 2.32,0.448 2.192,0 3.456,-1.2 1.264,-1.2 1.488,-3.36 l -1.52,0 q -0.048,0.704 -0.288,1.312 -0.24,0.608 -0.672,1.056 -0.416,0.432 -1.008,0.688 -0.576,0.24 -1.328,0.24 -1.024,0 -1.76,-0.384 -0.736,-0.384 -1.216,-1.024 -0.464,-0.656 -0.688,-1.52 -0.224,-0.88 -0.224,-1.856 0,-0.896 0.224,-1.728 0.224,-0.832 0.688,-1.472 0.48,-0.656 1.2,-1.04 0.736,-0.384 1.76,-0.384 1.2,0 2.064,0.608 0.88,0.608 1.136,1.824 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23223" />
+      <path
+         d="m -401.73389,205.13537 0,11.424 1.36,0 0,-11.424 -1.36,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23225" />
+      <path
+         d="m -396.81139,206.79937 0,-1.664 -1.36,0 0,1.664 1.36,0 z m -1.36,1.488 0,8.272 1.36,0 0,-8.272 -1.36,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23227" />
+      <path
+         d="m -388.94489,211.63137 -4.752,0 q 0.032,-0.48 0.208,-0.896 0.176,-0.432 0.48,-0.752 0.304,-0.32 0.72,-0.496 0.432,-0.192 0.96,-0.192 0.512,0 0.928,0.192 0.432,0.176 0.736,0.496 0.32,0.304 0.496,0.736 0.192,0.432 0.224,0.912 z m 1.312,2.304 -1.344,0 q -0.176,0.816 -0.736,1.216 -0.544,0.4 -1.408,0.4 -0.672,0 -1.168,-0.224 -0.496,-0.224 -0.816,-0.592 -0.32,-0.384 -0.464,-0.864 -0.144,-0.496 -0.128,-1.04 l 6.192,0 q 0.032,-0.752 -0.144,-1.584 -0.16,-0.832 -0.608,-1.536 -0.432,-0.704 -1.168,-1.152 -0.72,-0.464 -1.824,-0.464 -0.848,0 -1.568,0.32 -0.704,0.32 -1.232,0.896 -0.512,0.576 -0.8,1.36 -0.288,0.784 -0.288,1.728 0.032,0.944 0.272,1.744 0.256,0.8 0.752,1.376 0.496,0.576 1.216,0.896 0.736,0.32 1.728,0.32 1.408,0 2.336,-0.704 0.928,-0.704 1.2,-2.096 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23229" />
+      <path
+         d="m -386.09514,208.28737 0,8.272 1.36,0 0,-4.672 q 0,-0.56 0.144,-1.024 0.16,-0.48 0.464,-0.832 0.304,-0.352 0.752,-0.544 0.464,-0.192 1.088,-0.192 0.784,0 1.232,0.448 0.448,0.448 0.448,1.216 l 0,5.6 1.36,0 0,-5.44 q 0,-0.672 -0.144,-1.216 -0.128,-0.56 -0.464,-0.96 -0.336,-0.4 -0.88,-0.624 -0.544,-0.224 -1.36,-0.224 -1.84,0 -2.688,1.504 l -0.032,0 0,-1.312 -1.28,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23231" />
+      <path
+         d="m -375.30089,208.28737 0,-2.48 -1.36,0 0,2.48 -1.408,0 0,1.2 1.408,0 0,5.264 q 0,0.576 0.112,0.928 0.112,0.352 0.336,0.544 0.24,0.192 0.608,0.272 0.384,0.064 0.912,0.064 l 1.04,0 0,-1.2 -0.624,0 q -0.32,0 -0.528,-0.016 -0.192,-0.032 -0.304,-0.112 -0.112,-0.08 -0.16,-0.224 -0.032,-0.144 -0.032,-0.384 l 0,-5.136 1.648,0 0,-1.2 -1.648,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23233" />
+    </g>
+    <rect
+       y="35.104694"
+       x="88.070274"
+       height="24.38966"
+       width="144.36334"
+       id="rect15593"
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#dbdde0;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       ry="7.1999998"
+       rx="5.5999999" />
+    <g
+       transform="translate(455.19194,-65.37672)"
+       style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot16345">
+      <path
+         d="m -339.36133,116.03723 q -0.66406,1.09375 -1.60156,1.64844 -0.92969,0.55469 -1.71094,0.55469 -1.09375,0 -1.96875,-0.94532 -0.86719,-0.94531 -0.86719,-2.86718 0,-1.51563 0.57813,-2.70313 0.57812,-1.19531 1.54687,-1.88281 0.96875,-0.69531 2.39844,-0.69531 0.41406,0 1.25,0.0859 0.125,0.0156 0.375,0.0391 l 0,-3.88282 1.46094,0 0,12.60938 -1.46094,0 0,-1.96094 z m 0,-1.72656 0,-3.74219 q -0.77344,-0.20312 -1.4375,-0.20312 -1.34375,0 -2.24219,1.03906 -0.89062,1.03906 -0.89062,2.82812 0,1.29688 0.48437,1.96094 0.49219,0.65625 1.11719,0.65625 0.67969,0 1.5,-0.6875 0.82031,-0.6875 1.46875,-1.85156 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23200" />
+      <path
+         d="m -332.00195,118.24036 q -1.875,0 -3,-1.30469 -1.125,-1.3125 -1.125,-3.30469 0,-1.99219 1.125,-3.29687 1.125,-1.30469 3,-1.30469 1.86718,0 2.99218,1.30469 1.125,1.30468 1.125,3.29687 0,1.99219 -1.125,3.30469 -1.125,1.30469 -2.99218,1.30469 z m 0,-1.21094 q 1.16406,0 1.85156,-0.90625 0.6875,-0.90625 0.6875,-2.49219 0,-1.57812 -0.6875,-2.48437 -0.6875,-0.90625 -1.85156,-0.90625 -1.17188,0 -1.85938,0.90625 -0.6875,0.90625 -0.6875,2.48437 0,1.58594 0.6875,2.49219 0.6875,0.90625 1.85938,0.90625 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23202" />
+      <path
+         d="m -318.62695,117.74817 q -1.46094,0.49219 -2.78125,0.49219 -1.3125,0 -2.33594,-0.60157 -1.01563,-0.60937 -1.58594,-1.64062 -0.57031,-1.03906 -0.57031,-2.35938 0,-1.98437 1.28906,-3.29687 1.29688,-1.3125 3.125,-1.3125 1.35156,0 2.85938,0.50781 l 0,1.39844 q -1.52344,-0.69531 -2.72657,-0.69531 -0.79687,0 -1.5,0.39843 -0.70312,0.39063 -1.08593,1.22657 -0.38282,0.83593 -0.38282,1.71875 0,1.17968 0.73438,2.25 0.74219,1.07031 2.32031,1.07031 0.47656,0 0.9375,-0.0625 0.46094,-0.0703 1.70313,-0.49219 l 0,1.39844 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23204" />
+      <path
+         d="m -315.95508,117.99817 0,-12.60938 1.45313,0 0,7.92188 4.19531,-4.03906 1.9375,0 -4.1875,4.03906 4.29687,4.6875 -2,0 -4.24218,-4.6875 0,4.6875 -1.45313,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23206" />
+      <path
+         d="m -299.46289,117.42004 q -1.74219,0.82813 -3.29688,0.82813 -1.17968,0 -2.10937,-0.58594 -0.92969,-0.58594 -1.49219,-1.67187 -0.55469,-1.09375 -0.55469,-2.33594 0,-1.19531 0.53125,-2.29688 0.53125,-1.10156 1.48438,-1.71093 0.95312,-0.61719 2.125,-0.61719 1.55469,0 2.49219,1.10937 0.94531,1.10938 0.94531,3.42188 l 0,0.3125 -6.01563,0 q 0,0.90625 0.375,1.65625 0.38282,0.75 1,1.125 0.61719,0.375 1.40625,0.375 1.40625,0 3.10938,-0.9375 l 0,1.32812 z m -5.80469,-4.63281 4.35156,0 0,-0.21094 q 0,-1.07031 -0.52343,-1.70312 -0.52344,-0.63281 -1.39844,-0.63281 -0.88281,0 -1.53125,0.64843 -0.64063,0.64063 -0.89844,1.89844 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23208" />
+      <path
+         d="m -296.38477,117.99817 0,-8.72656 1.45313,0 0,1.90625 q 0.67969,-1.07813 1.55469,-1.60938 0.88281,-0.53906 1.96093,-0.53906 0.78907,0 1.57813,0.24219 l 0,3.14843 -1.39844,0 0,-1.99218 q -0.36719,-0.0625 -0.57812,-0.0625 -0.8125,0 -1.59375,0.64062 -0.78125,0.64063 -1.52344,1.92188 l 0,5.07031 -1.45313,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23210" />
+      <path
+         d="m -276.5957,111.23254 q 0.67187,-1.09375 1.60156,-1.64843 0.92969,-0.55469 1.71094,-0.55469 1.09375,0 1.96875,0.94531 0.875,0.94531 0.875,2.86719 0,1.51562 -0.58594,2.71094 -0.57813,1.1875 -1.54688,1.88281 -0.96875,0.6875 -2.39843,0.6875 -0.40625,0 -1.25,-0.0859 -0.125,-0.0156 -0.375,-0.0391 l -1.45313,0 0,-12.60938 1.45313,0 0,5.84375 z m 0,1.72657 0,3.74218 q 0.78906,0.20313 1.45312,0.20313 1.32031,0 2.21875,-1.03125 0.90625,-1.03906 0.90625,-2.83594 0,-1.30469 -0.49219,-1.96094 -0.49218,-0.65625 -1.11718,-0.65625 -0.67969,0 -1.5,0.6875 -0.8125,0.6875 -1.46875,1.85157 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23212" />
+      <path
+         d="m -261.16602,109.27161 0,8.72656 -1.45312,0 0,-1.91406 q -0.70313,1.07812 -1.5625,1.61718 -0.85938,0.53907 -1.71875,0.53907 -0.76563,0 -1.40625,-0.40625 -0.63281,-0.41407 -0.88281,-1.07813 -0.25,-0.67187 -0.25,-2.60937 l 0,-4.875 1.45312,0 0,5.05468 q 0,1.25782 0.11719,1.66407 0.125,0.39843 0.44531,0.65625 0.32813,0.25781 0.70313,0.25781 0.76562,0 1.64062,-0.71875 0.88281,-0.72656 1.46094,-1.82813 l 0,-5.08593 1.45312,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23214" />
+      <path
+         d="m -256.27539,117.99817 0,-7.51563 -2.42188,0 0,-1.21093 3.875,0 0,7.51562 2.42969,0 0,1.21094 -3.88281,0 z m -0.24219,-11.875 q 0,-0.49219 0.33594,-0.73438 0.33594,-0.24218 0.63281,-0.24218 0.30469,0 0.64063,0.24218 0.33593,0.24219 0.33593,0.73438 0,0.48437 -0.33593,0.72656 -0.33594,0.24219 -0.64063,0.24219 -0.29687,0 -0.63281,-0.24219 -0.33594,-0.24219 -0.33594,-0.72656 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23216" />
+      <path
+         d="m -246.7832,117.99817 0,-11.39063 -2.60938,0 0,-1.21875 4.0625,0 0,11.39844 2.72656,0 0,1.21094 -4.17968,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23218" />
+      <path
+         d="m -233.67383,116.03723 q -0.66406,1.09375 -1.60156,1.64844 -0.92969,0.55469 -1.71094,0.55469 -1.09375,0 -1.96875,-0.94532 -0.86719,-0.94531 -0.86719,-2.86718 0,-1.51563 0.57813,-2.70313 0.57812,-1.19531 1.54687,-1.88281 0.96875,-0.69531 2.39844,-0.69531 0.41406,0 1.25,0.0859 0.125,0.0156 0.375,0.0391 l 0,-3.88282 1.46094,0 0,12.60938 -1.46094,0 0,-1.96094 z m 0,-1.72656 0,-3.74219 q -0.77344,-0.20312 -1.4375,-0.20312 -1.34375,0 -2.24219,1.03906 -0.89062,1.03906 -0.89062,2.82812 0,1.29688 0.48437,1.96094 0.49219,0.65625 1.11719,0.65625 0.67969,0 1.5,-0.6875 0.82031,-0.6875 1.46875,-1.85156 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23220" />
+    </g>
+    <rect
+       rx="5.5999999"
+       ry="7.1999998"
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#dbdde0;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect18850"
+       width="144.36334"
+       height="24.38966"
+       x="88.070274"
+       y="78.304764" />
+    <g
+       transform="translate(455.19194,-22.17679)"
+       style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot18852">
+      <path
+         d="m -339.36133,116.03723 q -0.66406,1.09375 -1.60156,1.64844 -0.92969,0.55469 -1.71094,0.55469 -1.09375,0 -1.96875,-0.94532 -0.86719,-0.94531 -0.86719,-2.86718 0,-1.51563 0.57813,-2.70313 0.57812,-1.19531 1.54687,-1.88281 0.96875,-0.69531 2.39844,-0.69531 0.41406,0 1.25,0.0859 0.125,0.0156 0.375,0.0391 l 0,-3.88282 1.46094,0 0,12.60938 -1.46094,0 0,-1.96094 z m 0,-1.72656 0,-3.74219 q -0.77344,-0.20312 -1.4375,-0.20312 -1.34375,0 -2.24219,1.03906 -0.89062,1.03906 -0.89062,2.82812 0,1.29688 0.48437,1.96094 0.49219,0.65625 1.11719,0.65625 0.67969,0 1.5,-0.6875 0.82031,-0.6875 1.46875,-1.85156 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23179" />
+      <path
+         d="m -332.00195,118.24036 q -1.875,0 -3,-1.30469 -1.125,-1.3125 -1.125,-3.30469 0,-1.99219 1.125,-3.29687 1.125,-1.30469 3,-1.30469 1.86718,0 2.99218,1.30469 1.125,1.30468 1.125,3.29687 0,1.99219 -1.125,3.30469 -1.125,1.30469 -2.99218,1.30469 z m 0,-1.21094 q 1.16406,0 1.85156,-0.90625 0.6875,-0.90625 0.6875,-2.49219 0,-1.57812 -0.6875,-2.48437 -0.6875,-0.90625 -1.85156,-0.90625 -1.17188,0 -1.85938,0.90625 -0.6875,0.90625 -0.6875,2.48437 0,1.58594 0.6875,2.49219 0.6875,0.90625 1.85938,0.90625 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23181" />
+      <path
+         d="m -318.62695,117.74817 q -1.46094,0.49219 -2.78125,0.49219 -1.3125,0 -2.33594,-0.60157 -1.01563,-0.60937 -1.58594,-1.64062 -0.57031,-1.03906 -0.57031,-2.35938 0,-1.98437 1.28906,-3.29687 1.29688,-1.3125 3.125,-1.3125 1.35156,0 2.85938,0.50781 l 0,1.39844 q -1.52344,-0.69531 -2.72657,-0.69531 -0.79687,0 -1.5,0.39843 -0.70312,0.39063 -1.08593,1.22657 -0.38282,0.83593 -0.38282,1.71875 0,1.17968 0.73438,2.25 0.74219,1.07031 2.32031,1.07031 0.47656,0 0.9375,-0.0625 0.46094,-0.0703 1.70313,-0.49219 l 0,1.39844 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23183" />
+      <path
+         d="m -315.95508,117.99817 0,-12.60938 1.45313,0 0,7.92188 4.19531,-4.03906 1.9375,0 -4.1875,4.03906 4.29687,4.6875 -2,0 -4.24218,-4.6875 0,4.6875 -1.45313,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23185" />
+      <path
+         d="m -299.46289,117.42004 q -1.74219,0.82813 -3.29688,0.82813 -1.17968,0 -2.10937,-0.58594 -0.92969,-0.58594 -1.49219,-1.67187 -0.55469,-1.09375 -0.55469,-2.33594 0,-1.19531 0.53125,-2.29688 0.53125,-1.10156 1.48438,-1.71093 0.95312,-0.61719 2.125,-0.61719 1.55469,0 2.49219,1.10937 0.94531,1.10938 0.94531,3.42188 l 0,0.3125 -6.01563,0 q 0,0.90625 0.375,1.65625 0.38282,0.75 1,1.125 0.61719,0.375 1.40625,0.375 1.40625,0 3.10938,-0.9375 l 0,1.32812 z m -5.80469,-4.63281 4.35156,0 0,-0.21094 q 0,-1.07031 -0.52343,-1.70312 -0.52344,-0.63281 -1.39844,-0.63281 -0.88281,0 -1.53125,0.64843 -0.64063,0.64063 -0.89844,1.89844 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23187" />
+      <path
+         d="m -296.38477,117.99817 0,-8.72656 1.45313,0 0,1.90625 q 0.67969,-1.07813 1.55469,-1.60938 0.88281,-0.53906 1.96093,-0.53906 0.78907,0 1.57813,0.24219 l 0,3.14843 -1.39844,0 0,-1.99218 q -0.36719,-0.0625 -0.57812,-0.0625 -0.8125,0 -1.59375,0.64062 -0.78125,0.64063 -1.52344,1.92188 l 0,5.07031 -1.45313,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23189" />
+      <path
+         d="m -276.5957,111.23254 q 0.67187,-1.09375 1.60156,-1.64843 0.92969,-0.55469 1.71094,-0.55469 1.09375,0 1.96875,0.94531 0.875,0.94531 0.875,2.86719 0,1.51562 -0.58594,2.71094 -0.57813,1.1875 -1.54688,1.88281 -0.96875,0.6875 -2.39843,0.6875 -0.40625,0 -1.25,-0.0859 -0.125,-0.0156 -0.375,-0.0391 l 0,3.39062 -1.45313,0 0,-12.11718 1.45313,0 0,1.96093 z m 0,1.72657 0,3.74218 q 0.78906,0.20313 1.45312,0.20313 1.32031,0 2.21875,-1.03125 0.90625,-1.03906 0.90625,-2.83594 0,-1.30469 -0.49219,-1.96094 -0.49218,-0.65625 -1.11718,-0.65625 -0.67969,0 -1.5,0.6875 -0.8125,0.6875 -1.46875,1.85157 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23191" />
+      <path
+         d="m -261.16602,109.27161 0,8.72656 -1.45312,0 0,-1.91406 q -0.70313,1.07812 -1.5625,1.61718 -0.85938,0.53907 -1.71875,0.53907 -0.76563,0 -1.40625,-0.40625 -0.63281,-0.41407 -0.88281,-1.07813 -0.25,-0.67187 -0.25,-2.60937 l 0,-4.875 1.45312,0 0,5.05468 q 0,1.25782 0.11719,1.66407 0.125,0.39843 0.44531,0.65625 0.32813,0.25781 0.70313,0.25781 0.76562,0 1.64062,-0.71875 0.88281,-0.72656 1.46094,-1.82813 l 0,-5.08593 1.45312,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23193" />
+      <path
+         d="m -256.37695,117.99817 0,-11.39063 -2.60938,0 0,-1.21875 4.0625,0 0,11.39844 2.72656,0 0,1.21094 -4.17968,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23195" />
+      <path
+         d="m -246.75195,117.99817 0,-11.39063 -2.60938,0 0,-1.21875 4.0625,0 0,11.39844 2.72656,0 0,1.21094 -4.17968,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23197" />
+    </g>
+    <rect
+       y="121.50484"
+       x="88.070274"
+       height="24.38966"
+       width="144.36334"
+       id="rect18860"
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#dbdde0;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       ry="7.1999998"
+       rx="5.5999999" />
+    <g
+       transform="translate(455.19194,21.02328)"
+       style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot18862">
+      <path
+         d="m -339.36133,116.03723 q -0.66406,1.09375 -1.60156,1.64844 -0.92969,0.55469 -1.71094,0.55469 -1.09375,0 -1.96875,-0.94532 -0.86719,-0.94531 -0.86719,-2.86718 0,-1.51563 0.57813,-2.70313 0.57812,-1.19531 1.54687,-1.88281 0.96875,-0.69531 2.39844,-0.69531 0.41406,0 1.25,0.0859 0.125,0.0156 0.375,0.0391 l 0,-3.88282 1.46094,0 0,12.60938 -1.46094,0 0,-1.96094 z m 0,-1.72656 0,-3.74219 q -0.77344,-0.20312 -1.4375,-0.20312 -1.34375,0 -2.24219,1.03906 -0.89062,1.03906 -0.89062,2.82812 0,1.29688 0.48437,1.96094 0.49219,0.65625 1.11719,0.65625 0.67969,0 1.5,-0.6875 0.82031,-0.6875 1.46875,-1.85156 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23160" />
+      <path
+         d="m -332.00195,118.24036 q -1.875,0 -3,-1.30469 -1.125,-1.3125 -1.125,-3.30469 0,-1.99219 1.125,-3.29687 1.125,-1.30469 3,-1.30469 1.86718,0 2.99218,1.30469 1.125,1.30468 1.125,3.29687 0,1.99219 -1.125,3.30469 -1.125,1.30469 -2.99218,1.30469 z m 0,-1.21094 q 1.16406,0 1.85156,-0.90625 0.6875,-0.90625 0.6875,-2.49219 0,-1.57812 -0.6875,-2.48437 -0.6875,-0.90625 -1.85156,-0.90625 -1.17188,0 -1.85938,0.90625 -0.6875,0.90625 -0.6875,2.48437 0,1.58594 0.6875,2.49219 0.6875,0.90625 1.85938,0.90625 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23162" />
+      <path
+         d="m -318.62695,117.74817 q -1.46094,0.49219 -2.78125,0.49219 -1.3125,0 -2.33594,-0.60157 -1.01563,-0.60937 -1.58594,-1.64062 -0.57031,-1.03906 -0.57031,-2.35938 0,-1.98437 1.28906,-3.29687 1.29688,-1.3125 3.125,-1.3125 1.35156,0 2.85938,0.50781 l 0,1.39844 q -1.52344,-0.69531 -2.72657,-0.69531 -0.79687,0 -1.5,0.39843 -0.70312,0.39063 -1.08593,1.22657 -0.38282,0.83593 -0.38282,1.71875 0,1.17968 0.73438,2.25 0.74219,1.07031 2.32031,1.07031 0.47656,0 0.9375,-0.0625 0.46094,-0.0703 1.70313,-0.49219 l 0,1.39844 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23164" />
+      <path
+         d="m -315.95508,117.99817 0,-12.60938 1.45313,0 0,7.92188 4.19531,-4.03906 1.9375,0 -4.1875,4.03906 4.29687,4.6875 -2,0 -4.24218,-4.6875 0,4.6875 -1.45313,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23166" />
+      <path
+         d="m -299.46289,117.42004 q -1.74219,0.82813 -3.29688,0.82813 -1.17968,0 -2.10937,-0.58594 -0.92969,-0.58594 -1.49219,-1.67187 -0.55469,-1.09375 -0.55469,-2.33594 0,-1.19531 0.53125,-2.29688 0.53125,-1.10156 1.48438,-1.71093 0.95312,-0.61719 2.125,-0.61719 1.55469,0 2.49219,1.10937 0.94531,1.10938 0.94531,3.42188 l 0,0.3125 -6.01563,0 q 0,0.90625 0.375,1.65625 0.38282,0.75 1,1.125 0.61719,0.375 1.40625,0.375 1.40625,0 3.10938,-0.9375 l 0,1.32812 z m -5.80469,-4.63281 4.35156,0 0,-0.21094 q 0,-1.07031 -0.52343,-1.70312 -0.52344,-0.63281 -1.39844,-0.63281 -0.88281,0 -1.53125,0.64843 -0.64063,0.64063 -0.89844,1.89844 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23168" />
+      <path
+         d="m -296.38477,117.99817 0,-8.72656 1.45313,0 0,1.90625 q 0.67969,-1.07813 1.55469,-1.60938 0.88281,-0.53906 1.96093,-0.53906 0.78907,0 1.57813,0.24219 l 0,3.14843 -1.39844,0 0,-1.99218 q -0.36719,-0.0625 -0.57812,-0.0625 -0.8125,0 -1.59375,0.64062 -0.78125,0.64063 -1.52344,1.92188 l 0,5.07031 -1.45313,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23170" />
+      <path
+         d="m -277.19727,117.99817 0,-8.72656 1.45313,0 0,1.90625 q 0.67969,-1.07813 1.55469,-1.60938 0.88281,-0.53906 1.96093,-0.53906 0.78907,0 1.57813,0.24219 l 0,3.14843 -1.39844,0 0,-1.99218 q -0.36719,-0.0625 -0.57812,-0.0625 -0.8125,0 -1.59375,0.64062 -0.78125,0.64063 -1.52344,1.92188 l 0,5.07031 -1.45313,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23172" />
+      <path
+         d="m -261.16602,109.27161 0,8.72656 -1.45312,0 0,-1.91406 q -0.70313,1.07812 -1.5625,1.61718 -0.85938,0.53907 -1.71875,0.53907 -0.76563,0 -1.40625,-0.40625 -0.63281,-0.41407 -0.88281,-1.07813 -0.25,-0.67187 -0.25,-2.60937 l 0,-4.875 1.45312,0 0,5.05468 q 0,1.25782 0.11719,1.66407 0.125,0.39843 0.44531,0.65625 0.32813,0.25781 0.70313,0.25781 0.76562,0 1.64062,-0.71875 0.88281,-0.72656 1.46094,-1.82813 l 0,-5.08593 1.45312,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23174" />
+      <path
+         d="m -258.7207,117.99817 0,-8.72656 1.45312,0 0,1.90625 q 0.625,-1.00782 1.50781,-1.57813 0.88282,-0.57031 1.76563,-0.57031 0.77344,0 1.40625,0.41406 0.63281,0.40625 0.88281,1.07031 0.25781,0.66407 0.25781,2.60938 l 0,4.875 -1.45312,0 0,-5.05469 q 0,-1.25781 -0.125,-1.65625 -0.11719,-0.40625 -0.44531,-0.66406 -0.32032,-0.25781 -0.70313,-0.25781 -0.76562,0 -1.64844,0.72656 -0.875,0.72656 -1.44531,1.82031 l 0,5.08594 -1.45312,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path23176" />
+    </g>
+    <rect
+       ry="0"
+       rx="0"
+       y="7.6822381"
+       x="633.65228"
+       height="164.83829"
+       width="194.87442"
+       id="rect18168"
+       style="display:inline;opacity:1;fill:#ade5f9;fill-opacity:1;stroke:#005976;stroke-width:1.66371417;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+    <rect
+       rx="6.3192477"
+       ry="5.653213"
+       y="-3.9788516"
+       x="633.75879"
+       height="21.862267"
+       width="70.263504"
+       id="rect18170"
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+    <g
+       transform="matrix(1,0,0,0.89460229,1055.5338,-181.75098)"
+       style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot18172">
+      <path
+         d="m -413.15239,205.13537 0,11.424 1.52,0 0,-4.88 3.712,0 q 0.56,0 0.896,0.176 0.336,0.16 0.544,0.448 0.208,0.272 0.304,0.656 0.096,0.368 0.16,0.784 0.08,0.416 0.096,0.848 0.016,0.432 0.032,0.816 0.016,0.368 0.064,0.672 0.064,0.304 0.224,0.48 l 1.696,0 q -0.24,-0.288 -0.368,-0.656 -0.112,-0.384 -0.176,-0.8 -0.064,-0.416 -0.08,-0.848 -0.016,-0.432 -0.048,-0.848 -0.048,-0.416 -0.144,-0.8 -0.08,-0.384 -0.272,-0.688 -0.176,-0.32 -0.496,-0.544 -0.304,-0.224 -0.8,-0.32 l 0,-0.032 q 1.04,-0.288 1.504,-1.072 0.48,-0.784 0.48,-1.824 0,-1.392 -0.928,-2.192 -0.912,-0.8 -2.544,-0.8 l -5.376,0 z m 4.688,5.264 -3.168,0 0,-3.984 3.776,0 q 1.072,0 1.552,0.544 0.48,0.544 0.48,1.408 0,0.624 -0.224,1.024 -0.208,0.384 -0.576,0.624 -0.352,0.224 -0.832,0.304 -0.48,0.08 -1.008,0.08 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23109" />
+      <path
+         d="m -396.66364,211.63137 -4.752,0 q 0.032,-0.48 0.208,-0.896 0.176,-0.432 0.48,-0.752 0.304,-0.32 0.72,-0.496 0.432,-0.192 0.96,-0.192 0.512,0 0.928,0.192 0.432,0.176 0.736,0.496 0.32,0.304 0.496,0.736 0.192,0.432 0.224,0.912 z m 1.312,2.304 -1.344,0 q -0.176,0.816 -0.736,1.216 -0.544,0.4 -1.408,0.4 -0.672,0 -1.168,-0.224 -0.496,-0.224 -0.816,-0.592 -0.32,-0.384 -0.464,-0.864 -0.144,-0.496 -0.128,-1.04 l 6.192,0 q 0.032,-0.752 -0.144,-1.584 -0.16,-0.832 -0.608,-1.536 -0.432,-0.704 -1.168,-1.152 -0.72,-0.464 -1.824,-0.464 -0.848,0 -1.568,0.32 -0.704,0.32 -1.232,0.896 -0.512,0.576 -0.8,1.36 -0.288,0.784 -0.288,1.728 0.032,0.944 0.272,1.744 0.256,0.8 0.752,1.376 0.496,0.576 1.216,0.896 0.736,0.32 1.728,0.32 1.408,0 2.336,-0.704 0.928,-0.704 1.2,-2.096 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23111" />
+      <path
+         d="m -386.67789,215.85537 0,-7.568 -1.28,0 0,1.184 -0.016,0 q -0.368,-0.688 -1.024,-1.024 -0.656,-0.352 -1.44,-0.352 -1.072,0 -1.808,0.416 -0.736,0.4 -1.184,1.04 -0.448,0.624 -0.64,1.408 -0.192,0.768 -0.192,1.504 0,0.848 0.224,1.616 0.24,0.752 0.704,1.328 0.464,0.56 1.152,0.896 0.688,0.336 1.616,0.336 0.8,0 1.504,-0.352 0.72,-0.368 1.072,-1.136 l 0.032,0 0,0.544 q 0,0.688 -0.144,1.264 -0.128,0.576 -0.432,0.976 -0.304,0.416 -0.768,0.64 -0.464,0.24 -1.136,0.24 -0.336,0 -0.704,-0.08 -0.368,-0.064 -0.688,-0.224 -0.304,-0.16 -0.528,-0.416 -0.208,-0.256 -0.224,-0.624 l -1.36,0 q 0.032,0.672 0.352,1.136 0.32,0.464 0.8,0.752 0.496,0.288 1.088,0.416 0.608,0.128 1.184,0.128 1.984,0 2.912,-1.008 0.928,-1.008 0.928,-3.04 z m -3.808,-0.4 q -0.672,0 -1.12,-0.272 -0.448,-0.288 -0.72,-0.736 -0.272,-0.464 -0.384,-1.024 -0.112,-0.56 -0.112,-1.12 0,-0.592 0.128,-1.136 0.144,-0.544 0.432,-0.96 0.304,-0.416 0.768,-0.656 0.464,-0.256 1.12,-0.256 0.64,0 1.088,0.256 0.448,0.256 0.72,0.688 0.288,0.416 0.416,0.944 0.128,0.528 0.128,1.072 0,0.576 -0.144,1.152 -0.128,0.576 -0.416,1.04 -0.288,0.448 -0.768,0.736 -0.464,0.272 -1.136,0.272 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23113" />
+      <path
+         d="m -383.21764,206.79937 0,-1.664 -1.36,0 0,1.664 1.36,0 z m -1.36,1.488 0,8.272 1.36,0 0,-8.272 -1.36,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23115" />
+      <path
+         d="m -380.26314,213.95137 -1.36,0 q 0.032,0.768 0.32,1.312 0.288,0.528 0.768,0.864 0.48,0.32 1.104,0.464 0.624,0.144 1.312,0.144 0.624,0 1.248,-0.128 0.64,-0.112 1.136,-0.416 0.512,-0.304 0.816,-0.8 0.32,-0.496 0.32,-1.248 0,-0.592 -0.24,-0.992 -0.224,-0.4 -0.608,-0.656 -0.368,-0.272 -0.864,-0.432 -0.48,-0.16 -0.992,-0.272 -0.48,-0.112 -0.96,-0.208 -0.48,-0.112 -0.864,-0.256 -0.384,-0.16 -0.64,-0.384 -0.24,-0.24 -0.24,-0.592 0,-0.32 0.16,-0.512 0.16,-0.208 0.416,-0.32 0.256,-0.128 0.56,-0.176 0.32,-0.048 0.624,-0.048 0.336,0 0.656,0.08 0.336,0.064 0.608,0.224 0.272,0.16 0.448,0.432 0.176,0.256 0.208,0.656 l 1.36,0 q -0.048,-0.752 -0.32,-1.248 -0.272,-0.512 -0.736,-0.8 -0.448,-0.304 -1.04,-0.416 -0.592,-0.128 -1.296,-0.128 -0.544,0 -1.104,0.144 -0.544,0.128 -0.992,0.416 -0.432,0.272 -0.72,0.72 -0.272,0.448 -0.272,1.072 0,0.8 0.4,1.248 0.4,0.448 0.992,0.704 0.608,0.24 1.312,0.384 0.704,0.128 1.296,0.304 0.608,0.16 1.008,0.432 0.4,0.272 0.4,0.8 0,0.384 -0.192,0.64 -0.192,0.24 -0.496,0.368 -0.288,0.128 -0.64,0.176 -0.352,0.048 -0.672,0.048 -0.416,0 -0.816,-0.08 -0.384,-0.08 -0.704,-0.256 -0.304,-0.192 -0.496,-0.496 -0.192,-0.32 -0.208,-0.768 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23117" />
+      <path
+         d="m -371.23839,208.28737 0,-2.48 -1.36,0 0,2.48 -1.408,0 0,1.2 1.408,0 0,5.264 q 0,0.576 0.112,0.928 0.112,0.352 0.336,0.544 0.24,0.192 0.608,0.272 0.384,0.064 0.912,0.064 l 1.04,0 0,-1.2 -0.624,0 q -0.32,0 -0.528,-0.016 -0.192,-0.032 -0.304,-0.112 -0.112,-0.08 -0.16,-0.224 -0.032,-0.144 -0.032,-0.384 l 0,-5.136 1.648,0 0,-1.2 -1.648,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23119" />
+      <path
+         d="m -368.11189,208.28737 0,8.272 1.36,0 0,-3.68 q 0,-0.8 0.16,-1.408 0.16,-0.624 0.512,-1.056 0.352,-0.432 0.928,-0.656 0.576,-0.224 1.392,-0.224 l 0,-1.44 q -1.104,-0.032 -1.824,0.448 -0.72,0.48 -1.216,1.488 l -0.032,0 0,-1.744 -1.28,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23121" />
+      <path
+         d="m -359.47214,217.66337 q -0.24,0.608 -0.48,1.024 -0.224,0.416 -0.512,0.672 -0.272,0.272 -0.624,0.384 -0.336,0.128 -0.784,0.128 -0.24,0 -0.48,-0.032 -0.24,-0.032 -0.464,-0.112 l 0,-1.248 q 0.176,0.08 0.4,0.128 0.24,0.064 0.4,0.064 0.416,0 0.688,-0.208 0.288,-0.192 0.432,-0.56 l 0.56,-1.392 -3.28,-8.224 1.536,0 2.416,6.768 0.032,0 2.32,-6.768 1.44,0 -3.6,9.376 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path23123" />
+    </g>
+    <rect
+       style="clip-rule:evenodd;display:none;fill:#e7e7e7;fill-rule:evenodd"
+       x="741.56049"
+       y="-22.457298"
+       display="none"
+       width="135.72552"
+       height="104.87882"
+       id="rect16721" />
+    <g
+       transform="matrix(0.17137062,0,0,0.17137062,732.99198,-36.789366)"
+       id="g16723">
+      <path
+         style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+         inkscape:connector-curvature="0"
+         id="outline_7_"
+         d="m 363.387,173.391 42.695,0 0,43.646 21.588,0 c 9.969,0 20.223,-1.776 29.664,-4.976 4.639,-1.572 9.846,-3.762 14.422,-6.515 -6.027,-7.869 -9.104,-17.805 -10.01,-27.599 -1.23,-13.321 1.457,-30.66 10.473,-41.086 l 4.488,-5.191 5.348,4.299 c 13.465,10.818 24.789,25.935 26.785,43.166 16.213,-4.769 35.248,-3.641 49.539,4.607 l 5.863,3.383 -3.086,6.023 c -12.086,23.588 -37.354,30.895 -62.057,29.602 -36.965,92.068 -117.441,135.656 -215.02,135.656 -50.412,0 -96.664,-18.846 -123.002,-63.572 l -0.432,-0.73 -3.838,-7.807 c -8.902,-19.688 -11.859,-41.256 -9.854,-62.806 l 0.602,-6.455 36.51,0 0,-43.646 42.693,0 0,-42.695 85.393,0 0,-42.695 51.234,0 0,85.391 z" />
+      <g
+         id="body_colors_8_">
+        <path
+           style="fill:#00aada"
+           inkscape:connector-curvature="0"
+           d="m 501.713,189.537 c 2.863,-22.248 -13.787,-39.725 -24.113,-48.021 -11.898,13.758 -13.748,49.812 4.92,64.992 -10.418,9.254 -32.371,17.643 -54.85,17.643 l -273.627,0 c -2.186,23.465 1.934,45.072 11.342,63.568 l 3.113,5.693 c 1.971,3.345 4.124,6.573 6.451,9.681 10e-4,0 10e-4,0 10e-4,0 11.249,0.723 21.621,0.973 31.109,0.763 0.001,0 0.003,0 0.005,0 18.647,-0.413 33.862,-2.613 45.395,-6.609 1.717,-0.597 3.588,0.313 4.182,2.029 0.594,1.715 -0.314,3.587 -2.029,4.182 -1.533,0.531 -3.133,1.028 -4.783,1.5 -0.002,10e-4 -0.004,10e-4 -0.006,0.002 -9.08,2.592 -18.816,4.337 -31.379,5.112 0.744,0.012 -0.777,0.111 -0.781,0.112 -0.427,0.026 -0.965,0.087 -1.395,0.111 -4.943,0.277 -10.28,0.335 -15.736,0.335 -5.967,0 -11.845,-0.113 -18.414,-0.446 l -0.168,0.111 c 22.799,25.625 58.447,40.994 103.131,40.994 94.57,0 174.785,-41.922 210.305,-136.039 25.201,2.586 49.42,-3.84 60.438,-25.346 -17.551,-10.127 -40.117,-6.898 -53.111,-0.367 z"
+           id="path16728" />
+        <path
+           style="fill:#24b8eb"
+           inkscape:connector-curvature="0"
+           d="m 501.713,189.537 c 2.863,-22.248 -13.787,-39.725 -24.113,-48.021 -11.898,13.758 -13.748,49.812 4.92,64.992 -10.418,9.254 -32.371,17.643 -54.85,17.643 l -257.424,0 c -1.117,35.936 12.219,63.214 35.813,79.705 0.001,0 0.003,0 0.005,0 18.647,-0.413 33.862,-2.613 45.395,-6.609 1.717,-0.597 3.588,0.313 4.182,2.029 0.594,1.715 -0.314,3.587 -2.029,4.182 -1.533,0.531 -3.133,1.028 -4.783,1.5 -0.002,10e-4 -0.004,10e-4 -0.006,0.002 -9.08,2.592 -19.653,4.561 -32.216,5.336 -0.004,0 -0.305,-0.291 -0.309,-0.29 32.185,16.509 78.853,16.45 132.356,-4.103 59.994,-23.047 115.818,-66.957 154.771,-117.18 -0.587,0.265 -1.159,0.537 -1.712,0.814 z"
+           id="path16730" />
+        <path
+           style="fill:#008bb8"
+           inkscape:connector-curvature="0"
+           d="m 154.555,252.66 c 1.701,12.568 5.377,24.338 10.83,35.059 l 3.113,5.693 c 1.971,3.345 4.124,6.573 6.452,9.681 11.251,0.723 21.624,0.973 31.114,0.763 18.647,-0.413 33.862,-2.613 45.395,-6.609 1.717,-0.597 3.588,0.313 4.182,2.029 0.594,1.715 -0.314,3.587 -2.029,4.182 -1.533,0.531 -3.133,1.028 -4.783,1.5 -0.002,10e-4 -0.004,10e-4 -0.006,0.002 -9.08,2.592 -19.598,4.449 -32.16,5.225 -0.431,0.026 -1.184,0.031 -1.618,0.056 -4.942,0.277 -10.224,0.446 -15.681,0.446 -5.966,0 -12.067,-0.113 -18.637,-0.446 22.799,25.625 58.67,41.05 103.354,41.05 80.961,0 151.398,-30.73 192.281,-98.629 l -321.807,0 z"
+           id="path16732" />
+        <path
+           style="fill:#039bc6"
+           inkscape:connector-curvature="0"
+           d="m 172.709,252.66 c 4.84,22.069 16.471,39.395 33.355,51.195 18.647,-0.413 33.862,-2.613 45.395,-6.609 1.717,-0.597 3.588,0.313 4.182,2.029 0.594,1.715 -0.314,3.587 -2.029,4.182 -1.533,0.531 -3.133,1.028 -4.783,1.5 -0.002,0.001 -0.004,0.001 -0.006,0.002 -9.08,2.592 -19.82,4.449 -32.383,5.225 32.182,16.507 78.715,16.27 132.215,-4.281 32.365,-12.436 63.516,-30.947 91.463,-53.242 l -267.409,0 z"
+           id="path16734" />
+      </g>
+      <g
+         style="display:none"
+         display="none"
+         id="g16736">
+        <path
+           style="display:inline;fill:#394d54"
+           inkscape:connector-curvature="0"
+           display="inline"
+           d="m 199.434,310.508 c 5.455,0 10.652,-0.142 15.594,-0.419 0.432,-0.024 0.844,-0.058 1.271,-0.084 0.004,-10e-4 0.007,-10e-4 0.011,-10e-4 12.562,-0.775 23.434,-2.453 32.514,-5.045 0.002,-10e-4 0.004,-10e-4 0.006,-0.002 1.65,-0.472 3.25,-0.969 4.783,-1.5 1.715,-0.595 2.623,-2.467 2.029,-4.182 -0.594,-1.716 -2.465,-2.626 -4.182,-2.029 -11.532,3.996 -26.747,6.196 -45.395,6.609 -0.002,0 -0.004,0 -0.005,0 -9.488,0.21 -19.86,-0.04 -31.109,-0.763 0,0 0,0 -0.001,0 -0.575,-0.036 -1.141,-0.069 -1.721,-0.108 -1.807,-0.115 -3.379,1.245 -3.504,3.057 -0.123,1.811 1.246,3.379 3.057,3.502 2.297,0.156 4.555,0.291 6.783,0.41 0.354,0.019 0.697,0.031 1.049,0.049 l 0,0 c 6.57,0.333 12.853,0.506 18.82,0.506 z"
+           id="path16738" />
+      </g>
+      <g
+         id="Containers_8_">
+        <path
+           style="clip-rule:evenodd;fill:#00acd3;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 222.18,183.59 2.922,0 0,30.838 -2.922,0 0,-30.838 z m -5.606,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.607,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.608,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.605,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.49,0 2.92,0 0,30.838 -2.92,0 0,-30.838 z m -3.082,-3.084 37.002,0 0,37.004 -37.002,0 0,-37.004 z"
+           id="path16741" />
+        <path
+           style="clip-rule:evenodd;fill:#00acd3;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 264.875,140.895 2.924,0 0,30.836 -2.924,0 0,-30.836 z m -5.605,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.608,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.607,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.604,0 3.035,0 0,30.836 -3.035,0 0,-30.836 z m -5.49,0 2.918,0 0,30.836 -2.918,0 0,-30.836 z m -3.086,-3.084 37.006,0 0,37.003 -37.006,0 0,-37.003 z"
+           id="path16743" />
+        <path
+           style="clip-rule:evenodd;fill:#20c2ef;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 264.875,183.59 2.924,0 0,30.838 -2.924,0 0,-30.838 z m -5.605,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.608,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.607,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.604,0 3.035,0 0,30.838 -3.035,0 0,-30.838 z m -5.49,0 2.918,0 0,30.838 -2.918,0 0,-30.838 z m -3.086,-3.084 37.006,0 0,37.004 -37.006,0 0,-37.004 z"
+           id="path16745" />
+        <path
+           style="clip-rule:evenodd;fill:#00acd3;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 307.572,183.59 2.92,0 0,30.838 -2.92,0 0,-30.838 z m -5.607,0 3.039,0 0,30.838 -3.039,0 0,-30.838 z m -5.606,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.607,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.607,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.489,0 2.918,0 0,30.838 -2.918,0 0,-30.838 z m -3.084,-3.084 37.004,0 0,37.004 -37.004,0 0,-37.004 z"
+           id="path16747" />
+        <path
+           style="clip-rule:evenodd;fill:#20c2ef;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 307.572,140.895 2.92,0 0,30.836 -2.92,0 0,-30.836 z m -5.607,0 3.039,0 0,30.836 -3.039,0 0,-30.836 z m -5.606,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.607,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.607,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.489,0 2.918,0 0,30.836 -2.918,0 0,-30.836 z m -3.084,-3.084 37.004,0 0,37.003 -37.004,0 0,-37.003 z"
+           id="path16749" />
+        <path
+           style="clip-rule:evenodd;fill:#20c2ef;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 350.268,183.59 2.922,0 0,30.838 -2.922,0 0,-30.838 z m -5.606,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.607,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.608,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.605,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.49,0 2.92,0 0,30.838 -2.92,0 0,-30.838 z m -3.084,-3.084 37.004,0 0,37.004 -37.004,0 0,-37.004 z"
+           id="path16751" />
+        <path
+           style="clip-rule:evenodd;fill:#00acd3;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 350.268,140.895 2.922,0 0,30.836 -2.922,0 0,-30.836 z m -5.606,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.607,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.608,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.605,0 3.037,0 0,30.836 -3.037,0 0,-30.836 z m -5.49,0 2.92,0 0,30.836 -2.92,0 0,-30.836 z m -3.084,-3.084 37.004,0 0,37.003 -37.004,0 0,-37.003 z"
+           id="path16753" />
+        <path
+           style="clip-rule:evenodd;fill:#20c2ef;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 350.268,98.197 2.922,0 0,30.838 -2.922,0 0,-30.838 z m -5.606,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.607,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.608,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.605,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.49,0 2.92,0 0,30.838 -2.92,0 0,-30.838 z m -3.084,-3.082 37.004,0 0,37.004 -37.004,0 0,-37.004 z"
+           id="path16755" />
+        <path
+           style="clip-rule:evenodd;fill:#00acd3;fill-rule:evenodd"
+           inkscape:connector-curvature="0"
+           d="m 392.963,183.59 2.92,0 0,30.838 -2.92,0 0,-30.838 z m -5.606,0 3.035,0 0,30.838 -3.035,0 0,-30.838 z m -5.609,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.605,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.608,0 3.037,0 0,30.838 -3.037,0 0,-30.838 z m -5.49,0 2.922,0 0,30.838 -2.922,0 0,-30.838 z m -3.082,-3.084 37.004,0 0,37.004 -37.004,0 0,-37.004 z"
+           id="path16757" />
+      </g>
+      <path
+         style="clip-rule:evenodd;fill:#d4edf1;fill-rule:evenodd"
+         inkscape:connector-curvature="0"
+         d="m 268.564,277.504 c 5.637,0 10.207,4.572 10.207,10.209 0,5.637 -4.57,10.207 -10.207,10.207 -5.639,0 -10.209,-4.57 -10.209,-10.207 0,-5.637 4.571,-10.209 10.209,-10.209"
+         id="path16759" />
+      <path
+         style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+         inkscape:connector-curvature="0"
+         d="m 268.564,280.4 c 0.934,0 1.824,0.176 2.646,0.494 -0.891,0.516 -1.494,1.48 -1.494,2.584 0,1.648 1.336,2.982 2.984,2.982 1.129,0 2.109,-0.627 2.617,-1.553 0.357,0.863 0.557,1.811 0.557,2.805 0,4.037 -3.273,7.311 -7.311,7.311 -4.038,0 -7.312,-3.273 -7.312,-7.311 0.001,-4.038 3.276,-7.312 7.313,-7.312"
+         id="path16761" />
+      <path
+         style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+         inkscape:connector-curvature="0"
+         d="m 88,256.35 223.613,0 27.656,0 223.607,0 c -10.34,-2.623 -32.715,-6.166 -29.025,-19.717 -18.801,21.756 -64.143,15.264 -75.586,4.535 -12.742,18.482 -86.926,11.457 -92.1,-2.941 -15.975,18.748 -65.477,18.748 -81.453,0 -5.176,14.398 -79.357,21.424 -92.102,2.941 -11.443,10.729 -56.781,17.221 -75.584,-4.535 3.691,13.551 -18.684,17.094 -29.026,19.717"
+         id="path16763" />
+      <path
+         style="fill:#bfdbe0"
+         inkscape:connector-curvature="0"
+         d="m 295.701,351.068 c -25.281,-11.997 -39.158,-28.306 -46.879,-46.109 -9.391,2.681 -20.68,4.394 -33.795,5.13 -4.941,0.277 -10.139,0.419 -15.594,0.419 -6.287,0 -12.914,-0.186 -19.869,-0.555 23.184,23.17 51.705,41.01 104.516,41.336 3.899,0 7.77,-0.076 11.621,-0.221 z"
+         id="path16765" />
+      <path
+         style="fill:#d4edf1"
+         inkscape:connector-curvature="0"
+         d="m 258.213,321.387 c -3.498,-4.748 -6.891,-10.715 -9.385,-16.43 -9.393,2.682 -20.684,4.396 -33.801,5.132 9.01,4.891 21.897,9.423 43.186,11.298 z"
+         id="path16767" />
+    </g>
+    <g
+       id="g24079">
+      <circle
+         style="fill:#dd4814"
+         cx="691.34717"
+         cy="65.498489"
+         r="25.6"
+         id="circle17337-7" />
+      <path
+         style="fill:#ffffff"
+         inkscape:connector-curvature="0"
+         d="m 675.37995,62.421839 c -1.70053,0 -3.07212,1.376117 -3.07212,3.076648 0,1.694495 1.37159,3.073629 3.07212,3.073629 1.70204,0 3.07665,-1.379134 3.07665,-3.073629 0,-1.700531 -1.37461,-3.076648 -3.07665,-3.076648 z M 697.342,76.403308 c -1.47269,0.843475 -1.97365,2.726583 -1.12715,4.193234 0.84951,1.474195 2.7296,1.97515 4.20078,1.128657 1.47117,-0.848002 1.97665,-2.7296 1.12714,-4.199269 -0.84951,-1.471178 -2.73111,-1.975151 -4.20077,-1.122622 z M 682.06438,65.498487 c 0,-3.041943 1.50588,-5.729294 3.81601,-7.352871 l -2.24524,-3.769232 c -2.69188,1.7971 -4.6957,4.547825 -5.52861,7.767818 0.97475,0.792172 1.59189,1.997783 1.59189,3.354285 0,1.347447 -0.61564,2.55155 -1.59189,3.348249 0.83291,3.218484 2.83673,5.9677 5.52861,7.7648 l 2.24524,-3.766214 c -2.31013,-1.623577 -3.81601,-4.30791 -3.81601,-7.346835 z m 8.98098,-8.988519 c 4.69569,0 8.5449,3.598727 8.94928,8.193328 l 4.38336,-0.07092 c -0.21728,-3.385972 -1.69601,-6.427915 -3.96992,-8.664105 -1.17091,0.43909 -2.52288,0.377225 -3.68926,-0.300271 -1.16638,-0.674478 -1.90121,-1.809171 -2.10341,-3.047978 -1.13771,-0.312343 -2.33427,-0.485866 -3.57005,-0.485866 -2.12454,0 -4.1359,0.500955 -5.91791,1.38517 l 2.13509,3.82808 c 1.14978,-0.537168 2.43386,-0.83744 3.78282,-0.83744 z m 0,17.967983 c -1.35047,0 -2.63304,-0.297253 -3.78282,-0.834421 l -2.13509,3.828079 c 1.78201,0.887234 3.79337,1.382153 5.91791,1.382153 1.23578,0 2.43234,-0.167488 3.57005,-0.482848 0.2022,-1.237298 0.93552,-2.371991 2.10341,-3.049487 1.1694,-0.674479 2.51835,-0.739361 3.68926,-0.294236 2.27391,-2.239208 3.75264,-5.28115 3.96992,-8.67014 l -4.38336,-0.06186 c -0.40438,4.587056 -4.25359,8.182765 -8.94928,8.182765 z M 697.342,54.592156 c 1.46966,0.846493 3.35126,0.345538 4.19776,-1.128657 0.84951,-1.465142 0.34553,-3.34825 -1.12413,-4.196251 -1.47269,-0.848002 -3.35127,-0.345539 -4.20078,1.125639 -0.8465,1.469669 -0.34554,3.349758 1.12715,4.199269 z"
+         id="path17339-0" />
+    </g>
+    <g
+       id="layer2"
+       inkscape:label="Typography"
+       style="display:none"
+       transform="matrix(0.44483039,0,0,0.44483039,740.13512,120.73191)">
+      <g
+         id="g3910">
+        <path
+           inkscape:connector-curvature="0"
+           id="path3872"
+           d="m 3.7946429,10.710395 156.7857171,0"
+           style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+        <path
+           inkscape:connector-curvature="0"
+           id="path3872-6"
+           d="m 3.6552959,35.734302 156.7857141,0"
+           style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+        <path
+           inkscape:connector-curvature="0"
+           id="path3872-6-8"
+           d="m -10.35714,23.222461 156.78571,0"
+           style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+      </g>
+    </g>
+    <g
+       inkscape:label="Layer 1"
+       id="layer1"
+       transform="matrix(0.44483039,0,0,0.44483039,740.13512,120.73191)">
+      <path
+         style="fill:none;stroke:#009900;stroke-width:5;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         d="m 14.047838,32.727592 0,-19.064695 19.062499,19.064695 0,-19.064695"
+         id="path2996"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="cccc" />
+      <path
+         style="color:#000000;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:medium;line-height:normal;font-family:Sans;-inkscape-font-specification:Sans;text-indent:0;text-align:start;text-decoration:none;text-decoration-line:none;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;baseline-shift:baseline;text-anchor:start;display:inline;overflow:visible;visibility:visible;fill:#009900;fill-opacity:1;stroke:none;stroke-width:5;marker:none;enable-background:accumulate"
+         d="M 44.6875,11.1875 44,12.46875 38.6875,22.125 38,23.34375 38.6875,24.5625 44,33.90625 l 0.71875,1.28125 1.46875,0 10.875,0 1.5625,0 0.6875,-1.40625 3.96875,-8 1.78125,-3.625 -4.03125,0 L 50.875,22.1875 c -1.320782,-0.01868 -2.535605,1.179086 -2.535605,2.5 0,1.320914 1.214823,2.518679 2.535605,2.5 L 57,27.15625 l -1.5,3.03125 -7.875,0 -3.90625,-6.875 3.9375,-7.125 8.377221,0 1.953125,4.007812 5.03125,0 -3.171875,-7.601562 -0.6875,-1.40625 -1.5625,0 -11.408471,0 z"
+         id="path2996-0"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="cccccccccccccccscccccccccccccc" />
+      <path
+         style="color:#000000;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:medium;line-height:normal;font-family:Sans;-inkscape-font-specification:Sans;text-indent:0;text-align:start;text-decoration:none;text-decoration-line:none;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;baseline-shift:baseline;text-anchor:start;display:inline;overflow:visible;visibility:visible;fill:#009900;fill-opacity:1;stroke:none;stroke-width:5;marker:none;enable-background:accumulate"
+         d="m 70.013782,11.15625 c -1.308989,0.01639 -2.485084,1.222261 -2.46875,2.53125 l 0,6.514509 5,0 0,-6.514509 c 0.01659,-1.329821 -1.201429,-2.547843 -2.53125,-2.53125 z"
+         id="path2996-0-5"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="cccccc" />
+      <use
+         x="0"
+         y="0"
+         xlink:href="#path2996"
+         id="use3820"
+         transform="matrix(-1,0,0,1,111.13905,0.04841623)"
+         width="744.09448"
+         height="1052.3622" />
+      <g
+         id="g3920"
+         transform="translate(0.10586251,0.33010228)">
+        <path
+           sodipodi:nodetypes="cc"
+           inkscape:connector-curvature="0"
+           id="path2996-1"
+           d="m 104.90935,13.374209 19.08481,19.017856"
+           style="fill:none;stroke:#009900;stroke-width:5;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+        <use
+           height="1052.3622"
+           width="744.09448"
+           transform="matrix(-1,0,0,1,228.92583,0)"
+           id="use3851"
+           xlink:href="#path2996-1"
+           y="0"
+           x="0" />
+      </g>
+      <path
+         style="color:#000000;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:medium;line-height:normal;font-family:Sans;-inkscape-font-specification:Sans;text-indent:0;text-align:start;text-decoration:none;text-decoration-line:none;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;baseline-shift:baseline;text-anchor:start;display:inline;overflow:visible;visibility:visible;fill:#009900;fill-opacity:1;stroke:none;stroke-width:5;marker:none;enable-background:accumulate"
+         d="m 70.013782,35.204069 c -1.308989,-0.01639 -2.485084,-1.222261 -2.46875,-2.53125 l 0,-10.464363 5,0 0,10.464363 c 0.01659,1.329821 -1.201429,2.547843 -2.53125,2.53125 z"
+         id="path2996-0-5-1"
+         inkscape:connector-curvature="0"
+         sodipodi:nodetypes="cccccc" />
+    </g>
+    <g
+       id="g23808"
+       transform="translate(-247.29816,166.4)">
+      <path
+         id="path19313"
+         d="m 794.46487,77.394509 c -2.73221,1.424192 -16.88624,7.243753 -19.89955,8.814518 -3.01332,1.571366 -4.68732,1.556016 -7.06769,0.418047 -2.38037,-1.137968 -17.44273,-7.222083 -20.15598,-8.518964 -1.35617,-0.64829 -2.06917,-1.195153 -2.06917,-1.711919 l 0,-5.175185 c 0,0 19.60912,-4.268963 22.77502,-5.404524 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986882 18.80282,3.893352 21.46551,4.868496 0,0 -10e-4,4.622001 -10e-4,5.101747 3e-4,0.51165 -0.61398,1.07296 -2.00476,1.797696 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path19315"
+         d="m 794.46487,72.21782 c -2.73221,1.423589 -16.88624,7.243753 -19.89955,8.814517 -3.01332,1.571366 -4.68732,1.556017 -7.06769,0.418048 -2.38068,-1.137367 -17.44273,-7.222686 -20.15598,-8.518965 -2.71325,-1.296881 -2.77013,-2.189559 -0.10474,-3.233023 2.66509,-1.044066 17.64469,-6.921113 20.81089,-8.056975 3.16561,-1.13526 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.587036 19.42612,7.56218 2.66299,0.976046 2.76532,1.779637 0.0331,3.203829 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <path
+         id="path19317"
+         d="m 794.46487,68.955302 c -2.73221,1.424191 -16.88624,7.243753 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222084 -20.15598,-8.518965 -1.35617,-0.64829 -2.06917,-1.194551 -2.06917,-1.711317 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404524 3.16561,-1.135561 4.26385,-1.176794 6.95783,-0.189913 2.69429,0.986882 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.10235 3e-4,0.511348 -0.61398,1.072658 -2.00476,1.797395 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path19319"
+         d="m 794.46487,63.778612 c -2.73221,1.424192 -16.88624,7.243754 -19.89955,8.81512 -3.01332,1.570764 -4.68732,1.555414 -7.06769,0.417445 -2.38068,-1.137366 -17.44273,-7.222384 -20.15598,-8.518964 -2.71325,-1.29658 -2.77013,-2.189258 -0.10474,-3.233324 2.66509,-1.043464 17.64469,-6.920812 20.81089,-8.056373 3.16561,-1.135561 4.26385,-1.176794 6.95784,-0.189913 2.69428,0.986882 16.76374,6.586736 19.42612,7.56218 2.66299,0.975746 2.76532,1.779638 0.0331,3.203829 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <path
+         id="path19321"
+         d="m 794.46487,60.203085 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.81542 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222384 -20.15598,-8.518964 -1.35617,-0.648291 -2.06917,-1.194853 -2.06917,-1.711318 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404223 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986881 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.102048 3e-4,0.511048 -0.61398,1.072357 -2.00476,1.797094 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path19323"
+         d="m 794.46487,55.026396 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417747 -2.38037,-1.137668 -17.44273,-7.222385 -20.15598,-8.518965 -2.71325,-1.29658 -2.77013,-2.189559 -0.10474,-3.233324 2.66509,-1.043765 17.64469,-6.920511 20.81089,-8.056373 3.16561,-1.135862 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.586735 19.42612,7.56218 2.66299,0.975143 2.76532,1.779336 0.0331,3.203227 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon19325"
+         points="259.06,240.78 270.27,237.11 267.24,244.38 278.67,248.66 263.93,250.19 260.63,258.13 255.3,249.27 238.28,247.74 250.98,243.16 247.17,236.13 "
+         style="fill:#ffffff" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon19327"
+         points="232.24,275.77 271.66,269.72 259.75,287.18 "
+         style="fill:#ffffff" />
+      <ellipse
+         id="ellipse19329"
+         cy="53.828529"
+         ry="2.4580245"
+         rx="6.3411436"
+         cx="760.40704"
+         style="fill:#ffffff" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon19331"
+         points="319.42,260.05 296.11,269.26 296.09,250.83 "
+         style="fill:#7a0c00" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon19333"
+         points="296.09,250.83 296.11,269.26 293.58,270.25 270.28,261.04 "
+         style="fill:#ad2115" />
+    </g>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:1.6, 4.8;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker16101)"
+       d="m 232.8,48.6 c 0,0 -12.73666,1.801225 61.59028,-2.565366 C 414.41267,38.983507 605.85597,54.0261 541.6,135"
+       id="path20389"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="csc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:9.6, 9.6;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker8148)"
+       d="M 232,91.8 C 267.3955,81.000077 227.99615,51.379889 294.44308,48.093811 487.01221,38.570464 649.34345,63.836408 662.48649,127.0156 596.42953,139.79479 662.23891,181.3224 552.8,223.8"
+       id="path20391"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cscc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.60000002;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:12.8, 3.2, 1.6, 3.2;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker20462)"
+       d="m 232,135 c 57.21358,0.42565 19.24911,-77.191374 63.18997,-87.243822 38.67909,-8.8487 79.20414,-10.051572 104.88753,10.895187 38.40475,31.321995 38.65218,89.318415 98.03792,97.920405 -45.41426,67.00199 -38.65741,-9.39118 -109.31542,-3.9718"
+       id="path20393"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="csscc" />
+    <circle
+       id="circle17337"
+       r="25.6"
+       cy="152.70993"
+       cx="523.57367"
+       style="fill:#dd4814" />
+    <path
+       id="path17339"
+       d="m 507.60646,149.63328 c -1.70053,0 -3.07212,1.37612 -3.07212,3.07665 0,1.69449 1.37159,3.07363 3.07212,3.07363 1.70204,0 3.07665,-1.37914 3.07665,-3.07363 0,-1.70053 -1.37461,-3.07665 -3.07665,-3.07665 z m 21.96205,13.98147 c -1.47269,0.84347 -1.97365,2.72658 -1.12715,4.19323 0.84951,1.4742 2.7296,1.97515 4.20078,1.12866 1.47117,-0.848 1.97665,-2.7296 1.12714,-4.19927 -0.84951,-1.47118 -2.73111,-1.97515 -4.20077,-1.12262 z m -15.27762,-10.90482 c 0,-3.04195 1.50588,-5.7293 3.81601,-7.35287 l -2.24524,-3.76924 c -2.69188,1.7971 -4.6957,4.54783 -5.52861,7.76782 0.97475,0.79217 1.59189,1.99779 1.59189,3.35429 0,1.34744 -0.61564,2.55155 -1.59189,3.34825 0.83291,3.21848 2.83673,5.9677 5.52861,7.7648 l 2.24524,-3.76622 c -2.31013,-1.62358 -3.81601,-4.30791 -3.81601,-7.34683 z m 8.98098,-8.98852 c 4.69569,0 8.5449,3.59873 8.94928,8.19333 l 4.38336,-0.0709 c -0.21728,-3.38597 -1.69601,-6.42792 -3.96992,-8.66411 -1.17091,0.43909 -2.52288,0.37723 -3.68926,-0.30027 -1.16638,-0.67448 -1.90121,-1.80917 -2.10341,-3.04798 -1.13771,-0.31234 -2.33427,-0.48586 -3.57005,-0.48586 -2.12454,0 -4.1359,0.50095 -5.91791,1.38517 l 2.13509,3.82808 c 1.14978,-0.53717 2.43386,-0.83744 3.78282,-0.83744 z m 0,17.96798 c -1.35047,0 -2.63304,-0.29725 -3.78282,-0.83442 l -2.13509,3.82808 c 1.78201,0.88723 3.79337,1.38215 5.91791,1.38215 1.23578,0 2.43234,-0.16749 3.57005,-0.48285 0.2022,-1.23729 0.93552,-2.37199 2.10341,-3.04948 1.1694,-0.67448 2.51835,-0.73936 3.68926,-0.29424 2.27391,-2.23921 3.75264,-5.28115 3.96992,-8.67014 l -4.38336,-0.0619 c -0.40438,4.58705 -4.25359,8.18276 -8.94928,8.18276 z m 6.29664,-19.88579 c 1.46966,0.84649 3.35126,0.34553 4.19776,-1.12866 0.84951,-1.46514 0.34553,-3.34825 -1.12413,-4.19625 -1.47269,-0.848 -3.35127,-0.34554 -4.20078,1.12564 -0.8465,1.46967 -0.34554,3.34976 1.12715,4.19927 z"
+       inkscape:connector-curvature="0"
+       style="fill:#ffffff" />
+    <rect
+       style="opacity:1;fill:#f2f2f2;fill-opacity:1;stroke:#005976;stroke-width:1.87672079;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect4484"
+       width="288.75037"
+       height="19.589668"
+       x="295.59451"
+       y="37.709572" />
+    <g
+       transform="matrix(0.68994526,0,0,0.69259938,267.24024,14.934347)"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot4486">
+      <path
+         inkscape:connector-curvature="0"
+         d="m 187.98859,51.93976 0,-10.026972 2.89587,0 q 1.19455,0 2.00902,0.343886 0.81446,0.325786 1.32124,0.977358 0.52488,0.633473 0.74207,1.556533 0.23529,0.904962 0.23529,2.063312 0,1.194549 -0.25339,2.045213 -0.23529,0.832565 -0.61537,1.393641 -0.38009,0.561076 -0.86876,0.886862 -0.47058,0.325786 -0.95926,0.506778 -0.48868,0.162893 -0.92306,0.217191 -0.43439,0.0362 -0.72397,0.0362 l -2.85968,0 z m -1.71943,-11.47491 0,12.922849 4.43431,0 q 1.61083,0 2.78728,-0.452481 1.17645,-0.452481 1.93662,-1.303144 0.76017,-0.868763 1.12215,-2.11761 0.36199,-1.266946 0.36199,-2.895877 0,-3.113067 -1.61083,-4.633402 -1.61083,-1.520335 -4.59721,-1.520335 l -4.43431,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7700" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 199.86394,48.718098 q 0,-0.850664 0.2172,-1.502236 0.23529,-0.669671 0.63347,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54298,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39819,0.452481 0.61538,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61538,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59727,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39818,-0.452481 -0.63347,-1.104053 -0.2172,-0.669671 -0.2172,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85067,0.361984 1.95472,0.361984 1.12215,0 1.95472,-0.361984 0.85066,-0.380084 1.42984,-1.031656 0.57917,-0.669672 0.86876,-1.556534 0.28959,-0.886862 0.28959,-1.918518 0,-1.031656 -0.28959,-1.918518 -0.28959,-0.904962 -0.86876,-1.556534 -0.57918,-0.669671 -1.42984,-1.049755 -0.83257,-0.380084 -1.95472,-0.380084 -1.10405,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7702" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 215.45219,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93662,0.398183 -0.83256,0.380084 -1.39364,1.067855 -0.54297,0.669671 -0.81446,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37554,0.977358 0.83257,0.343885 1.90042,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57464,0 q -0.14479,1.085953 -0.79636,1.683228 -0.63347,0.597274 -1.77373,0.597274 -0.72397,0 -1.24884,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48868,-1.104053 -0.14479,-0.615374 -0.14479,-1.266946 0,-0.70587 0.14479,-1.357442 0.1448,-0.669671 0.47058,-1.17645 0.34389,-0.506778 0.90497,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97735,0 1.55653,0.488679 0.57917,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7704" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 218.91112,40.46485 0,12.922849 1.53843,0 0,-3.547449 1.44794,-1.339343 3.20357,4.886792 1.95471,0 -3.98183,-5.954646 3.71034,-3.402655 -2.06331,0 -4.27142,4.090425 0,-7.655973 -1.53843,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7706" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 234.72136,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08596,-0.217191 0.57917,0 1.04975,0.217191 0.48868,0.199091 0.83257,0.561076 0.36198,0.343885 0.56107,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59273,0.45248 -0.76017,0 -1.32125,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90496,1.538435 -0.32579,0.886862 -0.32579,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64249,-0.796366 1.04975,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7708" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 237.8907,44.030398 0,9.357301 1.53844,0 0,-4.162823 q 0,-0.904961 0.18099,-1.592732 0.18099,-0.70587 0.57918,-1.194549 0.39818,-0.488679 1.04975,-0.742068 0.65157,-0.253389 1.57463,-0.253389 l 0,-1.628931 q -1.24884,-0.0362 -2.06331,0.506779 -0.81446,0.542976 -1.37554,1.683228 l -0.0362,0 0,-1.972816 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7710" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 250.13172,48.790495 q 0,-0.687771 0.1448,-1.339343 0.14479,-0.651572 0.47058,-1.158351 0.32578,-0.506778 0.86876,-0.814465 0.54298,-0.307687 1.32124,-0.307687 0.79637,0 1.35744,0.307687 0.56108,0.289588 0.90497,0.778267 0.36198,0.488679 0.52487,1.140251 0.1629,0.633473 0.1629,1.321244 0,0.651572 -0.1629,1.285045 -0.14479,0.633473 -0.48868,1.140251 -0.34388,0.48868 -0.88686,0.796366 -0.54297,0.307687 -1.32124,0.307687 -0.74207,0 -1.30315,-0.289587 -0.54297,-0.289588 -0.90496,-0.778267 -0.34388,-0.488679 -0.52488,-1.104053 -0.16289,-0.633473 -0.16289,-1.285045 z m 7.23969,4.597204 0,-12.922849 -1.53843,0 0,4.814395 -0.0362,0 q -0.25339,-0.416283 -0.63347,-0.687771 -0.36199,-0.289588 -0.77827,-0.452481 -0.41628,-0.180992 -0.83256,-0.253389 -0.41629,-0.0724 -0.77827,-0.0724 -1.06786,0 -1.88232,0.398183 -0.79637,0.380084 -1.33934,1.049755 -0.52488,0.651573 -0.79637,1.538435 -0.25339,0.886862 -0.25339,1.882319 0,0.995458 0.27149,1.88232 0.27149,0.886862 0.79637,1.556534 0.54297,0.669671 1.33934,1.067854 0.81446,0.398183 1.90042,0.398183 0.97736,0 1.79182,-0.343885 0.81447,-0.343885 1.19455,-1.122152 l 0.0362,0 0,1.266946 1.53843,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7712" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 268.01008,53.3515 q -0.39818,0.23529 -1.10405,0.23529 -0.59727,0 -0.95926,-0.325786 -0.34388,-0.343885 -0.34388,-1.104053 -0.63348,0.760168 -1.48414,1.104053 -0.83257,0.325786 -1.80992,0.325786 -0.63348,0 -1.21265,-0.144794 -0.56108,-0.144793 -0.97736,-0.45248 -0.41628,-0.307687 -0.66967,-0.796366 -0.23529,-0.506779 -0.23529,-1.212649 0,-0.796366 0.27149,-1.303144 0.27149,-0.506778 0.70587,-0.814465 0.45248,-0.325786 1.01355,-0.488679 0.57918,-0.162893 1.17645,-0.271489 0.63348,-0.126694 1.19455,-0.180992 0.57918,-0.0724 1.01356,-0.180992 0.43438,-0.126695 0.68777,-0.343886 0.25339,-0.23529 0.25339,-0.669671 0,-0.506779 -0.19909,-0.814465 -0.18099,-0.307687 -0.48868,-0.47058 -0.28959,-0.162893 -0.66967,-0.217191 -0.36199,-0.0543 -0.72397,-0.0543 -0.97736,0 -1.62893,0.380084 -0.65157,0.361985 -0.70587,1.393641 l -1.53844,0 q 0.0362,-0.868763 0.36199,-1.466038 0.32578,-0.597274 0.86876,-0.959259 0.54298,-0.380084 1.23075,-0.542977 0.70587,-0.162893 1.50223,-0.162893 0.63348,0 1.24885,0.0905 0.63347,0.0905 1.14025,0.380084 0.50678,0.271488 0.81447,0.778267 0.30768,0.506778 0.30768,1.321244 l 0,4.814394 q 0,0.542977 0.0543,0.796366 0.0724,0.253389 0.43438,0.253389 0.19909,0 0.47058,-0.0905 l 0,1.194549 z m -2.49769,-4.796295 q -0.28959,0.217191 -0.76017,0.325786 -0.47058,0.0905 -0.99546,0.162893 -0.50677,0.0543 -1.03165,0.144794 -0.52488,0.0724 -0.94116,0.253389 -0.41628,0.180992 -0.68777,0.524878 -0.25339,0.325786 -0.25339,0.904961 0,0.380084 0.14479,0.651572 0.1629,0.253389 0.39819,0.416282 0.25339,0.162894 0.57917,0.23529 0.32579,0.0724 0.68777,0.0724 0.76017,0 1.30315,-0.199091 0.54297,-0.217191 0.88686,-0.524878 0.34388,-0.325786 0.50678,-0.68777 0.16289,-0.380084 0.16289,-0.70587 l 0,-1.574633 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7714" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 275.93952,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08595,-0.217191 0.57918,0 1.04976,0.217191 0.48868,0.199091 0.83256,0.561076 0.36199,0.343885 0.56108,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59274,0.45248 -0.76016,0 -1.32124,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90497,1.538435 -0.32578,0.886862 -0.32578,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64248,-0.796366 1.04976,-0.796366 1.35745,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7716" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 279.16316,44.030398 0,9.357301 1.53844,0 0,-5.827952 q 0,-0.271488 0.12669,-0.669671 0.1448,-0.416282 0.43439,-0.796366 0.30768,-0.380084 0.77826,-0.651572 0.48868,-0.271489 1.15835,-0.271489 0.52488,0 0.85067,0.162893 0.34388,0.144794 0.54297,0.434382 0.19909,0.271488 0.27149,0.651572 0.0905,0.380084 0.0905,0.832564 l 0,6.135639 1.53843,0 0,-5.827952 q 0,-1.085953 0.65157,-1.737526 0.65158,-0.651572 1.79183,-0.651572 0.56107,0 0.90496,0.162893 0.36198,0.162893 0.56108,0.452481 0.19909,0.271488 0.27148,0.651572 0.0724,0.380084 0.0724,0.814465 l 0,6.135639 1.53843,0 0,-6.859608 q 0,-0.723969 -0.23528,-1.230747 -0.2172,-0.524878 -0.63348,-0.850664 -0.39818,-0.325786 -0.97736,-0.47058 -0.56107,-0.162893 -1.26694,-0.162893 -0.92306,0 -1.70133,0.416282 -0.76017,0.416283 -1.23075,1.17645 -0.28958,-0.868763 -0.99545,-1.230747 -0.70587,-0.361985 -1.57464,-0.361985 -1.97281,0 -3.02257,1.592732 l -0.0362,0 0,-1.375541 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7718" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 295.73329,48.718098 q 0,-0.850664 0.21719,-1.502236 0.23529,-0.669671 0.63348,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54297,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39818,0.452481 0.61537,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61537,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59728,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39819,-0.452481 -0.63348,-1.104053 -0.21719,-0.669671 -0.21719,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85066,0.361984 1.95472,0.361984 1.12215,0 1.95471,-0.361984 0.85067,-0.380084 1.42984,-1.031656 0.57918,-0.669672 0.86877,-1.556534 0.28958,-0.886862 0.28958,-1.918518 0,-1.031656 -0.28958,-1.918518 -0.28959,-0.904962 -0.86877,-1.556534 -0.57917,-0.669671 -1.42984,-1.049755 -0.83256,-0.380084 -1.95471,-0.380084 -1.10406,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7720" />
+      <path
+         inkscape:connector-curvature="0"
+         d="m 304.96871,44.030398 0,9.357301 1.53843,0 0,-5.284975 q 0,-0.633473 0.16289,-1.15835 0.181,-0.542977 0.52488,-0.94116 0.34389,-0.398183 0.85067,-0.615374 0.52487,-0.217191 1.23074,-0.217191 0.88687,0 1.39364,0.506779 0.50678,0.506778 0.50678,1.375541 l 0,6.33473 1.53844,0 0,-6.153738 q 0,-0.760167 -0.1629,-1.375541 -0.14479,-0.633473 -0.52487,-1.085954 -0.38009,-0.45248 -0.99546,-0.70587 -0.61538,-0.253389 -1.53844,-0.253389 -2.08141,0 -3.04067,1.701328 l -0.0362,0 0,-1.484137 -1.44793,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7722" />
+    </g>
+    <g
+       transform="matrix(0.46975244,0,0,0.46975284,616.35741,-226.29247)"
+       id="layer1-6"
+       inkscape:label="Layer 1">
+      <g
+         inkscape:label="#g2636"
+         clip-path="none"
+         transform="matrix(0.450413,0,0,0.450413,290.58948,830.01033)"
+         id="CENTOSARTWORK"
+         style="display:inline">
+        <g
+           inkscape:label="#g2638"
+           id="SYMBOL">
+          <g
+             id="g2640"
+             transform="matrix(0,-1.4853794,1.4853794,0,-2754.3723,24.127639)"
+             style="display:inline">
+            <g
+               transform="translate(16.279512,-899.16723)"
+               id="g2642">
+              <g
+                 id="g2644"
+                 transform="matrix(1.0666663,0,0,1,-26.935596,-4.1328125e-5)">
+                <path
+                   id="path2646"
+                   d="m 323.36653,2798.1614 -5.40616,5.8005 -5.40616,-5.8005 0,-40.8261 -18.01333,0 23.41949,-24.8979 23.02487,24.8979 -17.6187,0 -10e-6,40.8261 z"
+                   style="display:inline;overflow:visible;visibility:visible;fill:#922178;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                   sodipodi:nodetypes="ccccccccc"
+                   inkscape:connector-curvature="0" />
+                <path
+                   sodipodi:nodetypes="cccccc"
+                   style="display:inline;overflow:visible;visibility:visible;fill:#ffffff;fill-opacity:0.15686275;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                   d="m 323.42008,2798.1614 -5.45971,5.8005 0,-71.5245 23.25296,24.8979 -17.79323,0 -2e-5,40.8261 z"
+                   id="path2648"
+                   inkscape:connector-curvature="0" />
+              </g>
+              <g
+                 transform="matrix(1.0722788,0,0,1,-28.850792,-4.1328125e-5)"
+                 id="g2650"
+                 clip-path="url(#clipPath4429)" />
+            </g>
+          </g>
+          <g
+             transform="matrix(1.4853794,0,0,1.4853794,-402.70015,-3303.1796)"
+             id="g2652"
+             style="display:inline">
+            <g
+               id="g2654"
+               transform="translate(16.279512,-899.16723)">
+              <g
+                 transform="matrix(1.0666663,0,0,1,-26.935596,-4.1328125e-5)"
+                 id="g2656">
+                <path
+                   sodipodi:nodetypes="ccccccccc"
+                   style="display:inline;overflow:visible;visibility:visible;fill:#eea623;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                   d="m 323.36653,2798.1614 -5.40616,5.8005 -5.40616,-5.8005 0,-40.8261 -18.01333,0 23.41949,-24.8979 23.02487,24.8979 -17.6187,0 -10e-6,40.8261 z"
+                   id="path2658"
+                   inkscape:connector-curvature="0" />
+                <path
+                   id="path2660"
+                   d="m 323.42008,2798.1614 -5.45971,5.8005 0,-71.5245 23.25296,24.8979 -17.79323,0 -2e-5,40.8261 z"
+                   style="display:inline;overflow:visible;visibility:visible;fill:#ffffff;fill-opacity:0.15686275;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                   sodipodi:nodetypes="cccccc"
+                   inkscape:connector-curvature="0" />
+              </g>
+              <g
+                 clip-path="url(#clipPath4429)"
+                 id="g2662"
+                 transform="matrix(1.0722788,0,0,1,-28.850792,-4.1328125e-5)" />
+            </g>
+          </g>
+          <g
+             id="g2664"
+             transform="matrix(0,1.4853794,-1.4853794,0,4260.2115,-927.32613)"
+             style="display:inline">
+            <g
+               transform="matrix(1.0666663,0,0,1,-26.935596,-4.1328125e-5)"
+               id="g2666">
+              <path
+                 sodipodi:nodetypes="ccccccccc"
+                 style="display:inline;overflow:visible;visibility:visible;fill:#252476;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                 d="m 323.36653,2798.1614 -5.40616,5.8005 -5.40616,-5.8005 0,-40.8261 -18.01333,0 23.41949,-24.8979 23.02487,24.8979 -17.6187,0 -10e-6,40.8261 z"
+                 id="path2668"
+                 inkscape:connector-curvature="0" />
+              <path
+                 id="path2670"
+                 d="m 323.42008,2798.1614 -5.45971,5.8005 0,-71.5245 23.25296,24.8979 -17.79323,0 -2e-5,40.8261 z"
+                 style="display:inline;overflow:visible;visibility:visible;fill:#ffffff;fill-opacity:0.15686275;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                 sodipodi:nodetypes="cccccc"
+                 inkscape:connector-curvature="0" />
+            </g>
+            <g
+               clip-path="url(#clipPath4429)"
+               id="g2672"
+               transform="matrix(1.0722788,0,0,1,-28.850792,-4.1328125e-5)" />
+          </g>
+          <g
+             transform="matrix(-1.4853794,0,0,-1.4853794,572.93464,2375.8)"
+             id="g2674"
+             style="display:inline">
+            <g
+               id="g2676"
+               transform="translate(16.279512,-899.16723)">
+              <g
+                 transform="matrix(1.0666663,0,0,1,-26.935596,-4.1328125e-5)"
+                 id="g2678">
+                <path
+                   sodipodi:nodetypes="ccccccccc"
+                   style="display:inline;overflow:visible;visibility:visible;fill:#73d216;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                   d="m 323.36653,2798.1614 -5.40616,5.8005 -5.40616,-5.8005 0,-40.8261 -18.01333,0 23.41949,-24.8979 23.02487,24.8979 -17.6187,0 -10e-6,40.8261 z"
+                   id="path2680"
+                   inkscape:connector-curvature="0" />
+                <path
+                   id="path2682"
+                   d="m 323.42008,2798.1614 -5.45971,5.8005 0,-71.5245 23.25296,24.8979 -17.79323,0 -2e-5,40.8261 z"
+                   style="display:inline;overflow:visible;visibility:visible;fill:#ffffff;fill-opacity:0.15686275;fill-rule:evenodd;stroke:none;stroke-width:30;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none"
+                   sodipodi:nodetypes="cccccc"
+                   inkscape:connector-curvature="0" />
+              </g>
+              <g
+                 clip-path="url(#clipPath4429)"
+                 id="g2684"
+                 transform="matrix(1.0722788,0,0,1,-28.850792,-4.1328125e-5)" />
+            </g>
+          </g>
+          <g
+             transform="matrix(1.4853794,0,0,1.4853794,-994.6136,-3940.533)"
+             id="g2686"
+             style="display:inline">
+            <rect
+               style="fill:#73d216;fill-opacity:1;stroke:#ffffff;stroke-width:6.03433704;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+               id="rect2688"
+               width="49.606449"
+               height="49.606449"
+               x="671.98438"
+               y="2285.7893" />
+            <rect
+               y="2285.7893"
+               x="732.22076"
+               height="49.606449"
+               width="49.606449"
+               id="rect2690"
+               style="fill:#922178;fill-opacity:1;stroke:#ffffff;stroke-width:6.03433704;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+            <rect
+               style="fill:#eea623;fill-opacity:1;stroke:#ffffff;stroke-width:6.03433704;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+               id="rect2692"
+               width="49.606449"
+               height="49.606449"
+               x="732.22076"
+               y="2346.0256" />
+            <rect
+               y="2346.0256"
+               x="671.98438"
+               height="49.606449"
+               width="49.606449"
+               id="rect2694"
+               style="fill:#252476;fill-opacity:1;stroke:#ffffff;stroke-width:6.03433704;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+            <path
+               style="fill:#ffffff;fill-opacity:0.15686275;stroke:none;stroke-width:3.54330707;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dashoffset:0;stroke-opacity:1"
+               d="m 696.78757,2311.2893 c 24.77617,0 24.80323,-25.5 24.80323,-25.5 l -10e-6,49.6065 -49.60644,0 c 0,0 -0.22209,-24.1065 24.80322,-24.1065 z"
+               id="path2696"
+               sodipodi:nodetypes="ccccz"
+               inkscape:connector-curvature="0" />
+            <path
+               sodipodi:nodetypes="ccccz"
+               id="path2698"
+               d="m 696.7876,2371.5256 c 24.77617,0 24.80323,-25.5 24.80323,-25.5 l -10e-6,49.6065 -49.60644,0 c 0,0 -0.22209,-24.1065 24.80322,-24.1065 z"
+               style="fill:#ffffff;fill-opacity:0.15686275;stroke:none;stroke-width:3.54330707;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dashoffset:0;stroke-opacity:1"
+               inkscape:connector-curvature="0" />
+            <path
+               sodipodi:nodetypes="ccccz"
+               id="path2700"
+               d="m 757.02399,2311.2893 c 24.77617,0 24.80323,-25.5 24.80323,-25.5 l -10e-6,49.6065 -49.60644,0 c 0,0 -0.22209,-24.1065 24.80322,-24.1065 z"
+               style="fill:#ffffff;fill-opacity:0.15686275;stroke:none;stroke-width:3.54330707;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dashoffset:0;stroke-opacity:1"
+               inkscape:connector-curvature="0" />
+            <path
+               style="fill:#ffffff;fill-opacity:0.15686275;stroke:none;stroke-width:3.54330707;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dashoffset:0;stroke-opacity:1"
+               d="m 757.02399,2371.5256 c 24.77617,0 24.80323,-25.5 24.80323,-25.5 l -10e-6,49.6065 -49.60644,0 c 0,0 -0.22209,-24.1065 24.80322,-24.1065 z"
+               id="path2702"
+               sodipodi:nodetypes="ccccz"
+               inkscape:connector-curvature="0" />
+          </g>
+          <rect
+             y="-349.27026"
+             x="306.48581"
+             height="73.684402"
+             width="73.684402"
+             id="rect2704"
+             style="display:inline;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:8.96327972;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+             transform="matrix(0.7071068,-0.7071068,0.7071068,0.7071068,0,0)" />
+          <rect
+             style="display:inline;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:8.96327972;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+             id="rect2706"
+             width="73.684402"
+             height="73.684402"
+             x="395.95984"
+             y="-349.27026"
+             transform="matrix(0.7071068,-0.7071068,0.7071068,0.7071068,0,0)" />
+          <rect
+             y="-259.79648"
+             x="395.95984"
+             height="73.684402"
+             width="73.684402"
+             id="rect2708"
+             style="display:inline;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:8.96327972;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+             transform="matrix(0.7071068,-0.7071068,0.7071068,0.7071068,0,0)" />
+          <rect
+             style="display:inline;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:8.96327972;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+             id="rect2710"
+             width="73.684402"
+             height="73.684402"
+             x="306.48581"
+             y="-259.79648"
+             transform="matrix(0.7071068,-0.7071068,0.7071068,0.7071068,0,0)" />
+        </g>
+      </g>
+    </g>
+    <g
+       transform="translate(-247.29816,166.4)"
+       id="g23821">
+      <path
+         style="fill:#a41e11"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,77.394509 c -2.73221,1.424192 -16.88624,7.243753 -19.89955,8.814518 -3.01332,1.571366 -4.68732,1.556016 -7.06769,0.418047 -2.38037,-1.137968 -17.44273,-7.222083 -20.15598,-8.518964 -1.35617,-0.64829 -2.06917,-1.195153 -2.06917,-1.711919 l 0,-5.175185 c 0,0 19.60912,-4.268963 22.77502,-5.404524 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986882 18.80282,3.893352 21.46551,4.868496 0,0 -10e-4,4.622001 -10e-4,5.101747 3e-4,0.51165 -0.61398,1.07296 -2.00476,1.797696 z"
+         id="path23823" />
+      <path
+         style="fill:#d82c20"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,72.21782 c -2.73221,1.423589 -16.88624,7.243753 -19.89955,8.814517 -3.01332,1.571366 -4.68732,1.556017 -7.06769,0.418048 -2.38068,-1.137367 -17.44273,-7.222686 -20.15598,-8.518965 -2.71325,-1.296881 -2.77013,-2.189559 -0.10474,-3.233023 2.66509,-1.044066 17.64469,-6.921113 20.81089,-8.056975 3.16561,-1.13526 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.587036 19.42612,7.56218 2.66299,0.976046 2.76532,1.779637 0.0331,3.203829 z"
+         id="path23825" />
+      <path
+         style="fill:#a41e11"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,68.955302 c -2.73221,1.424191 -16.88624,7.243753 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222084 -20.15598,-8.518965 -1.35617,-0.64829 -2.06917,-1.194551 -2.06917,-1.711317 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404524 3.16561,-1.135561 4.26385,-1.176794 6.95783,-0.189913 2.69429,0.986882 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.10235 3e-4,0.511348 -0.61398,1.072658 -2.00476,1.797395 z"
+         id="path23827" />
+      <path
+         style="fill:#d82c20"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,63.778612 c -2.73221,1.424192 -16.88624,7.243754 -19.89955,8.81512 -3.01332,1.570764 -4.68732,1.555414 -7.06769,0.417445 -2.38068,-1.137366 -17.44273,-7.222384 -20.15598,-8.518964 -2.71325,-1.29658 -2.77013,-2.189258 -0.10474,-3.233324 2.66509,-1.043464 17.64469,-6.920812 20.81089,-8.056373 3.16561,-1.135561 4.26385,-1.176794 6.95784,-0.189913 2.69428,0.986882 16.76374,6.586736 19.42612,7.56218 2.66299,0.975746 2.76532,1.779638 0.0331,3.203829 z"
+         id="path23829" />
+      <path
+         style="fill:#a41e11"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,60.203085 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.81542 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222384 -20.15598,-8.518964 -1.35617,-0.648291 -2.06917,-1.194853 -2.06917,-1.711318 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404223 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986881 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.102048 3e-4,0.511048 -0.61398,1.072357 -2.00476,1.797094 z"
+         id="path23831" />
+      <path
+         style="fill:#d82c20"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,55.026396 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417747 -2.38037,-1.137668 -17.44273,-7.222385 -20.15598,-8.518965 -2.71325,-1.29658 -2.77013,-2.189559 -0.10474,-3.233324 2.66509,-1.043765 17.64469,-6.920511 20.81089,-8.056373 3.16561,-1.135862 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.586735 19.42612,7.56218 2.66299,0.975143 2.76532,1.779336 0.0331,3.203227 z"
+         id="path23833" />
+      <polygon
+         style="fill:#ffffff"
+         points="259.06,240.78 270.27,237.11 267.24,244.38 278.67,248.66 263.93,250.19 260.63,258.13 255.3,249.27 238.28,247.74 250.98,243.16 247.17,236.13 "
+         id="polygon23835"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+      <polygon
+         style="fill:#ffffff"
+         points="232.24,275.77 271.66,269.72 259.75,287.18 "
+         id="polygon23837"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+      <ellipse
+         style="fill:#ffffff"
+         cx="760.40704"
+         rx="6.3411436"
+         ry="2.4580245"
+         cy="53.828529"
+         id="ellipse23839" />
+      <polygon
+         style="fill:#7a0c00"
+         points="319.42,260.05 296.11,269.26 296.09,250.83 "
+         id="polygon23841"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+      <polygon
+         style="fill:#ad2115"
+         points="296.09,250.83 296.11,269.26 293.58,270.25 270.28,261.04 "
+         id="polygon23843"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+    </g>
+    <g
+       id="g23845"
+       transform="matrix(0.1894162,-0.14094969,0,0.1894162,234.64109,354.55609)">
+      <path
+         id="path23847"
+         d="m 794.46487,77.394509 c -2.73221,1.424192 -16.88624,7.243753 -19.89955,8.814518 -3.01332,1.571366 -4.68732,1.556016 -7.06769,0.418047 -2.38037,-1.137968 -17.44273,-7.222083 -20.15598,-8.518964 -1.35617,-0.64829 -2.06917,-1.195153 -2.06917,-1.711919 l 0,-5.175185 c 0,0 19.60912,-4.268963 22.77502,-5.404524 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986882 18.80282,3.893352 21.46551,4.868496 0,0 -10e-4,4.622001 -10e-4,5.101747 3e-4,0.51165 -0.61398,1.07296 -2.00476,1.797696 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path23849"
+         d="m 794.46487,72.21782 c -2.73221,1.423589 -16.88624,7.243753 -19.89955,8.814517 -3.01332,1.571366 -4.68732,1.556017 -7.06769,0.418048 -2.38068,-1.137367 -17.44273,-7.222686 -20.15598,-8.518965 -2.71325,-1.296881 -2.77013,-2.189559 -0.10474,-3.233023 2.66509,-1.044066 17.64469,-6.921113 20.81089,-8.056975 3.16561,-1.13526 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.587036 19.42612,7.56218 2.66299,0.976046 2.76532,1.779637 0.0331,3.203829 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <path
+         id="path23851"
+         d="m 794.46487,68.955302 c -2.73221,1.424191 -16.88624,7.243753 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222084 -20.15598,-8.518965 -1.35617,-0.64829 -2.06917,-1.194551 -2.06917,-1.711317 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404524 3.16561,-1.135561 4.26385,-1.176794 6.95783,-0.189913 2.69429,0.986882 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.10235 3e-4,0.511348 -0.61398,1.072658 -2.00476,1.797395 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path23853"
+         d="m 794.46487,63.778612 c -2.73221,1.424192 -16.88624,7.243754 -19.89955,8.81512 -3.01332,1.570764 -4.68732,1.555414 -7.06769,0.417445 -2.38068,-1.137366 -17.44273,-7.222384 -20.15598,-8.518964 -2.71325,-1.29658 -2.77013,-2.189258 -0.10474,-3.233324 2.66509,-1.043464 17.64469,-6.920812 20.81089,-8.056373 3.16561,-1.135561 4.26385,-1.176794 6.95784,-0.189913 2.69428,0.986882 16.76374,6.586736 19.42612,7.56218 2.66299,0.975746 2.76532,1.779638 0.0331,3.203829 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <path
+         id="path23855"
+         d="m 794.46487,60.203085 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.81542 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222384 -20.15598,-8.518964 -1.35617,-0.648291 -2.06917,-1.194853 -2.06917,-1.711318 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404223 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986881 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.102048 3e-4,0.511048 -0.61398,1.072357 -2.00476,1.797094 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path23857"
+         d="m 794.46487,55.026396 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417747 -2.38037,-1.137668 -17.44273,-7.222385 -20.15598,-8.518965 -2.71325,-1.29658 -2.77013,-2.189559 -0.10474,-3.233324 2.66509,-1.043765 17.64469,-6.920511 20.81089,-8.056373 3.16561,-1.135862 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.586735 19.42612,7.56218 2.66299,0.975143 2.76532,1.779336 0.0331,3.203227 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon23859"
+         points="267.24,244.38 278.67,248.66 263.93,250.19 260.63,258.13 255.3,249.27 238.28,247.74 250.98,243.16 247.17,236.13 259.06,240.78 270.27,237.11 "
+         style="fill:#ffffff" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon23861"
+         points="232.24,275.77 271.66,269.72 259.75,287.18 "
+         style="fill:#ffffff" />
+      <ellipse
+         id="ellipse23863"
+         cy="53.828529"
+         ry="2.4580245"
+         rx="6.3411436"
+         cx="760.40704"
+         style="fill:#ffffff" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon23865"
+         points="319.42,260.05 296.11,269.26 296.09,250.83 "
+         style="fill:#7a0c00" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon23867"
+         points="296.09,250.83 296.11,269.26 293.58,270.25 270.28,261.04 "
+         style="fill:#ad2115" />
+    </g>
+    <g
+       transform="matrix(0.1894162,-0.14094969,0,0.1894162,234.64109,303.19243)"
+       id="g23869">
+      <path
+         style="fill:#a41e11"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,77.394509 c -2.73221,1.424192 -16.88624,7.243753 -19.89955,8.814518 -3.01332,1.571366 -4.68732,1.556016 -7.06769,0.418047 -2.38037,-1.137968 -17.44273,-7.222083 -20.15598,-8.518964 -1.35617,-0.64829 -2.06917,-1.195153 -2.06917,-1.711919 l 0,-5.175185 c 0,0 19.60912,-4.268963 22.77502,-5.404524 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986882 18.80282,3.893352 21.46551,4.868496 0,0 -10e-4,4.622001 -10e-4,5.101747 3e-4,0.51165 -0.61398,1.07296 -2.00476,1.797696 z"
+         id="path23871" />
+      <path
+         style="fill:#d82c20"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,72.21782 c -2.73221,1.423589 -16.88624,7.243753 -19.89955,8.814517 -3.01332,1.571366 -4.68732,1.556017 -7.06769,0.418048 -2.38068,-1.137367 -17.44273,-7.222686 -20.15598,-8.518965 -2.71325,-1.296881 -2.77013,-2.189559 -0.10474,-3.233023 2.66509,-1.044066 17.64469,-6.921113 20.81089,-8.056975 3.16561,-1.13526 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.587036 19.42612,7.56218 2.66299,0.976046 2.76532,1.779637 0.0331,3.203829 z"
+         id="path23873" />
+      <path
+         style="fill:#a41e11"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,68.955302 c -2.73221,1.424191 -16.88624,7.243753 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222084 -20.15598,-8.518965 -1.35617,-0.64829 -2.06917,-1.194551 -2.06917,-1.711317 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404524 3.16561,-1.135561 4.26385,-1.176794 6.95783,-0.189913 2.69429,0.986882 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.10235 3e-4,0.511348 -0.61398,1.072658 -2.00476,1.797395 z"
+         id="path23875" />
+      <path
+         style="fill:#d82c20"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,63.778612 c -2.73221,1.424192 -16.88624,7.243754 -19.89955,8.81512 -3.01332,1.570764 -4.68732,1.555414 -7.06769,0.417445 -2.38068,-1.137366 -17.44273,-7.222384 -20.15598,-8.518964 -2.71325,-1.29658 -2.77013,-2.189258 -0.10474,-3.233324 2.66509,-1.043464 17.64469,-6.920812 20.81089,-8.056373 3.16561,-1.135561 4.26385,-1.176794 6.95784,-0.189913 2.69428,0.986882 16.76374,6.586736 19.42612,7.56218 2.66299,0.975746 2.76532,1.779638 0.0331,3.203829 z"
+         id="path23877" />
+      <path
+         style="fill:#a41e11"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,60.203085 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.81542 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222384 -20.15598,-8.518964 -1.35617,-0.648291 -2.06917,-1.194853 -2.06917,-1.711318 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404223 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986881 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.102048 3e-4,0.511048 -0.61398,1.072357 -2.00476,1.797094 z"
+         id="path23879" />
+      <path
+         style="fill:#d82c20"
+         inkscape:connector-curvature="0"
+         d="m 794.46487,55.026396 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417747 -2.38037,-1.137668 -17.44273,-7.222385 -20.15598,-8.518965 -2.71325,-1.29658 -2.77013,-2.189559 -0.10474,-3.233324 2.66509,-1.043765 17.64469,-6.920511 20.81089,-8.056373 3.16561,-1.135862 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.586735 19.42612,7.56218 2.66299,0.975143 2.76532,1.779336 0.0331,3.203227 z"
+         id="path23881" />
+      <polygon
+         style="fill:#ffffff"
+         points="278.67,248.66 263.93,250.19 260.63,258.13 255.3,249.27 238.28,247.74 250.98,243.16 247.17,236.13 259.06,240.78 270.27,237.11 267.24,244.38 "
+         id="polygon23883"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+      <polygon
+         style="fill:#ffffff"
+         points="271.66,269.72 259.75,287.18 232.24,275.77 "
+         id="polygon23885"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+      <ellipse
+         style="fill:#ffffff"
+         cx="760.40704"
+         rx="6.3411436"
+         ry="2.4580245"
+         cy="53.828529"
+         id="ellipse23887" />
+      <polygon
+         style="fill:#7a0c00"
+         points="296.11,269.26 296.09,250.83 319.42,260.05 "
+         id="polygon23889"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+      <polygon
+         style="fill:#ad2115"
+         points="296.11,269.26 293.58,270.25 270.28,261.04 296.09,250.83 "
+         id="polygon23891"
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)" />
+    </g>
+    <g
+       id="g24055"
+       transform="translate(-79.524666,65.562133)">
+      <path
+         id="path24057"
+         d="m 794.46487,77.394509 c -2.73221,1.424192 -16.88624,7.243753 -19.89955,8.814518 -3.01332,1.571366 -4.68732,1.556016 -7.06769,0.418047 -2.38037,-1.137968 -17.44273,-7.222083 -20.15598,-8.518964 -1.35617,-0.64829 -2.06917,-1.195153 -2.06917,-1.711919 l 0,-5.175185 c 0,0 19.60912,-4.268963 22.77502,-5.404524 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986882 18.80282,3.893352 21.46551,4.868496 0,0 -10e-4,4.622001 -10e-4,5.101747 3e-4,0.51165 -0.61398,1.07296 -2.00476,1.797696 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path24059"
+         d="m 794.46487,72.21782 c -2.73221,1.423589 -16.88624,7.243753 -19.89955,8.814517 -3.01332,1.571366 -4.68732,1.556017 -7.06769,0.418048 -2.38068,-1.137367 -17.44273,-7.222686 -20.15598,-8.518965 -2.71325,-1.296881 -2.77013,-2.189559 -0.10474,-3.233023 2.66509,-1.044066 17.64469,-6.921113 20.81089,-8.056975 3.16561,-1.13526 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.587036 19.42612,7.56218 2.66299,0.976046 2.76532,1.779637 0.0331,3.203829 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <path
+         id="path24061"
+         d="m 794.46487,68.955302 c -2.73221,1.424191 -16.88624,7.243753 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222084 -20.15598,-8.518965 -1.35617,-0.64829 -2.06917,-1.194551 -2.06917,-1.711317 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404524 3.16561,-1.135561 4.26385,-1.176794 6.95783,-0.189913 2.69429,0.986882 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.10235 3e-4,0.511348 -0.61398,1.072658 -2.00476,1.797395 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path24063"
+         d="m 794.46487,63.778612 c -2.73221,1.424192 -16.88624,7.243754 -19.89955,8.81512 -3.01332,1.570764 -4.68732,1.555414 -7.06769,0.417445 -2.38068,-1.137366 -17.44273,-7.222384 -20.15598,-8.518964 -2.71325,-1.29658 -2.77013,-2.189258 -0.10474,-3.233324 2.66509,-1.043464 17.64469,-6.920812 20.81089,-8.056373 3.16561,-1.135561 4.26385,-1.176794 6.95784,-0.189913 2.69428,0.986882 16.76374,6.586736 19.42612,7.56218 2.66299,0.975746 2.76532,1.779638 0.0331,3.203829 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <path
+         id="path24065"
+         d="m 794.46487,60.203085 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.81542 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417446 -2.38068,-1.137367 -17.44273,-7.222384 -20.15598,-8.518964 -1.35617,-0.648291 -2.06917,-1.194853 -2.06917,-1.711318 l 0,-5.175786 c 0,0 19.60912,-4.268662 22.77502,-5.404223 3.16561,-1.135862 4.26385,-1.176794 6.95783,-0.189912 2.69429,0.986881 18.80282,3.89275 21.46551,4.868195 0,0 -10e-4,4.622001 -10e-4,5.102048 3e-4,0.511048 -0.61398,1.072357 -2.00476,1.797094 z"
+         inkscape:connector-curvature="0"
+         style="fill:#a41e11" />
+      <path
+         id="path24067"
+         d="m 794.46487,55.026396 c -2.73221,1.424191 -16.88624,7.244054 -19.89955,8.815119 -3.01332,1.570764 -4.68732,1.555415 -7.06769,0.417747 -2.38037,-1.137668 -17.44273,-7.222385 -20.15598,-8.518965 -2.71325,-1.29658 -2.77013,-2.189559 -0.10474,-3.233324 2.66509,-1.043765 17.64469,-6.920511 20.81089,-8.056373 3.16561,-1.135862 4.26385,-1.176794 6.95784,-0.189611 2.69428,0.986881 16.76374,6.586735 19.42612,7.56218 2.66299,0.975143 2.76532,1.779336 0.0331,3.203227 z"
+         inkscape:connector-curvature="0"
+         style="fill:#d82c20" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon24069"
+         points="278.67,248.66 263.93,250.19 260.63,258.13 255.3,249.27 238.28,247.74 250.98,243.16 247.17,236.13 259.06,240.78 270.27,237.11 267.24,244.38 "
+         style="fill:#ffffff" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon24071"
+         points="232.24,275.77 271.66,269.72 259.75,287.18 "
+         style="fill:#ffffff" />
+      <ellipse
+         id="ellipse24073"
+         cy="53.828529"
+         ry="2.4580245"
+         rx="6.3411436"
+         cx="760.40704"
+         style="fill:#ffffff" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon24075"
+         points="319.42,260.05 296.11,269.26 296.09,250.83 "
+         style="fill:#7a0c00" />
+      <polygon
+         transform="matrix(0.30097031,0,0,0.30097031,693.70904,-24.79695)"
+         id="polygon24077"
+         points="270.28,261.04 296.09,250.83 296.11,269.26 293.58,270.25 "
+         style="fill:#ad2115" />
+    </g>
+  </g>
+</svg>
diff --git a/docs/sources/article-img/ipv6_routed_network_example.gliffy b/docs/sources/article-img/ipv6_routed_network_example.gliffy
index 81ab0ed..544fd52 100644
--- a/docs/sources/article-img/ipv6_routed_network_example.gliffy
+++ b/docs/sources/article-img/ipv6_routed_network_example.gliffy
@@ -1 +1 @@
-{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":903,"height":598,"nodeIndex":174,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":-9.000680271168676,"y":-4.75},"max":{"x":756.0183424505415,"y":502.5}},"objects":[{"x":765.0,"y":250.0,"rotation":0.0,"id":169,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-12.982306425886122,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":663.0,"y":362.5,"rotation":270.0,"id":168,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">managed by Docker</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":747.0,"y":472.0,"rotation":0.0,"id":166,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":45,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,14.008510484195028],[0.0,-221.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":25.5,"y":254.0,"rotation":0.0,"id":162,"width":194.49999999999997,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">ip -6 route add 2001:db8:1:1::/64 \\</span></span></p><p style=\"text-align:left;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">    dev docker0</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":239.28932188134524,"y":150.0,"rotation":0.0,"id":32,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":0,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[196.5,47.5],[151.9213562373095,-37.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":195.0,"y":261.5,"rotation":0.0,"id":35,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":2,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":13,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[66.28932188134524,11.0],[-92.0,91.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":182.0,"y":272.5,"rotation":0.0,"id":34,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":2,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":15,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[100.0,0.0],[82.0,80.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":11.5,"y":464.0,"rotation":0.0,"id":53,"width":346.49999999999994,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">ip -6 route add default via fe80::1 dev eth0</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":11.5,"y":323.5,"rotation":0.0,"id":56,"width":346.49999999999994,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":245.0,"y":109.0,"rotation":0.0,"id":33,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":2,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[104.78932188134524,3.999999999999986],[57.710678118654755,88.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":76.5,"y":141.5,"rotation":0.0,"id":31,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":25,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[400.71067811865476,131.0],[560.0,211.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":37.5,"y":145.5,"rotation":0.0,"id":30,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":4,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":27,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[419.0,127.0],[431.0,207.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":296.0,"y":21.0,"rotation":0.0,"id":87,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">eth0 2001:db8::1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":293.0,"y":120.0,"rotation":0.0,"id":83,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">eth1 fe80::1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":433.5,"y":46.5,"rotation":0.0,"id":82,"width":291.0,"height":78.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">ip -6 route add default via fe80::1 </span><span style=\"\">dev eth0</span></p><p style=\"text-align:left;\"><span style=\"text-decoration:none;\"> </span></p><p style=\"text-align:left;\"><span style=\"text-decoration:none;\"> </span></p><p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:1::/48 </span><span style=\"\">via fe80::1:1 dev eth1</span></p><p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:2::/48 via</span><span style=\"\"> fe80::2:1 dev eth1</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":320.5,"y":38.0,"rotation":0.0,"id":0,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#fff2cc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":1,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Router</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":369.0,"y":40.0,"rotation":0.0,"id":89,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":0,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#d9d9d9","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[1.5,-2.0],[1.5,-21.125],[1.5,-21.125],[1.5,-40.25]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[]},{"x":297.75,"y":10.5,"rotation":0.0,"id":80,"width":425.99999999999994,"height":133.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":528.5,"y":199.0,"rotation":0.0,"id":73,"width":195.25,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">ip -6 route add default via fe80::1 \\</span></p><p style=\"text-align:left;\"><span style=\"\">    dev eth0</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":793.0,"y":250.0,"rotation":0.0,"id":64,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":60,"py":0.6205673758865248,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"8.0,8.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-69.25,0.0],[-798.0006802711687,-3.410605131648481E-13]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":25.5,"y":201.0,"rotation":0.0,"id":47,"width":291.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">ip -6 route add default via fe80::1 \\</span></p><p style=\"text-align:left;\"><span style=\"\">   dev eth0</span><span style=\"text-decoration:none;\"> </span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":207.0,"y":281.0,"rotation":0.0,"id":11,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":21,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">docker0 fe80::1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":220.0,"y":168.0,"rotation":0.0,"id":6,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">eth0 2001:db8:1:0::1/64</span></span></p><p style=\"text-align:left;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">        fe80::1:1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":232.0,"y":197.5,"rotation":0.0,"id":2,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":3,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Host1</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":11.5,"y":162.5,"rotation":0.0,"id":59,"width":346.50000000000006,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":384.75,"y":162.5,"rotation":0.0,"id":60,"width":339.0,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":189.0,"y":336.0,"rotation":0.0,"id":74,"width":150.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:1:1::2/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":28.000000000000014,"y":336.0,"rotation":0.0,"id":19,"width":149.99999999999997,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:1:1::1/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":214.0,"y":353.0,"rotation":0.0,"id":15,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":24,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":16,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container1-2</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":53.0,"y":353.0,"rotation":0.0,"id":13,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":14,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container1-1</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":395.0,"y":336.0,"rotation":0.0,"id":77,"width":150.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:2:1::1/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":384.75,"y":323.5,"rotation":0.0,"id":58,"width":339.75,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":384.75,"y":463.0,"rotation":0.0,"id":51,"width":339.75,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">ip -6 route add default via fe80::1 dev eth0</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":418.5,"y":353.0,"rotation":0.0,"id":27,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":28,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container2-1</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":563.0,"y":336.0,"rotation":0.0,"id":78,"width":150.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:2:1::2/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":586.5,"y":353.0,"rotation":0.0,"id":25,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":29,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":26,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container2-2</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":259.0,"y":491.5,"rotation":0.0,"id":107,"width":223.00000000000003,"height":11.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:10px;font-style:italic;\">containers&#39; link-local addresses are not displayed</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":394.5,"y":168.0,"rotation":0.0,"id":7,"width":150.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">eth0 2001:db8:2:0::1/64<br />        fe80::2:1/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":381.5,"y":280.0,"rotation":0.0,"id":9,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">docker0 fe80::1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":406.5,"y":197.5,"rotation":0.0,"id":4,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":5,"width":96.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Host2</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":528.5,"y":252.0,"rotation":0.0,"id":164,"width":194.49999999999997,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">ip -6 route add 2001:db8:2:1::/64 \\</span></span></p><p style=\"text-align:left;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">    dev docker0</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":766.0,"y":487.0,"rotation":0.0,"id":171,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.981657549458532,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#fff2cc","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#000000","strokeWidth":1}},"textStyles":{"global":{"size":"12px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}}
\ No newline at end of file
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":893,"height":447,"nodeIndex":185,"autoFit":true,"exportBorder":false,"gridOn":false,"snapToGrid":false,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":-17.000680271168676,"y":7},"max":{"x":892.767693574114,"y":447}},"objects":[{"x":17.5,"y":205.5,"rotation":0.0,"id":167,"width":238.5,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:1::/64 dev docker0</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":231.28932188134524,"y":95.0,"rotation":0.0,"id":120,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":6,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":0.0,"px":0.2928932188134524}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":131,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[267.5,47.5],[217.9213562373095,-13.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":187.0,"y":206.5,"rotation":0.0,"id":121,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":148,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[130.28932188134524,11.0],[-79.0,91.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":174.0,"y":217.5,"rotation":0.0,"id":122,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":140,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":146,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[164.0,0.0],[120.0,81.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":33.50000000000003,"y":409.0,"rotation":0.0,"id":123,"width":346.49999999999994,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">ip -6 route add default via fe80::1 dev eth0</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":3.5000000000000284,"y":268.5,"rotation":0.0,"id":124,"width":411.00000000000006,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":237.0,"y":54.0,"rotation":0.0,"id":125,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":131,"py":0.9999999999999998,"px":0.29289321881345254}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":140,"py":0.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[170.78932188134524,27.999999999999986],[121.71067811865476,88.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":378.5,"y":7.0,"rotation":0.0,"id":131,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e2e2e2","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":132,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Layer 2 Switch</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":785.0,"y":195.0,"rotation":0.0,"id":136,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":32,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":143,"py":0.6187943262411347,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":"8.0,8.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[78.75000000000011,-0.25],[-798.0006802711687,-3.410605131648481E-13]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":262.0,"y":224.0,"rotation":0.0,"id":138,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">docker0 fe80::1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":278.0,"y":126.0,"rotation":0.0,"id":139,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">eth0 2001:db8:0::1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":288.0,"y":142.5,"rotation":0.0,"id":140,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":141,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Host1</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":3.4999999999999716,"y":107.5,"rotation":0.0,"id":142,"width":411.0,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":221.0,"y":283.0,"rotation":0.0,"id":144,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:1::2/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":34.000000000000014,"y":283.0,"rotation":0.0,"id":145,"width":149.99999999999997,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:1::1/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":244.0,"y":299.0,"rotation":0.0,"id":146,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":147,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container1-2</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":58.0,"y":298.0,"rotation":0.0,"id":148,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":149,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container1-1</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":317.0,"y":436.5,"rotation":0.0,"id":158,"width":223.00000000000003,"height":11.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-style:italic;font-size:10px;\">containers&#39; link-local addresses are not displayed</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":17.5,"y":148.0,"rotation":0.0,"id":137,"width":291.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:0::/64 dev eth0</span></p><p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":901.7500000000001,"y":195.0,"rotation":0.0,"id":172,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":43,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-12.982306425886122,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":670.0,"y":284.0,"rotation":0.0,"id":155,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:2::2/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":479.0,"y":284.0,"rotation":0.0,"id":150,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">eth0 2001:db8:2::1/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":488.75,"y":408.0,"rotation":0.0,"id":152,"width":339.75,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"\">ip -6 route add default via fe80::1 dev eth0</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":694.5,"y":298.0,"rotation":0.0,"id":156,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":27,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":157,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container2-2</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":501.5,"y":298.0,"rotation":0.0,"id":153,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.square","order":25,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ead1dc","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":154,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Container2-1</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":444.5,"y":223.0,"rotation":0.0,"id":160,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">docker0 fe80::1/64</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":460.5,"y":128.0,"rotation":0.0,"id":159,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">eth0 2001:db8:0::2/64</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":469.5,"y":142.5,"rotation":0.0,"id":161,"width":100.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#a4c2f4","gradient":true,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":162,"width":96.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;text-decoration:none;font-family:Arial;\"><span style=\"text-decoration:none;\">Host2</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"children":[]}]},{"x":139.5,"y":86.5,"rotation":0.0,"id":126,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.7071067811865476}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":156,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[400.71067811865476,131.0],[605.0,211.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":100.5,"y":90.5,"rotation":0.0,"id":127,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":161,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":153,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#cccccc","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[419.0,127.0],[451.0,207.5]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":447.75,"y":268.5,"rotation":0.0,"id":151,"width":416.0000000000001,"height":163.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":447.75,"y":107.5,"rotation":0.0,"id":143,"width":416.0000000000001,"height":141.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":"2,2","dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[]},{"x":795.7500000000001,"y":307.5,"rotation":270.0,"id":173,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-size:12px;font-family:Arial;\"><span style=\"\">managed by Docker</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":879.7500000000001,"y":417.0,"rotation":0.0,"id":174,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,14.008510484195028],[0.0,-221.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":898.7500000000001,"y":432.0,"rotation":0.0,"id":171,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.981657549458532,0.0],[-41.25,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":582.5,"y":151.0,"rotation":0.0,"id":135,"width":285.25000000000017,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:0::/64 dev eth0</span></p><p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1</span><span style=\"text-decoration:none;\"> </span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]},{"x":583.0,"y":204.0,"rotation":0.0,"id":168,"width":272.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"\">ip -6 route add 2001:db8:2::/64 dev docker0</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"children":[]}],"shapeStyles":{"com.gliffy.shape.basic.basic_v1.default":{"fill":"#e2e2e2","stroke":"#333333","strokeWidth":2,"dashStyle":"2.0,2.0","gradient":true,"shadow":true}},"lineStyles":{"global":{"stroke":"#000000","strokeWidth":1,"dashStyle":"8.0,8.0"}},"textStyles":{}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.images"],"autosaveDisabled":false},"embeddedResources":{"index":0,"resources":[]}}
\ No newline at end of file
diff --git a/docs/sources/article-img/ipv6_routed_network_example.svg b/docs/sources/article-img/ipv6_routed_network_example.svg
index da62143..c97b02c 100644
--- a/docs/sources/article-img/ipv6_routed_network_example.svg
+++ b/docs/sources/article-img/ipv6_routed_network_example.svg
@@ -1 +1 @@
-<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="776.0183424505415" height="522.5"><style xmlns="http://www.w3.org/1999/xhtml"></style><defs><linearGradient id="pTwRJlTmoXQS" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#fff2cc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="FvZELeRwLGTV" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a4c2f4"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="icpEixOwIatD" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a4c2f4"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="LkNSHLFhaxIA" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="oKDqFTggfkIn" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="ibePzzWmRSgC" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="OTKSvGQcxRpB" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient></defs><g transform="translate(0,0)"><g><rect fill="#FFFFFF" stroke="none" x="0" y="0" width="776.0183424505415" height="522.5"/></g><g transform="translate(0,0) matrix(1,0,0,1,297.75,10.5)"><g><g transform="translate(0,0) scale(4.26,1.33)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.2347417840375587,0.7518796992481203)"><path fill="none" stroke="none" d="M 0 0 L 426 0 Q 426 0 426 0 L 426 133 Q 426 133 426 133 L 0 133 Q 0 133 0 133 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 340 0 L 342 0 M 342 0 M 344 0 L 346 0 M 346 0 M 348 0 L 350 0 M 350 0 M 352 0 L 354 0 M 354 0 M 356 0 L 358 0 M 358 0 M 360 0 L 362 0 M 362 0 M 364 0 L 366 0 M 366 0 M 368 0 L 370 0 M 370 0 M 372 0 L 374 0 M 374 0 M 376 0 L 378 0 M 378 0 M 380 0 L 382 0 M 382 0 M 384 0 L 386 0 M 386 0 M 388 0 L 390 0 M 390 0 M 392 0 L 394 0 M 394 0 M 396 0 L 398 0 M 398 0 M 400 0 L 402 0 M 402 0 M 404 0 L 406 0 M 406 0 M 408 0 L 410 0 M 410 0 M 412 0 L 414 0 M 414 0 M 416 0 L 418 0 M 418 0 M 420 0 L 422 0 M 422 0 M 424 0 L 426 0 M 426 0 M 426 2 L 426 4 M 426 4 M 426 6 L 426 8 M 426 8 M 426 10 L 426 12 M 426 12 M 426 14 L 426 16 M 426 16 M 426 18 L 426 20 M 426 20 M 426 22 L 426 24 M 426 24 M 426 26 L 426 28 M 426 28 M 426 30 L 426 32 M 426 32 M 426 34 L 426 36 M 426 36 M 426 38 L 426 40 M 426 40 M 426 42 L 426 44 M 426 44 M 426 46 L 426 48 M 426 48 M 426 50 L 426 52 M 426 52 M 426 54 L 426 56 M 426 56 M 426 58 L 426 60 M 426 60 M 426 62 L 426 64 M 426 64 M 426 66 L 426 68 M 426 68 M 426 70 L 426 72 M 426 72 M 426 74 L 426 76 M 426 76 M 426 78 L 426 80 M 426 80 M 426 82 L 426 84 M 426 84 M 426 86 L 426 88 M 426 88 M 426 90 L 426 92 M 426 92 M 426 94 L 426 96 M 426 96 M 426 98 L 426 100 M 426 100 M 426 102 L 426 104 M 426 104 M 426 106 L 426 108 M 426 108 M 426 110 L 426 112 M 426 112 M 426 114 L 426 116 M 426 116 M 426 118 L 426 120 M 426 120 M 426 122 L 426 124 M 426 124 M 426 126 L 426 128 M 426 128 M 426 130 L 426 132 M 426 132 M 425 133 L 423 133 M 423 133 M 421 133 L 419 133 M 419 133 M 417 133 L 415 133 M 415 133 M 413 133 L 411 133 M 411 133 M 409 133 L 407 133 M 407 133 M 405 133 L 403 133 M 403 133 M 401 133 L 399 133 M 399 133 M 397 133 L 395 133 M 395 133 M 393 133 L 391 133 M 391 133 M 389 133 L 387 133 M 387 133 M 385 133 L 383 133 M 383 133 M 381 133 L 379 133 M 379 133 M 377 133 L 375 133 M 375 133 M 373 133 L 371 133 M 371 133 M 369 133 L 367 133 M 367 133 M 365 133 L 363 133 M 363 133 M 361 133 L 359 133 M 359 133 M 357 133 L 355 133 M 355 133 M 353 133 L 351 133 M 351 133 M 349 133 L 347 133 M 347 133 M 345 133 L 343 133 M 343 133 M 341 133 L 339 133 M 339 133 M 337 133 L 335 133 M 335 133 M 333 133 L 331 133 M 331 133 M 329 133 L 327 133 M 327 133 M 325 133 L 323 133 M 323 133 M 321 133 L 319 133 M 319 133 M 317 133 L 315 133 M 315 133 M 313 133 L 311 133 M 311 133 M 309 133 L 307 133 M 307 133 M 305 133 L 303 133 M 303 133 M 301 133 L 299 133 M 299 133 M 297 133 L 295 133 M 295 133 M 293 133 L 291 133 M 291 133 M 289 133 L 287 133 M 287 133 M 285 133 L 283 133 M 283 133 M 281 133 L 279 133 M 279 133 M 277 133 L 275 133 M 275 133 M 273 133 L 271 133 M 271 133 M 269 133 L 267 133 M 267 133 M 265 133 L 263 133 M 263 133 M 261 133 L 259 133 M 259 133 M 257 133 L 255 133 M 255 133 M 253 133 L 251 133 M 251 133 M 249 133 L 247 133 M 247 133 M 245 133 L 243 133 M 243 133 M 241 133 L 239 133 M 239 133 M 237 133 L 235 133 M 235 133 M 233 133 L 231 133 M 231 133 M 229 133 L 227 133 M 227 133 M 225 133 L 223 133 M 223 133 M 221 133 L 219 133 M 219 133 M 217 133 L 215 133 M 215 133 M 213 133 L 211 133 M 211 133 M 209 133 L 207 133 M 207 133 M 205 133 L 203 133 M 203 133 M 201 133 L 199 133 M 199 133 M 197 133 L 195 133 M 195 133 M 193 133 L 191 133 M 191 133 M 189 133 L 187 133 M 187 133 M 185 133 L 183 133 M 183 133 M 181 133 L 179 133 M 179 133 M 177 133 L 175 133 M 175 133 M 173 133 L 171 133 M 171 133 M 169 133 L 167 133 M 167 133 M 165 133 L 163 133 M 163 133 M 161 133 L 159 133 M 159 133 M 157 133 L 155 133 M 155 133 M 153 133 L 151 133 M 151 133 M 149 133 L 147 133 M 147 133 M 145 133 L 143 133 M 143 133 M 141 133 L 139 133 M 139 133 M 137 133 L 135 133 M 135 133 M 133 133 L 131 133 M 131 133 M 129 133 L 127 133 M 127 133 M 125 133 L 123 133 M 123 133 M 121 133 L 119 133 M 119 133 M 117 133 L 115 133 M 115 133 M 113 133 L 111 133 M 111 133 M 109 133 L 107 133 M 107 133 M 105 133 L 103 133 M 103 133 M 101 133 L 99 133 M 99 133 M 97 133 L 95 133 M 95 133 M 93 133 L 91 133 M 91 133 M 89 133 L 87 133 M 87 133 M 85 133 L 83 133 M 83 133 M 81 133 L 79 133 M 79 133 M 77 133 L 75 133 M 75 133 M 73 133 L 71 133 M 71 133 M 69 133 L 67 133 M 67 133 M 65 133 L 63 133 M 63 133 M 61 133 L 59 133 M 59 133 M 57 133 L 55 133 M 55 133 M 53 133 L 51 133 M 51 133 M 49 133 L 47 133 M 47 133 M 45 133 L 43 133 M 43 133 M 41 133 L 39 133 M 39 133 M 37 133 L 35 133 M 35 133 M 33 133 L 31 133 M 31 133 M 29 133 L 27 133 M 27 133 M 25 133 L 23 133 M 23 133 M 21 133 L 19 133 M 19 133 M 17 133 L 15 133 M 15 133 M 13 133 L 11 133 M 11 133 M 9 133 L 7 133 M 7 133 M 5 133 L 3 133 M 3 133 M 1 133 L 0 133 Q 0 133 0 133 L 0 132 M 0 132 M 0 130 L 0 128 M 0 128 M 0 126 L 0 124 M 0 124 M 0 122 L 0 120 M 0 120 M 0 118 L 0 116 M 0 116 M 0 114 L 0 112 M 0 112 M 0 110 L 0 108 M 0 108 M 0 106 L 0 104 M 0 104 M 0 102 L 0 100 M 0 100 M 0 98 L 0 96 M 0 96 M 0 94 L 0 92 M 0 92 M 0 90 L 0 88 M 0 88 M 0 86 L 0 84 M 0 84 M 0 82 L 0 80 M 0 80 M 0 78 L 0 76 M 0 76 M 0 74 L 0 72 M 0 72 M 0 70 L 0 68 M 0 68 M 0 66 L 0 64 M 0 64 M 0 62 L 0 60 M 0 60 M 0 58 L 0 56 M 0 56 M 0 54 L 0 52 M 0 52 M 0 50 L 0 48 M 0 48 M 0 46 L 0 44 M 0 44 M 0 42 L 0 40 M 0 40 M 0 38 L 0 36 M 0 36 M 0 34 L 0 32 M 0 32 M 0 30 L 0 28 M 0 28 M 0 26 L 0 24 M 0 24 M 0 22 L 0 20 M 0 20 M 0 18 L 0 16 M 0 16 M 0 14 L 0 12 M 0 12 M 0 10 L 0 8 M 0 8 M 0 6 L 0 4 M 0 4 M 0 2 L 0 0 M 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,366,-4.75)"><g transform="translate(0,0)"><g transform="translate(-369,-40) translate(3,44.75) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#d9d9d9" d="M 370.5 38 L 370.5 28.4375 Q 370.5 18.875 370.5 18.875 L 370.5 18.875 Q 370.5 18.875 370.5 9.3125 L 370.5 -0.25" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,384.75,162.5)"><g><g transform="translate(0,0) scale(3.39,1.41)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.2949852507374631,0.7092198581560284)"><path fill="none" stroke="none" d="M 0 0 L 339 0 Q 339 0 339 0 L 339 141 Q 339 141 339 141 L 0 141 Q 0 141 0 141 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 339 1 L 339 3 M 339 3 M 339 5 L 339 7 M 339 7 M 339 9 L 339 11 M 339 11 M 339 13 L 339 15 M 339 15 M 339 17 L 339 19 M 339 19 M 339 21 L 339 23 M 339 23 M 339 25 L 339 27 M 339 27 M 339 29 L 339 31 M 339 31 M 339 33 L 339 35 M 339 35 M 339 37 L 339 39 M 339 39 M 339 41 L 339 43 M 339 43 M 339 45 L 339 47 M 339 47 M 339 49 L 339 51 M 339 51 M 339 53 L 339 55 M 339 55 M 339 57 L 339 59 M 339 59 M 339 61 L 339 63 M 339 63 M 339 65 L 339 67 M 339 67 M 339 69 L 339 71 M 339 71 M 339 73 L 339 75 M 339 75 M 339 77 L 339 79 M 339 79 M 339 81 L 339 83 M 339 83 M 339 85 L 339 87 M 339 87 M 339 89 L 339 91 M 339 91 M 339 93 L 339 95 M 339 95 M 339 97 L 339 99 M 339 99 M 339 101 L 339 103 M 339 103 M 339 105 L 339 107 M 339 107 M 339 109 L 339 111 M 339 111 M 339 113 L 339 115 M 339 115 M 339 117 L 339 119 M 339 119 M 339 121 L 339 123 M 339 123 M 339 125 L 339 127 M 339 127 M 339 129 L 339 131 M 339 131 M 339 133 L 339 135 M 339 135 M 339 137 L 339 139 M 339 139 M 339 141 L 339 141 Q 339 141 339 141 L 337 141 M 337 141 M 335 141 L 333 141 M 333 141 M 331 141 L 329 141 M 329 141 M 327 141 L 325 141 M 325 141 M 323 141 L 321 141 M 321 141 M 319 141 L 317 141 M 317 141 M 315 141 L 313 141 M 313 141 M 311 141 L 309 141 M 309 141 M 307 141 L 305 141 M 305 141 M 303 141 L 301 141 M 301 141 M 299 141 L 297 141 M 297 141 M 295 141 L 293 141 M 293 141 M 291 141 L 289 141 M 289 141 M 287 141 L 285 141 M 285 141 M 283 141 L 281 141 M 281 141 M 279 141 L 277 141 M 277 141 M 275 141 L 273 141 M 273 141 M 271 141 L 269 141 M 269 141 M 267 141 L 265 141 M 265 141 M 263 141 L 261 141 M 261 141 M 259 141 L 257 141 M 257 141 M 255 141 L 253 141 M 253 141 M 251 141 L 249 141 M 249 141 M 247 141 L 245 141 M 245 141 M 243 141 L 241 141 M 241 141 M 239 141 L 237 141 M 237 141 M 235 141 L 233 141 M 233 141 M 231 141 L 229 141 M 229 141 M 227 141 L 225 141 M 225 141 M 223 141 L 221 141 M 221 141 M 219 141 L 217 141 M 217 141 M 215 141 L 213 141 M 213 141 M 211 141 L 209 141 M 209 141 M 207 141 L 205 141 M 205 141 M 203 141 L 201 141 M 201 141 M 199 141 L 197 141 M 197 141 M 195 141 L 193 141 M 193 141 M 191 141 L 189 141 M 189 141 M 187 141 L 185 141 M 185 141 M 183 141 L 181 141 M 181 141 M 179 141 L 177 141 M 177 141 M 175 141 L 173 141 M 173 141 M 171 141 L 169 141 M 169 141 M 167 141 L 165 141 M 165 141 M 163 141 L 161 141 M 161 141 M 159 141 L 157 141 M 157 141 M 155 141 L 153 141 M 153 141 M 151 141 L 149 141 M 149 141 M 147 141 L 145 141 M 145 141 M 143 141 L 141 141 M 141 141 M 139 141 L 137 141 M 137 141 M 135 141 L 133 141 M 133 141 M 131 141 L 129 141 M 129 141 M 127 141 L 125 141 M 125 141 M 123 141 L 121 141 M 121 141 M 119 141 L 117 141 M 117 141 M 115 141 L 113 141 M 113 141 M 111 141 L 109 141 M 109 141 M 107 141 L 105 141 M 105 141 M 103 141 L 101 141 M 101 141 M 99 141 L 97 141 M 97 141 M 95 141 L 93 141 M 93 141 M 91 141 L 89 141 M 89 141 M 87 141 L 85 141 M 85 141 M 83 141 L 81 141 M 81 141 M 79 141 L 77 141 M 77 141 M 75 141 L 73 141 M 73 141 M 71 141 L 69 141 M 69 141 M 67 141 L 65 141 M 65 141 M 63 141 L 61 141 M 61 141 M 59 141 L 57 141 M 57 141 M 55 141 L 53 141 M 53 141 M 51 141 L 49 141 M 49 141 M 47 141 L 45 141 M 45 141 M 43 141 L 41 141 M 41 141 M 39 141 L 37 141 M 37 141 M 35 141 L 33 141 M 33 141 M 31 141 L 29 141 M 29 141 M 27 141 L 25 141 M 25 141 M 23 141 L 21 141 M 21 141 M 19 141 L 17 141 M 17 141 M 15 141 L 13 141 M 13 141 M 11 141 L 9 141 M 9 141 M 7 141 L 5 141 M 5 141 M 3 141 L 1 141 M 1 141 M 0 140 L 0 138 M 0 138 M 0 136 L 0 134 M 0 134 M 0 132 L 0 130 M 0 130 M 0 128 L 0 126 M 0 126 M 0 124 L 0 122 M 0 122 M 0 120 L 0 118 M 0 118 M 0 116 L 0 114 M 0 114 M 0 112 L 0 110 M 0 110 M 0 108 L 0 106 M 0 106 M 0 104 L 0 102 M 0 102 M 0 100 L 0 98 M 0 98 M 0 96 L 0 94 M 0 94 M 0 92 L 0 90 M 0 90 M 0 88 L 0 86 M 0 86 M 0 84 L 0 82 M 0 82 M 0 80 L 0 78 M 0 78 M 0 76 L 0 74 M 0 74 M 0 72 L 0 70 M 0 70 M 0 68 L 0 66 M 0 66 M 0 64 L 0 62 M 0 62 M 0 60 L 0 58 M 0 58 M 0 56 L 0 54 M 0 54 M 0 52 L 0 50 M 0 50 M 0 48 L 0 46 M 0 46 M 0 44 L 0 42 M 0 42 M 0 40 L 0 38 M 0 38 M 0 36 L 0 34 M 0 34 M 0 32 L 0 30 M 0 30 M 0 28 L 0 26 M 0 26 M 0 24 L 0 22 M 0 22 M 0 20 L 0 18 M 0 18 M 0 16 L 0 14 M 0 14 M 0 12 L 0 10 M 0 10 M 0 8 L 0 6 M 0 6 M 0 4 L 0 2 M 0 2 M 0 0 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,11.5,162.5)"><g><g transform="translate(0,0) scale(3.4650000000000007,1.41)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.2886002886002885,0.7092198581560284)"><path fill="none" stroke="none" d="M 0 0 L 346.50000000000006 0 Q 346.50000000000006 0 346.50000000000006 0 L 346.50000000000006 141 Q 346.50000000000006 141 346.50000000000006 141 L 0 141 Q 0 141 0 141 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 340 0 L 342 0 M 342 0 M 344 0 L 346 0 M 346 0 M 346.50000000000006 1.4999999999999432 L 346.50000000000006 3.499999999999943 M 346.50000000000006 3.499999999999943 M 346.50000000000006 5.499999999999943 L 346.50000000000006 7.499999999999943 M 346.50000000000006 7.499999999999943 M 346.50000000000006 9.499999999999943 L 346.50000000000006 11.499999999999943 M 346.50000000000006 11.499999999999943 M 346.50000000000006 13.499999999999943 L 346.50000000000006 15.499999999999943 M 346.50000000000006 15.499999999999943 M 346.50000000000006 17.499999999999943 L 346.50000000000006 19.499999999999943 M 346.50000000000006 19.499999999999943 M 346.50000000000006 21.499999999999943 L 346.50000000000006 23.499999999999943 M 346.50000000000006 23.499999999999943 M 346.50000000000006 25.499999999999943 L 346.50000000000006 27.499999999999943 M 346.50000000000006 27.499999999999943 M 346.50000000000006 29.499999999999943 L 346.50000000000006 31.499999999999943 M 346.50000000000006 31.499999999999943 M 346.50000000000006 33.49999999999994 L 346.50000000000006 35.49999999999994 M 346.50000000000006 35.49999999999994 M 346.50000000000006 37.49999999999994 L 346.50000000000006 39.49999999999994 M 346.50000000000006 39.49999999999994 M 346.50000000000006 41.49999999999994 L 346.50000000000006 43.49999999999994 M 346.50000000000006 43.49999999999994 M 346.50000000000006 45.49999999999994 L 346.50000000000006 47.49999999999994 M 346.50000000000006 47.49999999999994 M 346.50000000000006 49.49999999999994 L 346.50000000000006 51.49999999999994 M 346.50000000000006 51.49999999999994 M 346.50000000000006 53.49999999999994 L 346.50000000000006 55.49999999999994 M 346.50000000000006 55.49999999999994 M 346.50000000000006 57.49999999999994 L 346.50000000000006 59.49999999999994 M 346.50000000000006 59.49999999999994 M 346.50000000000006 61.49999999999994 L 346.50000000000006 63.49999999999994 M 346.50000000000006 63.49999999999994 M 346.50000000000006 65.49999999999994 L 346.50000000000006 67.49999999999994 M 346.50000000000006 67.49999999999994 M 346.50000000000006 69.49999999999994 L 346.50000000000006 71.49999999999994 M 346.50000000000006 71.49999999999994 M 346.50000000000006 73.49999999999994 L 346.50000000000006 75.49999999999994 M 346.50000000000006 75.49999999999994 M 346.50000000000006 77.49999999999994 L 346.50000000000006 79.49999999999994 M 346.50000000000006 79.49999999999994 M 346.50000000000006 81.49999999999994 L 346.50000000000006 83.49999999999994 M 346.50000000000006 83.49999999999994 M 346.50000000000006 85.49999999999994 L 346.50000000000006 87.49999999999994 M 346.50000000000006 87.49999999999994 M 346.50000000000006 89.49999999999994 L 346.50000000000006 91.49999999999994 M 346.50000000000006 91.49999999999994 M 346.50000000000006 93.49999999999994 L 346.50000000000006 95.49999999999994 M 346.50000000000006 95.49999999999994 M 346.50000000000006 97.49999999999994 L 346.50000000000006 99.49999999999994 M 346.50000000000006 99.49999999999994 M 346.50000000000006 101.49999999999994 L 346.50000000000006 103.49999999999994 M 346.50000000000006 103.49999999999994 M 346.50000000000006 105.49999999999994 L 346.50000000000006 107.49999999999994 M 346.50000000000006 107.49999999999994 M 346.50000000000006 109.49999999999994 L 346.50000000000006 111.49999999999994 M 346.50000000000006 111.49999999999994 M 346.50000000000006 113.49999999999994 L 346.50000000000006 115.49999999999994 M 346.50000000000006 115.49999999999994 M 346.50000000000006 117.49999999999994 L 346.50000000000006 119.49999999999994 M 346.50000000000006 119.49999999999994 M 346.50000000000006 121.49999999999994 L 346.50000000000006 123.49999999999994 M 346.50000000000006 123.49999999999994 M 346.50000000000006 125.49999999999994 L 346.50000000000006 127.49999999999994 M 346.50000000000006 127.49999999999994 M 346.50000000000006 129.49999999999994 L 346.50000000000006 131.49999999999994 M 346.50000000000006 131.49999999999994 M 346.50000000000006 133.49999999999994 L 346.50000000000006 135.49999999999994 M 346.50000000000006 135.49999999999994 M 346.50000000000006 137.49999999999994 L 346.50000000000006 139.49999999999994 M 346.50000000000006 139.49999999999994 M 346.0000000000001 141 L 344.0000000000001 141 M 344.0000000000001 141 M 342.0000000000001 141 L 340.0000000000001 141 M 340.0000000000001 141 M 338.0000000000001 141 L 336.0000000000001 141 M 336.0000000000001 141 M 334.0000000000001 141 L 332.0000000000001 141 M 332.0000000000001 141 M 330.0000000000001 141 L 328.0000000000001 141 M 328.0000000000001 141 M 326.0000000000001 141 L 324.0000000000001 141 M 324.0000000000001 141 M 322.0000000000001 141 L 320.0000000000001 141 M 320.0000000000001 141 M 318.0000000000001 141 L 316.0000000000001 141 M 316.0000000000001 141 M 314.0000000000001 141 L 312.0000000000001 141 M 312.0000000000001 141 M 310.0000000000001 141 L 308.0000000000001 141 M 308.0000000000001 141 M 306.0000000000001 141 L 304.0000000000001 141 M 304.0000000000001 141 M 302.0000000000001 141 L 300.0000000000001 141 M 300.0000000000001 141 M 298.0000000000001 141 L 296.0000000000001 141 M 296.0000000000001 141 M 294.0000000000001 141 L 292.0000000000001 141 M 292.0000000000001 141 M 290.0000000000001 141 L 288.0000000000001 141 M 288.0000000000001 141 M 286.0000000000001 141 L 284.0000000000001 141 M 284.0000000000001 141 M 282.0000000000001 141 L 280.0000000000001 141 M 280.0000000000001 141 M 278.0000000000001 141 L 276.0000000000001 141 M 276.0000000000001 141 M 274.0000000000001 141 L 272.0000000000001 141 M 272.0000000000001 141 M 270.0000000000001 141 L 268.0000000000001 141 M 268.0000000000001 141 M 266.0000000000001 141 L 264.0000000000001 141 M 264.0000000000001 141 M 262.0000000000001 141 L 260.0000000000001 141 M 260.0000000000001 141 M 258.0000000000001 141 L 256.0000000000001 141 M 256.0000000000001 141 M 254.0000000000001 141 L 252.0000000000001 141 M 252.0000000000001 141 M 250.0000000000001 141 L 248.0000000000001 141 M 248.0000000000001 141 M 246.0000000000001 141 L 244.0000000000001 141 M 244.0000000000001 141 M 242.0000000000001 141 L 240.0000000000001 141 M 240.0000000000001 141 M 238.0000000000001 141 L 236.0000000000001 141 M 236.0000000000001 141 M 234.0000000000001 141 L 232.0000000000001 141 M 232.0000000000001 141 M 230.0000000000001 141 L 228.0000000000001 141 M 228.0000000000001 141 M 226.0000000000001 141 L 224.0000000000001 141 M 224.0000000000001 141 M 222.0000000000001 141 L 220.0000000000001 141 M 220.0000000000001 141 M 218.0000000000001 141 L 216.0000000000001 141 M 216.0000000000001 141 M 214.0000000000001 141 L 212.0000000000001 141 M 212.0000000000001 141 M 210.0000000000001 141 L 208.0000000000001 141 M 208.0000000000001 141 M 206.0000000000001 141 L 204.0000000000001 141 M 204.0000000000001 141 M 202.0000000000001 141 L 200.0000000000001 141 M 200.0000000000001 141 M 198.0000000000001 141 L 196.0000000000001 141 M 196.0000000000001 141 M 194.0000000000001 141 L 192.0000000000001 141 M 192.0000000000001 141 M 190.0000000000001 141 L 188.0000000000001 141 M 188.0000000000001 141 M 186.0000000000001 141 L 184.0000000000001 141 M 184.0000000000001 141 M 182.0000000000001 141 L 180.0000000000001 141 M 180.0000000000001 141 M 178.0000000000001 141 L 176.0000000000001 141 M 176.0000000000001 141 M 174.0000000000001 141 L 172.0000000000001 141 M 172.0000000000001 141 M 170.0000000000001 141 L 168.0000000000001 141 M 168.0000000000001 141 M 166.0000000000001 141 L 164.0000000000001 141 M 164.0000000000001 141 M 162.0000000000001 141 L 160.0000000000001 141 M 160.0000000000001 141 M 158.0000000000001 141 L 156.0000000000001 141 M 156.0000000000001 141 M 154.0000000000001 141 L 152.0000000000001 141 M 152.0000000000001 141 M 150.0000000000001 141 L 148.0000000000001 141 M 148.0000000000001 141 M 146.0000000000001 141 L 144.0000000000001 141 M 144.0000000000001 141 M 142.0000000000001 141 L 140.0000000000001 141 M 140.0000000000001 141 M 138.0000000000001 141 L 136.0000000000001 141 M 136.0000000000001 141 M 134.0000000000001 141 L 132.0000000000001 141 M 132.0000000000001 141 M 130.0000000000001 141 L 128.0000000000001 141 M 128.0000000000001 141 M 126.00000000000011 141 L 124.00000000000011 141 M 124.00000000000011 141 M 122.00000000000011 141 L 120.00000000000011 141 M 120.00000000000011 141 M 118.00000000000011 141 L 116.00000000000011 141 M 116.00000000000011 141 M 114.00000000000011 141 L 112.00000000000011 141 M 112.00000000000011 141 M 110.00000000000011 141 L 108.00000000000011 141 M 108.00000000000011 141 M 106.00000000000011 141 L 104.00000000000011 141 M 104.00000000000011 141 M 102.00000000000011 141 L 100.00000000000011 141 M 100.00000000000011 141 M 98.00000000000011 141 L 96.00000000000011 141 M 96.00000000000011 141 M 94.00000000000011 141 L 92.00000000000011 141 M 92.00000000000011 141 M 90.00000000000011 141 L 88.00000000000011 141 M 88.00000000000011 141 M 86.00000000000011 141 L 84.00000000000011 141 M 84.00000000000011 141 M 82.00000000000011 141 L 80.00000000000011 141 M 80.00000000000011 141 M 78.00000000000011 141 L 76.00000000000011 141 M 76.00000000000011 141 M 74.00000000000011 141 L 72.00000000000011 141 M 72.00000000000011 141 M 70.00000000000011 141 L 68.00000000000011 141 M 68.00000000000011 141 M 66.00000000000011 141 L 64.00000000000011 141 M 64.00000000000011 141 M 62.000000000000114 141 L 60.000000000000114 141 M 60.000000000000114 141 M 58.000000000000114 141 L 56.000000000000114 141 M 56.000000000000114 141 M 54.000000000000114 141 L 52.000000000000114 141 M 52.000000000000114 141 M 50.000000000000114 141 L 48.000000000000114 141 M 48.000000000000114 141 M 46.000000000000114 141 L 44.000000000000114 141 M 44.000000000000114 141 M 42.000000000000114 141 L 40.000000000000114 141 M 40.000000000000114 141 M 38.000000000000114 141 L 36.000000000000114 141 M 36.000000000000114 141 M 34.000000000000114 141 L 32.000000000000114 141 M 32.000000000000114 141 M 30.000000000000114 141 L 28.000000000000114 141 M 28.000000000000114 141 M 26.000000000000114 141 L 24.000000000000114 141 M 24.000000000000114 141 M 22.000000000000114 141 L 20.000000000000114 141 M 20.000000000000114 141 M 18.000000000000114 141 L 16.000000000000114 141 M 16.000000000000114 141 M 14.000000000000114 141 L 12.000000000000114 141 M 12.000000000000114 141 M 10.000000000000114 141 L 8.000000000000114 141 M 8.000000000000114 141 M 6.000000000000114 141 L 4.000000000000114 141 M 4.000000000000114 141 M 2.0000000000001137 141 L 1.1368683772161603e-13 141 M 1.1368683772161603e-13 141 M 0 139.0000000000001 L 0 137.0000000000001 M 0 137.0000000000001 M 0 135.0000000000001 L 0 133.0000000000001 M 0 133.0000000000001 M 0 131.0000000000001 L 0 129.0000000000001 M 0 129.0000000000001 M 0 127.00000000000011 L 0 125.00000000000011 M 0 125.00000000000011 M 0 123.00000000000011 L 0 121.00000000000011 M 0 121.00000000000011 M 0 119.00000000000011 L 0 117.00000000000011 M 0 117.00000000000011 M 0 115.00000000000011 L 0 113.00000000000011 M 0 113.00000000000011 M 0 111.00000000000011 L 0 109.00000000000011 M 0 109.00000000000011 M 0 107.00000000000011 L 0 105.00000000000011 M 0 105.00000000000011 M 0 103.00000000000011 L 0 101.00000000000011 M 0 101.00000000000011 M 0 99.00000000000011 L 0 97.00000000000011 M 0 97.00000000000011 M 0 95.00000000000011 L 0 93.00000000000011 M 0 93.00000000000011 M 0 91.00000000000011 L 0 89.00000000000011 M 0 89.00000000000011 M 0 87.00000000000011 L 0 85.00000000000011 M 0 85.00000000000011 M 0 83.00000000000011 L 0 81.00000000000011 M 0 81.00000000000011 M 0 79.00000000000011 L 0 77.00000000000011 M 0 77.00000000000011 M 0 75.00000000000011 L 0 73.00000000000011 M 0 73.00000000000011 M 0 71.00000000000011 L 0 69.00000000000011 M 0 69.00000000000011 M 0 67.00000000000011 L 0 65.00000000000011 M 0 65.00000000000011 M 0 63.000000000000114 L 0 61.000000000000114 M 0 61.000000000000114 M 0 59.000000000000114 L 0 57.000000000000114 M 0 57.000000000000114 M 0 55.000000000000114 L 0 53.000000000000114 M 0 53.000000000000114 M 0 51.000000000000114 L 0 49.000000000000114 M 0 49.000000000000114 M 0 47.000000000000114 L 0 45.000000000000114 M 0 45.000000000000114 M 0 43.000000000000114 L 0 41.000000000000114 M 0 41.000000000000114 M 0 39.000000000000114 L 0 37.000000000000114 M 0 37.000000000000114 M 0 35.000000000000114 L 0 33.000000000000114 M 0 33.000000000000114 M 0 31.000000000000114 L 0 29.000000000000114 M 0 29.000000000000114 M 0 27.000000000000114 L 0 25.000000000000114 M 0 25.000000000000114 M 0 23.000000000000114 L 0 21.000000000000114 M 0 21.000000000000114 M 0 19.000000000000114 L 0 17.000000000000114 M 0 17.000000000000114 M 0 15.000000000000114 L 0 13.000000000000114 M 0 13.000000000000114 M 0 11.000000000000114 L 0 9.000000000000114 M 0 9.000000000000114 M 0 7.000000000000114 L 0 5.000000000000114 M 0 5.000000000000114 M 0 3.0000000000001137 L 0 1.0000000000001137 M 0 1.0000000000001137 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,384.75,323.5)"><g><g transform="translate(0,0) scale(3.3975,1.63)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.29433406916850624,0.6134969325153374)"><path fill="none" stroke="none" d="M 0 0 L 339.75 0 Q 339.75 0 339.75 0 L 339.75 163 Q 339.75 163 339.75 163 L 0 163 Q 0 163 0 163 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 339.75 0.25 L 339.75 2.25 M 339.75 2.25 M 339.75 4.25 L 339.75 6.25 M 339.75 6.25 M 339.75 8.25 L 339.75 10.25 M 339.75 10.25 M 339.75 12.25 L 339.75 14.25 M 339.75 14.25 M 339.75 16.25 L 339.75 18.25 M 339.75 18.25 M 339.75 20.25 L 339.75 22.25 M 339.75 22.25 M 339.75 24.25 L 339.75 26.25 M 339.75 26.25 M 339.75 28.25 L 339.75 30.25 M 339.75 30.25 M 339.75 32.25 L 339.75 34.25 M 339.75 34.25 M 339.75 36.25 L 339.75 38.25 M 339.75 38.25 M 339.75 40.25 L 339.75 42.25 M 339.75 42.25 M 339.75 44.25 L 339.75 46.25 M 339.75 46.25 M 339.75 48.25 L 339.75 50.25 M 339.75 50.25 M 339.75 52.25 L 339.75 54.25 M 339.75 54.25 M 339.75 56.25 L 339.75 58.25 M 339.75 58.25 M 339.75 60.25 L 339.75 62.25 M 339.75 62.25 M 339.75 64.25 L 339.75 66.25 M 339.75 66.25 M 339.75 68.25 L 339.75 70.25 M 339.75 70.25 M 339.75 72.25 L 339.75 74.25 M 339.75 74.25 M 339.75 76.25 L 339.75 78.25 M 339.75 78.25 M 339.75 80.25 L 339.75 82.25 M 339.75 82.25 M 339.75 84.25 L 339.75 86.25 M 339.75 86.25 M 339.75 88.25 L 339.75 90.25 M 339.75 90.25 M 339.75 92.25 L 339.75 94.25 M 339.75 94.25 M 339.75 96.25 L 339.75 98.25 M 339.75 98.25 M 339.75 100.25 L 339.75 102.25 M 339.75 102.25 M 339.75 104.25 L 339.75 106.25 M 339.75 106.25 M 339.75 108.25 L 339.75 110.25 M 339.75 110.25 M 339.75 112.25 L 339.75 114.25 M 339.75 114.25 M 339.75 116.25 L 339.75 118.25 M 339.75 118.25 M 339.75 120.25 L 339.75 122.25 M 339.75 122.25 M 339.75 124.25 L 339.75 126.25 M 339.75 126.25 M 339.75 128.25 L 339.75 130.25 M 339.75 130.25 M 339.75 132.25 L 339.75 134.25 M 339.75 134.25 M 339.75 136.25 L 339.75 138.25 M 339.75 138.25 M 339.75 140.25 L 339.75 142.25 M 339.75 142.25 M 339.75 144.25 L 339.75 146.25 M 339.75 146.25 M 339.75 148.25 L 339.75 150.25 M 339.75 150.25 M 339.75 152.25 L 339.75 154.25 M 339.75 154.25 M 339.75 156.25 L 339.75 158.25 M 339.75 158.25 M 339.75 160.25 L 339.75 162.25 M 339.75 162.25 M 338.5 163 L 336.5 163 M 336.5 163 M 334.5 163 L 332.5 163 M 332.5 163 M 330.5 163 L 328.5 163 M 328.5 163 M 326.5 163 L 324.5 163 M 324.5 163 M 322.5 163 L 320.5 163 M 320.5 163 M 318.5 163 L 316.5 163 M 316.5 163 M 314.5 163 L 312.5 163 M 312.5 163 M 310.5 163 L 308.5 163 M 308.5 163 M 306.5 163 L 304.5 163 M 304.5 163 M 302.5 163 L 300.5 163 M 300.5 163 M 298.5 163 L 296.5 163 M 296.5 163 M 294.5 163 L 292.5 163 M 292.5 163 M 290.5 163 L 288.5 163 M 288.5 163 M 286.5 163 L 284.5 163 M 284.5 163 M 282.5 163 L 280.5 163 M 280.5 163 M 278.5 163 L 276.5 163 M 276.5 163 M 274.5 163 L 272.5 163 M 272.5 163 M 270.5 163 L 268.5 163 M 268.5 163 M 266.5 163 L 264.5 163 M 264.5 163 M 262.5 163 L 260.5 163 M 260.5 163 M 258.5 163 L 256.5 163 M 256.5 163 M 254.5 163 L 252.5 163 M 252.5 163 M 250.5 163 L 248.5 163 M 248.5 163 M 246.5 163 L 244.5 163 M 244.5 163 M 242.5 163 L 240.5 163 M 240.5 163 M 238.5 163 L 236.5 163 M 236.5 163 M 234.5 163 L 232.5 163 M 232.5 163 M 230.5 163 L 228.5 163 M 228.5 163 M 226.5 163 L 224.5 163 M 224.5 163 M 222.5 163 L 220.5 163 M 220.5 163 M 218.5 163 L 216.5 163 M 216.5 163 M 214.5 163 L 212.5 163 M 212.5 163 M 210.5 163 L 208.5 163 M 208.5 163 M 206.5 163 L 204.5 163 M 204.5 163 M 202.5 163 L 200.5 163 M 200.5 163 M 198.5 163 L 196.5 163 M 196.5 163 M 194.5 163 L 192.5 163 M 192.5 163 M 190.5 163 L 188.5 163 M 188.5 163 M 186.5 163 L 184.5 163 M 184.5 163 M 182.5 163 L 180.5 163 M 180.5 163 M 178.5 163 L 176.5 163 M 176.5 163 M 174.5 163 L 172.5 163 M 172.5 163 M 170.5 163 L 168.5 163 M 168.5 163 M 166.5 163 L 164.5 163 M 164.5 163 M 162.5 163 L 160.5 163 M 160.5 163 M 158.5 163 L 156.5 163 M 156.5 163 M 154.5 163 L 152.5 163 M 152.5 163 M 150.5 163 L 148.5 163 M 148.5 163 M 146.5 163 L 144.5 163 M 144.5 163 M 142.5 163 L 140.5 163 M 140.5 163 M 138.5 163 L 136.5 163 M 136.5 163 M 134.5 163 L 132.5 163 M 132.5 163 M 130.5 163 L 128.5 163 M 128.5 163 M 126.5 163 L 124.5 163 M 124.5 163 M 122.5 163 L 120.5 163 M 120.5 163 M 118.5 163 L 116.5 163 M 116.5 163 M 114.5 163 L 112.5 163 M 112.5 163 M 110.5 163 L 108.5 163 M 108.5 163 M 106.5 163 L 104.5 163 M 104.5 163 M 102.5 163 L 100.5 163 M 100.5 163 M 98.5 163 L 96.5 163 M 96.5 163 M 94.5 163 L 92.5 163 M 92.5 163 M 90.5 163 L 88.5 163 M 88.5 163 M 86.5 163 L 84.5 163 M 84.5 163 M 82.5 163 L 80.5 163 M 80.5 163 M 78.5 163 L 76.5 163 M 76.5 163 M 74.5 163 L 72.5 163 M 72.5 163 M 70.5 163 L 68.5 163 M 68.5 163 M 66.5 163 L 64.5 163 M 64.5 163 M 62.5 163 L 60.5 163 M 60.5 163 M 58.5 163 L 56.5 163 M 56.5 163 M 54.5 163 L 52.5 163 M 52.5 163 M 50.5 163 L 48.5 163 M 48.5 163 M 46.5 163 L 44.5 163 M 44.5 163 M 42.5 163 L 40.5 163 M 40.5 163 M 38.5 163 L 36.5 163 M 36.5 163 M 34.5 163 L 32.5 163 M 32.5 163 M 30.5 163 L 28.5 163 M 28.5 163 M 26.5 163 L 24.5 163 M 24.5 163 M 22.5 163 L 20.5 163 M 20.5 163 M 18.5 163 L 16.5 163 M 16.5 163 M 14.5 163 L 12.5 163 M 12.5 163 M 10.5 163 L 8.5 163 M 8.5 163 M 6.5 163 L 4.5 163 M 4.5 163 M 2.5 163 L 0.5 163 M 0.5 163 M 0 161.5 L 0 159.5 M 0 159.5 M 0 157.5 L 0 155.5 M 0 155.5 M 0 153.5 L 0 151.5 M 0 151.5 M 0 149.5 L 0 147.5 M 0 147.5 M 0 145.5 L 0 143.5 M 0 143.5 M 0 141.5 L 0 139.5 M 0 139.5 M 0 137.5 L 0 135.5 M 0 135.5 M 0 133.5 L 0 131.5 M 0 131.5 M 0 129.5 L 0 127.5 M 0 127.5 M 0 125.5 L 0 123.5 M 0 123.5 M 0 121.5 L 0 119.5 M 0 119.5 M 0 117.5 L 0 115.5 M 0 115.5 M 0 113.5 L 0 111.5 M 0 111.5 M 0 109.5 L 0 107.5 M 0 107.5 M 0 105.5 L 0 103.5 M 0 103.5 M 0 101.5 L 0 99.5 M 0 99.5 M 0 97.5 L 0 95.5 M 0 95.5 M 0 93.5 L 0 91.5 M 0 91.5 M 0 89.5 L 0 87.5 M 0 87.5 M 0 85.5 L 0 83.5 M 0 83.5 M 0 81.5 L 0 79.5 M 0 79.5 M 0 77.5 L 0 75.5 M 0 75.5 M 0 73.5 L 0 71.5 M 0 71.5 M 0 69.5 L 0 67.5 M 0 67.5 M 0 65.5 L 0 63.5 M 0 63.5 M 0 61.5 L 0 59.5 M 0 59.5 M 0 57.5 L 0 55.5 M 0 55.5 M 0 53.5 L 0 51.5 M 0 51.5 M 0 49.5 L 0 47.5 M 0 47.5 M 0 45.5 L 0 43.5 M 0 43.5 M 0 41.5 L 0 39.5 M 0 39.5 M 0 37.5 L 0 35.5 M 0 35.5 M 0 33.5 L 0 31.5 M 0 31.5 M 0 29.5 L 0 27.5 M 0 27.5 M 0 25.5 L 0 23.5 M 0 23.5 M 0 21.5 L 0 19.5 M 0 19.5 M 0 17.5 L 0 15.5 M 0 15.5 M 0 13.5 L 0 11.5 M 0 11.5 M 0 9.5 L 0 7.5 M 0 7.5 M 0 5.5 L 0 3.5 M 0 3.5 M 0 1.5 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,11.5,323.5)"><g><g transform="translate(0,0) scale(3.4649999999999994,1.63)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.28860028860028863,0.6134969325153374)"><path fill="none" stroke="none" d="M 0 0 L 346.49999999999994 0 Q 346.49999999999994 0 346.49999999999994 0 L 346.49999999999994 163 Q 346.49999999999994 163 346.49999999999994 163 L 0 163 Q 0 163 0 163 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 340 0 L 342 0 M 342 0 M 344 0 L 346 0 M 346 0 M 346.49999999999994 1.5000000000000568 L 346.49999999999994 3.500000000000057 M 346.49999999999994 3.500000000000057 M 346.49999999999994 5.500000000000057 L 346.49999999999994 7.500000000000057 M 346.49999999999994 7.500000000000057 M 346.49999999999994 9.500000000000057 L 346.49999999999994 11.500000000000057 M 346.49999999999994 11.500000000000057 M 346.49999999999994 13.500000000000057 L 346.49999999999994 15.500000000000057 M 346.49999999999994 15.500000000000057 M 346.49999999999994 17.500000000000057 L 346.49999999999994 19.500000000000057 M 346.49999999999994 19.500000000000057 M 346.49999999999994 21.500000000000057 L 346.49999999999994 23.500000000000057 M 346.49999999999994 23.500000000000057 M 346.49999999999994 25.500000000000057 L 346.49999999999994 27.500000000000057 M 346.49999999999994 27.500000000000057 M 346.49999999999994 29.500000000000057 L 346.49999999999994 31.500000000000057 M 346.49999999999994 31.500000000000057 M 346.49999999999994 33.50000000000006 L 346.49999999999994 35.50000000000006 M 346.49999999999994 35.50000000000006 M 346.49999999999994 37.50000000000006 L 346.49999999999994 39.50000000000006 M 346.49999999999994 39.50000000000006 M 346.49999999999994 41.50000000000006 L 346.49999999999994 43.50000000000006 M 346.49999999999994 43.50000000000006 M 346.49999999999994 45.50000000000006 L 346.49999999999994 47.50000000000006 M 346.49999999999994 47.50000000000006 M 346.49999999999994 49.50000000000006 L 346.49999999999994 51.50000000000006 M 346.49999999999994 51.50000000000006 M 346.49999999999994 53.50000000000006 L 346.49999999999994 55.50000000000006 M 346.49999999999994 55.50000000000006 M 346.49999999999994 57.50000000000006 L 346.49999999999994 59.50000000000006 M 346.49999999999994 59.50000000000006 M 346.49999999999994 61.50000000000006 L 346.49999999999994 63.50000000000006 M 346.49999999999994 63.50000000000006 M 346.49999999999994 65.50000000000006 L 346.49999999999994 67.50000000000006 M 346.49999999999994 67.50000000000006 M 346.49999999999994 69.50000000000006 L 346.49999999999994 71.50000000000006 M 346.49999999999994 71.50000000000006 M 346.49999999999994 73.50000000000006 L 346.49999999999994 75.50000000000006 M 346.49999999999994 75.50000000000006 M 346.49999999999994 77.50000000000006 L 346.49999999999994 79.50000000000006 M 346.49999999999994 79.50000000000006 M 346.49999999999994 81.50000000000006 L 346.49999999999994 83.50000000000006 M 346.49999999999994 83.50000000000006 M 346.49999999999994 85.50000000000006 L 346.49999999999994 87.50000000000006 M 346.49999999999994 87.50000000000006 M 346.49999999999994 89.50000000000006 L 346.49999999999994 91.50000000000006 M 346.49999999999994 91.50000000000006 M 346.49999999999994 93.50000000000006 L 346.49999999999994 95.50000000000006 M 346.49999999999994 95.50000000000006 M 346.49999999999994 97.50000000000006 L 346.49999999999994 99.50000000000006 M 346.49999999999994 99.50000000000006 M 346.49999999999994 101.50000000000006 L 346.49999999999994 103.50000000000006 M 346.49999999999994 103.50000000000006 M 346.49999999999994 105.50000000000006 L 346.49999999999994 107.50000000000006 M 346.49999999999994 107.50000000000006 M 346.49999999999994 109.50000000000006 L 346.49999999999994 111.50000000000006 M 346.49999999999994 111.50000000000006 M 346.49999999999994 113.50000000000006 L 346.49999999999994 115.50000000000006 M 346.49999999999994 115.50000000000006 M 346.49999999999994 117.50000000000006 L 346.49999999999994 119.50000000000006 M 346.49999999999994 119.50000000000006 M 346.49999999999994 121.50000000000006 L 346.49999999999994 123.50000000000006 M 346.49999999999994 123.50000000000006 M 346.49999999999994 125.50000000000006 L 346.49999999999994 127.50000000000006 M 346.49999999999994 127.50000000000006 M 346.49999999999994 129.50000000000006 L 346.49999999999994 131.50000000000006 M 346.49999999999994 131.50000000000006 M 346.49999999999994 133.50000000000006 L 346.49999999999994 135.50000000000006 M 346.49999999999994 135.50000000000006 M 346.49999999999994 137.50000000000006 L 346.49999999999994 139.50000000000006 M 346.49999999999994 139.50000000000006 M 346.49999999999994 141.50000000000006 L 346.49999999999994 143.50000000000006 M 346.49999999999994 143.50000000000006 M 346.49999999999994 145.50000000000006 L 346.49999999999994 147.50000000000006 M 346.49999999999994 147.50000000000006 M 346.49999999999994 149.50000000000006 L 346.49999999999994 151.50000000000006 M 346.49999999999994 151.50000000000006 M 346.49999999999994 153.50000000000006 L 346.49999999999994 155.50000000000006 M 346.49999999999994 155.50000000000006 M 346.49999999999994 157.50000000000006 L 346.49999999999994 159.50000000000006 M 346.49999999999994 159.50000000000006 M 346.49999999999994 161.50000000000006 L 346.49999999999994 163 Q 346.49999999999994 163 346.49999999999994 163 L 345.9999999999999 163 M 345.9999999999999 163 M 343.9999999999999 163 L 341.9999999999999 163 M 341.9999999999999 163 M 339.9999999999999 163 L 337.9999999999999 163 M 337.9999999999999 163 M 335.9999999999999 163 L 333.9999999999999 163 M 333.9999999999999 163 M 331.9999999999999 163 L 329.9999999999999 163 M 329.9999999999999 163 M 327.9999999999999 163 L 325.9999999999999 163 M 325.9999999999999 163 M 323.9999999999999 163 L 321.9999999999999 163 M 321.9999999999999 163 M 319.9999999999999 163 L 317.9999999999999 163 M 317.9999999999999 163 M 315.9999999999999 163 L 313.9999999999999 163 M 313.9999999999999 163 M 311.9999999999999 163 L 309.9999999999999 163 M 309.9999999999999 163 M 307.9999999999999 163 L 305.9999999999999 163 M 305.9999999999999 163 M 303.9999999999999 163 L 301.9999999999999 163 M 301.9999999999999 163 M 299.9999999999999 163 L 297.9999999999999 163 M 297.9999999999999 163 M 295.9999999999999 163 L 293.9999999999999 163 M 293.9999999999999 163 M 291.9999999999999 163 L 289.9999999999999 163 M 289.9999999999999 163 M 287.9999999999999 163 L 285.9999999999999 163 M 285.9999999999999 163 M 283.9999999999999 163 L 281.9999999999999 163 M 281.9999999999999 163 M 279.9999999999999 163 L 277.9999999999999 163 M 277.9999999999999 163 M 275.9999999999999 163 L 273.9999999999999 163 M 273.9999999999999 163 M 271.9999999999999 163 L 269.9999999999999 163 M 269.9999999999999 163 M 267.9999999999999 163 L 265.9999999999999 163 M 265.9999999999999 163 M 263.9999999999999 163 L 261.9999999999999 163 M 261.9999999999999 163 M 259.9999999999999 163 L 257.9999999999999 163 M 257.9999999999999 163 M 255.9999999999999 163 L 253.9999999999999 163 M 253.9999999999999 163 M 251.9999999999999 163 L 249.9999999999999 163 M 249.9999999999999 163 M 247.9999999999999 163 L 245.9999999999999 163 M 245.9999999999999 163 M 243.9999999999999 163 L 241.9999999999999 163 M 241.9999999999999 163 M 239.9999999999999 163 L 237.9999999999999 163 M 237.9999999999999 163 M 235.9999999999999 163 L 233.9999999999999 163 M 233.9999999999999 163 M 231.9999999999999 163 L 229.9999999999999 163 M 229.9999999999999 163 M 227.9999999999999 163 L 225.9999999999999 163 M 225.9999999999999 163 M 223.9999999999999 163 L 221.9999999999999 163 M 221.9999999999999 163 M 219.9999999999999 163 L 217.9999999999999 163 M 217.9999999999999 163 M 215.9999999999999 163 L 213.9999999999999 163 M 213.9999999999999 163 M 211.9999999999999 163 L 209.9999999999999 163 M 209.9999999999999 163 M 207.9999999999999 163 L 205.9999999999999 163 M 205.9999999999999 163 M 203.9999999999999 163 L 201.9999999999999 163 M 201.9999999999999 163 M 199.9999999999999 163 L 197.9999999999999 163 M 197.9999999999999 163 M 195.9999999999999 163 L 193.9999999999999 163 M 193.9999999999999 163 M 191.9999999999999 163 L 189.9999999999999 163 M 189.9999999999999 163 M 187.9999999999999 163 L 185.9999999999999 163 M 185.9999999999999 163 M 183.9999999999999 163 L 181.9999999999999 163 M 181.9999999999999 163 M 179.9999999999999 163 L 177.9999999999999 163 M 177.9999999999999 163 M 175.9999999999999 163 L 173.9999999999999 163 M 173.9999999999999 163 M 171.9999999999999 163 L 169.9999999999999 163 M 169.9999999999999 163 M 167.9999999999999 163 L 165.9999999999999 163 M 165.9999999999999 163 M 163.9999999999999 163 L 161.9999999999999 163 M 161.9999999999999 163 M 159.9999999999999 163 L 157.9999999999999 163 M 157.9999999999999 163 M 155.9999999999999 163 L 153.9999999999999 163 M 153.9999999999999 163 M 151.9999999999999 163 L 149.9999999999999 163 M 149.9999999999999 163 M 147.9999999999999 163 L 145.9999999999999 163 M 145.9999999999999 163 M 143.9999999999999 163 L 141.9999999999999 163 M 141.9999999999999 163 M 139.9999999999999 163 L 137.9999999999999 163 M 137.9999999999999 163 M 135.9999999999999 163 L 133.9999999999999 163 M 133.9999999999999 163 M 131.9999999999999 163 L 129.9999999999999 163 M 129.9999999999999 163 M 127.99999999999989 163 L 125.99999999999989 163 M 125.99999999999989 163 M 123.99999999999989 163 L 121.99999999999989 163 M 121.99999999999989 163 M 119.99999999999989 163 L 117.99999999999989 163 M 117.99999999999989 163 M 115.99999999999989 163 L 113.99999999999989 163 M 113.99999999999989 163 M 111.99999999999989 163 L 109.99999999999989 163 M 109.99999999999989 163 M 107.99999999999989 163 L 105.99999999999989 163 M 105.99999999999989 163 M 103.99999999999989 163 L 101.99999999999989 163 M 101.99999999999989 163 M 99.99999999999989 163 L 97.99999999999989 163 M 97.99999999999989 163 M 95.99999999999989 163 L 93.99999999999989 163 M 93.99999999999989 163 M 91.99999999999989 163 L 89.99999999999989 163 M 89.99999999999989 163 M 87.99999999999989 163 L 85.99999999999989 163 M 85.99999999999989 163 M 83.99999999999989 163 L 81.99999999999989 163 M 81.99999999999989 163 M 79.99999999999989 163 L 77.99999999999989 163 M 77.99999999999989 163 M 75.99999999999989 163 L 73.99999999999989 163 M 73.99999999999989 163 M 71.99999999999989 163 L 69.99999999999989 163 M 69.99999999999989 163 M 67.99999999999989 163 L 65.99999999999989 163 M 65.99999999999989 163 M 63.999999999999886 163 L 61.999999999999886 163 M 61.999999999999886 163 M 59.999999999999886 163 L 57.999999999999886 163 M 57.999999999999886 163 M 55.999999999999886 163 L 53.999999999999886 163 M 53.999999999999886 163 M 51.999999999999886 163 L 49.999999999999886 163 M 49.999999999999886 163 M 47.999999999999886 163 L 45.999999999999886 163 M 45.999999999999886 163 M 43.999999999999886 163 L 41.999999999999886 163 M 41.999999999999886 163 M 39.999999999999886 163 L 37.999999999999886 163 M 37.999999999999886 163 M 35.999999999999886 163 L 33.999999999999886 163 M 33.999999999999886 163 M 31.999999999999886 163 L 29.999999999999886 163 M 29.999999999999886 163 M 27.999999999999886 163 L 25.999999999999886 163 M 25.999999999999886 163 M 23.999999999999886 163 L 21.999999999999886 163 M 21.999999999999886 163 M 19.999999999999886 163 L 17.999999999999886 163 M 17.999999999999886 163 M 15.999999999999886 163 L 13.999999999999886 163 M 13.999999999999886 163 M 11.999999999999886 163 L 9.999999999999886 163 M 9.999999999999886 163 M 7.999999999999886 163 L 5.999999999999886 163 M 5.999999999999886 163 M 3.9999999999998863 163 L 1.9999999999998863 163 M 1.9999999999998863 163 M 0 162.9999999999999 L 0 160.9999999999999 M 0 160.9999999999999 M 0 158.9999999999999 L 0 156.9999999999999 M 0 156.9999999999999 M 0 154.9999999999999 L 0 152.9999999999999 M 0 152.9999999999999 M 0 150.9999999999999 L 0 148.9999999999999 M 0 148.9999999999999 M 0 146.9999999999999 L 0 144.9999999999999 M 0 144.9999999999999 M 0 142.9999999999999 L 0 140.9999999999999 M 0 140.9999999999999 M 0 138.9999999999999 L 0 136.9999999999999 M 0 136.9999999999999 M 0 134.9999999999999 L 0 132.9999999999999 M 0 132.9999999999999 M 0 130.9999999999999 L 0 128.9999999999999 M 0 128.9999999999999 M 0 126.99999999999989 L 0 124.99999999999989 M 0 124.99999999999989 M 0 122.99999999999989 L 0 120.99999999999989 M 0 120.99999999999989 M 0 118.99999999999989 L 0 116.99999999999989 M 0 116.99999999999989 M 0 114.99999999999989 L 0 112.99999999999989 M 0 112.99999999999989 M 0 110.99999999999989 L 0 108.99999999999989 M 0 108.99999999999989 M 0 106.99999999999989 L 0 104.99999999999989 M 0 104.99999999999989 M 0 102.99999999999989 L 0 100.99999999999989 M 0 100.99999999999989 M 0 98.99999999999989 L 0 96.99999999999989 M 0 96.99999999999989 M 0 94.99999999999989 L 0 92.99999999999989 M 0 92.99999999999989 M 0 90.99999999999989 L 0 88.99999999999989 M 0 88.99999999999989 M 0 86.99999999999989 L 0 84.99999999999989 M 0 84.99999999999989 M 0 82.99999999999989 L 0 80.99999999999989 M 0 80.99999999999989 M 0 78.99999999999989 L 0 76.99999999999989 M 0 76.99999999999989 M 0 74.99999999999989 L 0 72.99999999999989 M 0 72.99999999999989 M 0 70.99999999999989 L 0 68.99999999999989 M 0 68.99999999999989 M 0 66.99999999999989 L 0 64.99999999999989 M 0 64.99999999999989 M 0 62.999999999999886 L 0 60.999999999999886 M 0 60.999999999999886 M 0 58.999999999999886 L 0 56.999999999999886 M 0 56.999999999999886 M 0 54.999999999999886 L 0 52.999999999999886 M 0 52.999999999999886 M 0 50.999999999999886 L 0 48.999999999999886 M 0 48.999999999999886 M 0 46.999999999999886 L 0 44.999999999999886 M 0 44.999999999999886 M 0 42.999999999999886 L 0 40.999999999999886 M 0 40.999999999999886 M 0 38.999999999999886 L 0 36.999999999999886 M 0 36.999999999999886 M 0 34.999999999999886 L 0 32.999999999999886 M 0 32.999999999999886 M 0 30.999999999999886 L 0 28.999999999999886 M 0 28.999999999999886 M 0 26.999999999999886 L 0 24.999999999999886 M 0 24.999999999999886 M 0 22.999999999999886 L 0 20.999999999999886 M 0 20.999999999999886 M 0 18.999999999999886 L 0 16.999999999999886 M 0 16.999999999999886 M 0 14.999999999999886 L 0 12.999999999999886 M 0 12.999999999999886 M 0 10.999999999999886 L 0 8.999999999999886 M 0 8.999999999999886 M 0 6.999999999999886 L 0 4.999999999999886 M 0 4.999999999999886 M 0 2.9999999999998863 L 0 0.9999999999998863 M 0 0.9999999999998863 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,452,268)"><g transform="translate(0,0)"><g transform="translate(-37.5,-145.5) translate(-414.5,-122.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 456.5 272.5 L 468.5 353" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,472.71067811865476,268)"><g transform="translate(0,0)"><g transform="translate(-76.5,-141.5) translate(-396.21067811865476,-126.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 477.21067811865476 272.5 L 636.5 353" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,386.71067811865476,108.5)"><g transform="translate(0,0)"><g transform="translate(-239.28932188134524,-150) translate(-147.4213562373095,41.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 435.78932188134524 197.5 L 391.21067811865476 113" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,298.21067811865476,108.49999999999999)"><g transform="translate(0,0)"><g transform="translate(-245,-109) translate(-53.210678118654755,0.5000000000000142) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 349.78932188134524 112.99999999999999 L 302.71067811865476 197.5" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,259.5,268)"><g transform="translate(0,0)"><g transform="translate(-182,-272.5) translate(-77.5,4.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 282 272.5 L 264 353" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,98.5,268)"><g transform="translate(0,0)"><g transform="translate(-195,-261.5) translate(96.5,-6.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 261.28932188134524 272.5 L 103 353" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,320.5,38)"><g transform="translate(4,4) scale(1.01,1.0133333333333334)"><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="url(#pTwRJlTmoXQS)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#pTwRJlTmoXQS)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,331,69)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="22" y="11">R</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="30.666671752929688" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="37.33332824707031" y="11">u</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="44" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="47.33332824707031" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="54" y="11">r</text></g></g><g transform="translate(0,0) matrix(1,0,0,1,232,197.5)"><g transform="translate(4,4) scale(1.01,1.0133333333333334)"><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="url(#FvZELeRwLGTV)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#FvZELeRwLGTV)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,242,228)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="24.333328247070312" y="11">H</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="33" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="39.66667175292969" y="11">s</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="45.66667175292969" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="49" y="11">1</text></g></g><g transform="translate(0,0) matrix(1,0,0,1,406.5,197.5)"><g transform="translate(4,4) scale(1.01,1.0133333333333334)"><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="url(#icpEixOwIatD)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#icpEixOwIatD)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,417,228)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="24.333328247070312" y="11">H</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="33" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="39.66667175292969" y="11">s</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="45.66667175292969" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="49" y="11">2</text></g></g><g transform="matrix(1,0,0,1,222,168)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="0" y="11">eth0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="26.666671752929688" y="11">2001</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="53.33332824707031" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="56.66667175292969" y="11">db8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="76.66667175292969" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="80" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="86.66667175292969" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="90" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="96.66667175292969" y="11">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="103.33332824707031" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="110" y="11">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="113.33332824707031" y="11">64</text></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="26.666671752929688" y="25">fe80</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="50" y="25">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="56.66667175292969" y="25">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="63.33332824707031" y="25">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="66.66667175292969" y="25">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="73.33332824707031" y="25">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="76.66667175292969" y="25">64</text></g></g><g transform="matrix(1,0,0,1,397,168)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="0" y="11.75">eth0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.666671752929688" y="11.75">2001</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="53.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="56.66667175292969" y="11.75">db8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="76.66667175292969" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="80" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="86.66667175292969" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="90" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="96.66667175292969" y="11.75">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="103.33332824707031" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="110" y="11.75">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="113.33332824707031" y="11.75">64</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.666671752929688" y="27.25">fe80</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="50" y="27.25">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="56.66667175292969" y="27.25">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="63.33332824707031" y="27.25">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="66.66667175292969" y="27.25">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="73.33332824707031" y="27.25">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="76.66667175292969" y="27.25">64</text></g><g><g/></g></g><g transform="matrix(1,0,0,1,384,280)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="23.333328247070312" y="11">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="30" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="36.66667175292969" y="11">c</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="42.66667175292969" y="11">k</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="48.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="55.33332824707031" y="11">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="59.33332824707031" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="69.33332824707031" y="11">f</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="72.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="79.33332824707031" y="11">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="86" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="92.66667175292969" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="96" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="99.33332824707031" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="106" y="11">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="109.33332824707031" y="11">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="116" y="11">4</text></g></g><g transform="matrix(1,0,0,1,209,281)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="23.333328247070312" y="11">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="30" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="36.66667175292969" y="11">c</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="42.66667175292969" y="11">k</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="48.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="55.33332824707031" y="11">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="59.33332824707031" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="69.33332824707031" y="11">f</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="72.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="79.33332824707031" y="11">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="86" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="92.66667175292969" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="96" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="99.33332824707031" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="106" y="11">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="109.33332824707031" y="11">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="116" y="11">4</text></g></g><g transform="translate(0,0) matrix(1,0,0,1,53,353)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#LkNSHLFhaxIA)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#LkNSHLFhaxIA)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,63,396)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="5.3333282470703125" y="11">C</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="14" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="20.666671752929688" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="27.333328247070312" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="30.666671752929688" y="11">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="37.33332824707031" y="11">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="40" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="46.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="53.33332824707031" y="11">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="57.33332824707031" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="64" y="11">-</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="68" y="11">1</text></g></g><g transform="translate(0,0) matrix(1,0,0,1,214,353)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#oKDqFTggfkIn)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#oKDqFTggfkIn)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,224,396)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="5.3333282470703125" y="11">C</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="14" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="20.666671752929688" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="27.333328247070312" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="30.666671752929688" y="11">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="37.33332824707031" y="11">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="40" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="46.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="53.33332824707031" y="11">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="57.33332824707031" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="64" y="11">-</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="68" y="11">2</text></g></g><g transform="matrix(1,0,0,1,30,336)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.666671752929688" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.333328247070312" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="19.666671752929688" y="11.75">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.333328247070312" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="36.33332824707031" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="43" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="49.66667175292969" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="56.33332824707031" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="63" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="66.33332824707031" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="73" y="11.75">b</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="79.66667175292969" y="11.75">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="86.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="89.66667175292969" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="96.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="99.66667175292969" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="106.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="109.66667175292969" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="113" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="119.66667175292969" y="11.75">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="123" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="129.6666717529297" y="11.75">4</text></g></g><g transform="translate(0,0) matrix(1,0,0,1,418.5,353)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#ibePzzWmRSgC)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#ibePzzWmRSgC)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,429,396)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="5.3333282470703125" y="11">C</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="14" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="20.666671752929688" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="27.333328247070312" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="30.666671752929688" y="11">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="37.33332824707031" y="11">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="40" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="46.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="53.33332824707031" y="11">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="57.33332824707031" y="11">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="64" y="11">-</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="68" y="11">1</text></g></g><g transform="translate(0,0) matrix(1,0,0,1,586.5,353)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#OTKSvGQcxRpB)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#OTKSvGQcxRpB)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,597,396)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="5.3333282470703125" y="11">C</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="14" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="20.666671752929688" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="27.333328247070312" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="30.666671752929688" y="11">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="37.33332824707031" y="11">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="40" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="46.66667175292969" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="53.33332824707031" y="11">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="57.33332824707031" y="11">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="64" y="11">-</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="68" y="11">2</text></g></g><g transform="matrix(1,0,0,1,28,201)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="0" y="11.75">ip</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.333328247070312" y="11.75"> -</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.666671752929688" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.666671752929688" y="11.75">route</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="57.33332824707031" y="11.75">add</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="80.66667175292969" y="11.75">default</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="120" y="11.75">via</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="138.6666717529297" y="11.75">fe80</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="162" y="11.75">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="168.6666717529297" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="175.3333282470703" y="11.75"> \</text></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="10" y="27.25">dev</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="32.66667175292969" y="27.25">eth0</text></g><g><g/></g></g><g transform="matrix(1,0,0,1,387,463)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="55.53334045410156" y="11.75">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="58.19999694824219" y="11.75">p</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="68.19999694824219" y="11.75">-</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="72.19999694824219" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="82.19999694824219" y="11.75">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="86.19999694824219" y="11.75">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="92.86666870117188" y="11.75">u</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="99.53334045410156" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="102.86666870117188" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="112.86666870117188" y="11.75">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="119.53334045410156" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="126.19999694824219" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="136.1999969482422" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="142.86666870117188" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="149.53334045410156" y="11.75">f</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="152.86666870117188" y="11.75">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="159.53334045410156" y="11.75">u</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="166.1999969482422" y="11.75">l</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="168.86666870117188" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="175.53334045410156" y="11.75">v</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="181.53334045410156" y="11.75">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="184.1999969482422" y="11.75">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="194.1999969482422" y="11.75">f</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="197.53334045410156" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="204.1999969482422" y="11.75">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="210.86666870117188" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="217.53334045410156" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="220.86666870117188" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="224.1999969482422" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="234.1999969482422" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="240.86666870117188" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="247.53334045410156" y="11.75">v</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="256.8666687011719" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="263.5333251953125" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="266.8666687011719" y="11.75">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="273.5333251953125" y="11.75">0</text></g></g><g transform="matrix(1,0,0,1,14,464)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="58.91667175292969" y="11.75">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="61.58332824707031" y="11.75">p</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="71.58332824707031" y="11.75">-</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="75.58332824707031" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="85.58332824707031" y="11.75">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="89.58332824707031" y="11.75">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="96.25" y="11.75">u</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="102.91667175292969" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="106.25" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="116.25" y="11.75">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="122.91667175292969" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="129.5833282470703" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="139.5833282470703" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="146.25" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="152.9166717529297" y="11.75">f</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="156.25" y="11.75">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="162.9166717529297" y="11.75">u</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="169.5833282470703" y="11.75">l</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="172.25" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="178.9166717529297" y="11.75">v</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="184.9166717529297" y="11.75">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="187.5833282470703" y="11.75">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="197.5833282470703" y="11.75">f</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="200.9166717529297" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="207.5833282470703" y="11.75">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="214.25" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="220.9166717529297" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="224.25" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="227.5833282470703" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="237.5833282470703" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="244.25" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="250.9166717529297" y="11.75">v</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="260.25" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="266.91668701171875" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="270.25" y="11.75">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="276.91668701171875" y="11.75">0</text></g></g><g transform="matrix(1,0,0,1,-9.000680271168676,245.99999999999966)"><g transform="translate(0,0)"><g transform="translate(-793,-250) translate(802.0006802711687,4.000000000000341) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#000000" d="M 723.75 250 M 723.75 250 L 715.75 250 M 715.75 250 M 707.75 250 L 699.75 250 M 699.75 250 M 691.75 250 L 683.75 250 M 683.75 250 M 675.75 250 L 667.75 250 M 667.75 250 M 659.75 250 L 651.75 250 M 651.75 250 M 643.75 250 L 635.75 250 M 635.75 250 M 627.75 250 L 619.75 250 M 619.75 250 M 611.75 250 L 603.75 250 M 603.75 250 M 595.75 250 L 587.75 250 M 587.75 250 M 579.75 250 L 571.75 250 M 571.75 250 M 563.75 250 L 555.75 250 M 555.75 250 M 547.75 250 L 539.75 250 M 539.75 250 M 531.75 250 L 523.75 250 M 523.75 250 M 515.75 250 L 507.75 250 M 507.75 250 M 499.75 250 L 491.75 250 M 491.75 250 M 483.75 250 L 475.75 250 M 475.75 250 M 467.75 250 L 459.75 250 M 459.75 250 M 451.75 250 L 443.75 250 M 443.75 250 M 435.75 250 L 427.75 250 M 427.75 250 M 419.75 250 L 411.75 250 M 411.75 250 M 403.75 250 L 395.75 250 M 395.75 250 M 387.75 250 L 379.75 250 M 379.75 250 M 371.75 250 L 363.75 250 M 363.75 250 M 355.75 250 L 347.75 250 M 347.75 250 M 339.75 250 L 331.75 250 M 331.75 250 M 323.75 250 L 315.75 250 M 315.75 250 M 307.75 250 L 299.75 250 M 299.75 250 M 291.75 250 L 283.75 250 M 283.75 250 M 275.75 250 L 267.75 250 M 267.75 250 M 259.75 250 L 251.75 250 M 251.75 250 M 243.75 250 L 235.75 250 M 235.75 250 M 227.75 250 L 219.75 250 M 219.75 250 M 211.75 250 L 203.75 250 M 203.75 250 M 195.75 250 L 187.75 250 M 187.75 250 M 179.75 250 L 171.75 249.99999999999997 M 171.75 249.99999999999997 M 163.75 249.99999999999997 L 155.75 249.99999999999994 M 155.75 249.99999999999994 M 147.75 249.99999999999994 L 139.75 249.99999999999991 M 139.75 249.99999999999991 M 131.75 249.99999999999991 L 123.75 249.9999999999999 M 123.75 249.9999999999999 M 115.75 249.9999999999999 L 107.75 249.99999999999986 M 107.75 249.99999999999986 M 99.75 249.99999999999986 L 91.75 249.99999999999983 M 91.75 249.99999999999983 M 83.75 249.99999999999983 L 75.75 249.9999999999998 M 75.75 249.9999999999998 M 67.75 249.9999999999998 L 59.75 249.99999999999977 M 59.75 249.99999999999977 M 51.75 249.99999999999977 L 43.75 249.99999999999974 M 43.75 249.99999999999974 M 35.75 249.99999999999974 L 27.75 249.99999999999972 M 27.75 249.99999999999972 M 19.75 249.99999999999972 L 11.75 249.9999999999997 M 11.75 249.9999999999997 M 3.75 249.9999999999997 L -4.25 249.99999999999966 M -4.25 249.99999999999966" stroke-miterlimit="10"/></g></g></g></g><g transform="matrix(1,0,0,1,531,199)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="0" y="11.75">ip</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.333328247070312" y="11.75"> -</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.666671752929688" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.666671752929688" y="11.75">route</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="57.33332824707031" y="11.75">add</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="80.66667175292969" y="11.75">default</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="120" y="11.75">via</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="138.6666717529297" y="11.75">fe80</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="162" y="11.75">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="168.6666717529297" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="175.3333282470703" y="11.75"> \</text></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="13.333328247070312" y="27.25">dev</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="36" y="27.25">eth0</text></g></g><g transform="matrix(1,0,0,1,191,336)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.666671752929688" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.333328247070312" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="19.666671752929688" y="11.75">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.333328247070312" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="36.33332824707031" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="43" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="49.66667175292969" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="56.33332824707031" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="63" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="66.33332824707031" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="73" y="11.75">b</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="79.66667175292969" y="11.75">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="86.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="89.66667175292969" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="96.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="99.66667175292969" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="106.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="109.66667175292969" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="113" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="119.66667175292969" y="11.75">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="123" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="129.6666717529297" y="11.75">4</text></g></g><g transform="matrix(1,0,0,1,397,336)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.666671752929688" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.333328247070312" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="19.666671752929688" y="11.75">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.333328247070312" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="36.33332824707031" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="43" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="49.66667175292969" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="56.33332824707031" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="63" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="66.33332824707031" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="73" y="11.75">b</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="79.66667175292969" y="11.75">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="86.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="89.66667175292969" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="96.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="99.66667175292969" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="106.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="109.66667175292969" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="113" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="119.66667175292969" y="11.75">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="123" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="129.6666717529297" y="11.75">4</text></g></g><g transform="matrix(1,0,0,1,565,336)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.666671752929688" y="11.75">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.333328247070312" y="11.75">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="19.666671752929688" y="11.75">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.333328247070312" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="36.33332824707031" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="43" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="49.66667175292969" y="11.75">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="56.33332824707031" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="63" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="66.33332824707031" y="11.75">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="73" y="11.75">b</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="79.66667175292969" y="11.75">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="86.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="89.66667175292969" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="96.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="99.66667175292969" y="11.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="106.33332824707031" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="109.66667175292969" y="11.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="113" y="11.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="119.66667175292969" y="11.75">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="123" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="129.6666717529297" y="11.75">4</text></g></g><g transform="matrix(1,0,0,1,436,47)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="0" y="11.75">ip</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.333328247070312" y="11.75"> -</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.666671752929688" y="11.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.666671752929688" y="11.75">route</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="57.33332824707031" y="11.75">add</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="80.66667175292969" y="11.75">default</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="120" y="11.75">via</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="138.6666717529297" y="11.75">fe80</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="162" y="11.75">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="168.6666717529297" y="11.75">1</text></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="178.6666717529297" y="11.75">dev</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="201.3333282470703" y="11.75">eth0</text></g><g><g/></g><g><g/></g><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="0" y="58.25">ip</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.333328247070312" y="58.25"> -</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.666671752929688" y="58.25">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.666671752929688" y="58.25">route</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="57.33332824707031" y="58.25">add</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="80.66667175292969" y="58.25">2001</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="107.33332824707031" y="58.25">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="110.66667175292969" y="58.25">db8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="130.6666717529297" y="58.25">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="134" y="58.25">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="140.6666717529297" y="58.25">::/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="150.6666717529297" y="58.25">48</text></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="167.3333282470703" y="58.25">via</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="186" y="58.25">fe80</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="209.3333282470703" y="58.25">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="216" y="58.25">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="222.6666717529297" y="58.25">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="226" y="58.25">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="236" y="58.25">dev</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="258.66668701171875" y="58.25">eth1</text></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="0" y="73.75">ip</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="9.333328247070312" y="73.75"> -</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="16.666671752929688" y="73.75">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="26.666671752929688" y="73.75">route</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="57.33332824707031" y="73.75">add</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="80.66667175292969" y="73.75">2001</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="107.33332824707031" y="73.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="110.66667175292969" y="73.75">db8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="130.6666717529297" y="73.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="134" y="73.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="140.6666717529297" y="73.75">::/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="150.6666717529297" y="73.75">48</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="167.3333282470703" y="73.75">via</text></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="186" y="73.75">fe80</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="209.3333282470703" y="73.75">::</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="216" y="73.75">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="222.6666717529297" y="73.75">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="226" y="73.75">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="236" y="73.75">dev</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="15.5px" x="258.66668701171875" y="73.75">eth1</text></g></g><g transform="matrix(1,0,0,1,295,120)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="33" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="39.66667175292969" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="43" y="11">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="49.66667175292969" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="59.66667175292969" y="11">f</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="63" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="69.66667175292969" y="11">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="76.33332824707031" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="83" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="86.33332824707031" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="89.66667175292969" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="96.33332824707031" y="11">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="99.66667175292969" y="11">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="106.33332824707031" y="11">4</text></g></g><g transform="matrix(1,0,0,1,298,21)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="19.666671752929688" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="26.333328247070312" y="11">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="29.666671752929688" y="11">h</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="36.33332824707031" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="46.33332824707031" y="11">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="53" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="59.66667175292969" y="11">0</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="66.33332824707031" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="73" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="76.33332824707031" y="11">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="83" y="11">b</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="89.66667175292969" y="11">8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="96.33332824707031" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="99.66667175292969" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="103" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="109.66667175292969" y="11">/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="113" y="11">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="119.66667175292969" y="11">4</text></g></g><g transform="matrix(1,0,0,1,261,492)"><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="0.633331298828125" y="9">c</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="5.633331298828125" y="9">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="11.199996948242188" y="9">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="16.76666259765625" y="9">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="19.550003051757812" y="9">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="25.116668701171875" y="9">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="27.333328247070312" y="9">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="32.899993896484375" y="9">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="38.46665954589844" y="9">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="41.80000305175781" y="9">s</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="46.80000305175781" y="9">'</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="51.5" y="9">l</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="53.71665954589844" y="9">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="55.93333435058594" y="9">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="61.5" y="9">k</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="66.5" y="9">-</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="69.83332824707031" y="9">l</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="72.05000305175781" y="9">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="77.61666870117188" y="9">c</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="82.61666870117188" y="9">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="88.18333435058594" y="9">l</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="93.18333435058594" y="9">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="98.75" y="9">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="104.31666564941406" y="9">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="109.88333129882812" y="9">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="113.21665954589844" y="9">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="118.78334045410156" y="9">s</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="123.78334045410156" y="9">s</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="128.78334045410156" y="9">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="134.35000610351562" y="9">s</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="142.13333129882812" y="9">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="147.6999969482422" y="9">r</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="151.03334045410156" y="9">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="159.38333129882812" y="9">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="164.9499969482422" y="9">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="170.51666259765625" y="9">t</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="176.0833282470703" y="9">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="181.64999389648438" y="9">i</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="183.86666870117188" y="9">s</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="188.86666870117188" y="9">p</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="194.43333435058594" y="9">l</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="196.64999389648438" y="9">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="202.21665954589844" y="9">y</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="207.21665954589844" y="9">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="10px" font-style="italic" font-weight="normal" text-decoration="" line-height="11px" x="212.78334045410156" y="9">d</text></g></g><g transform="matrix(1,0,0,1,28,254)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="0" y="11">ip</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="9.333328247070312" y="11"> -</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="16.666671752929688" y="11">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="26.666671752929688" y="11">route</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="57.33332824707031" y="11">add</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="80.66667175292969" y="11">2001</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="107.33332824707031" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="110.66667175292969" y="11">db8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="130.6666717529297" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="134" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="140.6666717529297" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="144" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="150.6666717529297" y="11">::/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="160.6666717529297" y="11">64</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="174" y="11"> \</text></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="13.333328247070312" y="25">dev</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="36" y="25">docker0</text></g></g><g transform="matrix(1,0,0,1,531,252)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="0" y="11">ip</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="9.333328247070312" y="11"> -</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="16.666671752929688" y="11">6</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="26.666671752929688" y="11">route</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="57.33332824707031" y="11">add</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="80.66667175292969" y="11">2001</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="107.33332824707031" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="110.66667175292969" y="11">db8</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="130.6666717529297" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="134" y="11">2</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="140.6666717529297" y="11">:</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="144" y="11">1</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="150.6666717529297" y="11">::/</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="160.6666717529297" y="11">64</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="174" y="11"> \</text></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="13.333328247070312" y="25">dev</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="36" y="25">docker0</text></g></g><g transform="matrix(1,0,0,1,727,231)"><image width="40" height="275" preserveAspectRatio="none" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAETCAYAAABENES3AAABz0lEQVR4nO3QMU4CURhF4UukoJtS1wFb0R3ACoTaBvekO9EaWzrqwYIHjMJEzCneFOdLbjJ5meLkTyRJkiTpZqPaAX95LBukuyQfZXeVW66aJ9mXzSu3XJgk2SRpyzblbTCWOV/vuGXVoo4myTaHyx3j2vLWVOw6WefyesetK3YlSe6T7NIfuCv/VDNLsursvaz7NqtWd8VL2WAZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGUgZSBlIGfhf0yTPnb2Vdd+m1eqSPCTZJdn3bFf+qeo1/YHril0nTZJtkjbnsLa8NRW7fljl8nqrqkW/TJJ85XC5tnxPqhZdscj5eovKLVeNk3yWjSu39HoqG6xRmSRJkiTd4hsDlSZIihHtWwAAAABJRU5ErkJggg==" transform="translate(0,0)"/></g><g transform="matrix(-1.8369701987210297e-16,-1,1,-1.8369701987210297e-16,731,443)"><g><g/></g><g><g/></g><g><g/></g><g><g/><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="19.333328247070312" y="11">m</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="29.333328247070312" y="11">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="36" y="11">n</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="42.66667175292969" y="11">a</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="49.33332824707031" y="11">g</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="56" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="62.66667175292969" y="11">d</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="72.66667175292969" y="11">b</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="79.33332824707031" y="11">y</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="88.66667175292969" y="11">D</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="97.33332824707031" y="11">o</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="104" y="11">c</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="110" y="11">k</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="116" y="11">e</text><text fill="rgb(0, 0, 0)" stroke="none" font-family="&quot;Arial&quot;" font-size="12px" font-style="normal" font-weight="normal" text-decoration="" line-height="14px" x="122.66667175292969" y="11">r</text></g></g><g transform="matrix(1,0,0,1,719.75,246)"><g transform="translate(0,0)"><g transform="translate(-765,-250) translate(45.25,4) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#000000" d="M 752.0176935741139 250 L 723.75 250" stroke-miterlimit="10"/></g></g></g></g><g transform="matrix(1,0,0,1,720.75,483)"><g transform="translate(0,0)"><g transform="translate(-766,-487) translate(45.25,4) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#000000" d="M 752.0183424505415 487 L 724.75 487" stroke-miterlimit="10"/></g></g></g></g></g></svg>
\ No newline at end of file
+<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="912.767693574114" height="467"><defs><linearGradient id="kKfylQdvRaFI" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#e2e2e2"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="okETWrHtmeUv" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a4c2f4"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="WLczqDFsktCx" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a4c2f4"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="EOtrFaZZJZro" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="itkkpRbaglyb" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="MpRBXNFHBTHf" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient><linearGradient id="tVySeLJzhfuC" x1="0px" x2="0px" y1="100px" y2="-50px" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ead1dc"/><stop offset="1" stop-color="#FFFFFF"/></linearGradient></defs><g transform="translate(0,0)"><g><rect fill="#FFFFFF" stroke="none" x="0" y="0" width="912.767693574114" height="467"/></g><g transform="translate(0,0) matrix(1,0,0,1,447.75,107.5)"><g><g transform="translate(0,0) scale(4.160000000000001,1.41)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.24038461538461534,0.7092198581560284)"><path fill="none" stroke="none" d="M 0 0 L 416.0000000000001 0 Q 416.0000000000001 0 416.0000000000001 0 L 416.0000000000001 141 Q 416.0000000000001 141 416.0000000000001 141 L 0 141 Q 0 141 0 141 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 340 0 L 342 0 M 342 0 M 344 0 L 346 0 M 346 0 M 348 0 L 350 0 M 350 0 M 352 0 L 354 0 M 354 0 M 356 0 L 358 0 M 358 0 M 360 0 L 362 0 M 362 0 M 364 0 L 366 0 M 366 0 M 368 0 L 370 0 M 370 0 M 372 0 L 374 0 M 374 0 M 376 0 L 378 0 M 378 0 M 380 0 L 382 0 M 382 0 M 384 0 L 386 0 M 386 0 M 388 0 L 390 0 M 390 0 M 392 0 L 394 0 M 394 0 M 396 0 L 398 0 M 398 0 M 400 0 L 402 0 M 402 0 M 404 0 L 406 0 M 406 0 M 408 0 L 410 0 M 410 0 M 412 0 L 414 0 M 414 0 M 416 0 L 416.0000000000001 0 Q 416.0000000000001 0 416.0000000000001 0 L 416.0000000000001 1.9999999999998863 M 416.0000000000001 1.9999999999998863 M 416.0000000000001 3.9999999999998863 L 416.0000000000001 5.999999999999886 M 416.0000000000001 5.999999999999886 M 416.0000000000001 7.999999999999886 L 416.0000000000001 9.999999999999886 M 416.0000000000001 9.999999999999886 M 416.0000000000001 11.999999999999886 L 416.0000000000001 13.999999999999886 M 416.0000000000001 13.999999999999886 M 416.0000000000001 15.999999999999886 L 416.0000000000001 17.999999999999886 M 416.0000000000001 17.999999999999886 M 416.0000000000001 19.999999999999886 L 416.0000000000001 21.999999999999886 M 416.0000000000001 21.999999999999886 M 416.0000000000001 23.999999999999886 L 416.0000000000001 25.999999999999886 M 416.0000000000001 25.999999999999886 M 416.0000000000001 27.999999999999886 L 416.0000000000001 29.999999999999886 M 416.0000000000001 29.999999999999886 M 416.0000000000001 31.999999999999886 L 416.0000000000001 33.999999999999886 M 416.0000000000001 33.999999999999886 M 416.0000000000001 35.999999999999886 L 416.0000000000001 37.999999999999886 M 416.0000000000001 37.999999999999886 M 416.0000000000001 39.999999999999886 L 416.0000000000001 41.999999999999886 M 416.0000000000001 41.999999999999886 M 416.0000000000001 43.999999999999886 L 416.0000000000001 45.999999999999886 M 416.0000000000001 45.999999999999886 M 416.0000000000001 47.999999999999886 L 416.0000000000001 49.999999999999886 M 416.0000000000001 49.999999999999886 M 416.0000000000001 51.999999999999886 L 416.0000000000001 53.999999999999886 M 416.0000000000001 53.999999999999886 M 416.0000000000001 55.999999999999886 L 416.0000000000001 57.999999999999886 M 416.0000000000001 57.999999999999886 M 416.0000000000001 59.999999999999886 L 416.0000000000001 61.999999999999886 M 416.0000000000001 61.999999999999886 M 416.0000000000001 63.999999999999886 L 416.0000000000001 65.99999999999989 M 416.0000000000001 65.99999999999989 M 416.0000000000001 67.99999999999989 L 416.0000000000001 69.99999999999989 M 416.0000000000001 69.99999999999989 M 416.0000000000001 71.99999999999989 L 416.0000000000001 73.99999999999989 M 416.0000000000001 73.99999999999989 M 416.0000000000001 75.99999999999989 L 416.0000000000001 77.99999999999989 M 416.0000000000001 77.99999999999989 M 416.0000000000001 79.99999999999989 L 416.0000000000001 81.99999999999989 M 416.0000000000001 81.99999999999989 M 416.0000000000001 83.99999999999989 L 416.0000000000001 85.99999999999989 M 416.0000000000001 85.99999999999989 M 416.0000000000001 87.99999999999989 L 416.0000000000001 89.99999999999989 M 416.0000000000001 89.99999999999989 M 416.0000000000001 91.99999999999989 L 416.0000000000001 93.99999999999989 M 416.0000000000001 93.99999999999989 M 416.0000000000001 95.99999999999989 L 416.0000000000001 97.99999999999989 M 416.0000000000001 97.99999999999989 M 416.0000000000001 99.99999999999989 L 416.0000000000001 101.99999999999989 M 416.0000000000001 101.99999999999989 M 416.0000000000001 103.99999999999989 L 416.0000000000001 105.99999999999989 M 416.0000000000001 105.99999999999989 M 416.0000000000001 107.99999999999989 L 416.0000000000001 109.99999999999989 M 416.0000000000001 109.99999999999989 M 416.0000000000001 111.99999999999989 L 416.0000000000001 113.99999999999989 M 416.0000000000001 113.99999999999989 M 416.0000000000001 115.99999999999989 L 416.0000000000001 117.99999999999989 M 416.0000000000001 117.99999999999989 M 416.0000000000001 119.99999999999989 L 416.0000000000001 121.99999999999989 M 416.0000000000001 121.99999999999989 M 416.0000000000001 123.99999999999989 L 416.0000000000001 125.99999999999989 M 416.0000000000001 125.99999999999989 M 416.0000000000001 127.99999999999989 L 416.0000000000001 129.9999999999999 M 416.0000000000001 129.9999999999999 M 416.0000000000001 131.9999999999999 L 416.0000000000001 133.9999999999999 M 416.0000000000001 133.9999999999999 M 416.0000000000001 135.9999999999999 L 416.0000000000001 137.9999999999999 M 416.0000000000001 137.9999999999999 M 416.0000000000001 139.9999999999999 L 416.0000000000001 141 Q 416.0000000000001 141 416.0000000000001 141 L 415.0000000000002 141 M 415.0000000000002 141 M 413.0000000000002 141 L 411.0000000000002 141 M 411.0000000000002 141 M 409.0000000000002 141 L 407.0000000000002 141 M 407.0000000000002 141 M 405.0000000000002 141 L 403.0000000000002 141 M 403.0000000000002 141 M 401.0000000000002 141 L 399.0000000000002 141 M 399.0000000000002 141 M 397.0000000000002 141 L 395.0000000000002 141 M 395.0000000000002 141 M 393.0000000000002 141 L 391.0000000000002 141 M 391.0000000000002 141 M 389.0000000000002 141 L 387.0000000000002 141 M 387.0000000000002 141 M 385.0000000000002 141 L 383.0000000000002 141 M 383.0000000000002 141 M 381.0000000000002 141 L 379.0000000000002 141 M 379.0000000000002 141 M 377.0000000000002 141 L 375.0000000000002 141 M 375.0000000000002 141 M 373.0000000000002 141 L 371.0000000000002 141 M 371.0000000000002 141 M 369.0000000000002 141 L 367.0000000000002 141 M 367.0000000000002 141 M 365.0000000000002 141 L 363.0000000000002 141 M 363.0000000000002 141 M 361.0000000000002 141 L 359.0000000000002 141 M 359.0000000000002 141 M 357.0000000000002 141 L 355.0000000000002 141 M 355.0000000000002 141 M 353.0000000000002 141 L 351.0000000000002 141 M 351.0000000000002 141 M 349.0000000000002 141 L 347.0000000000002 141 M 347.0000000000002 141 M 345.0000000000002 141 L 343.0000000000002 141 M 343.0000000000002 141 M 341.0000000000002 141 L 339.0000000000002 141 M 339.0000000000002 141 M 337.0000000000002 141 L 335.0000000000002 141 M 335.0000000000002 141 M 333.0000000000002 141 L 331.0000000000002 141 M 331.0000000000002 141 M 329.0000000000002 141 L 327.0000000000002 141 M 327.0000000000002 141 M 325.0000000000002 141 L 323.0000000000002 141 M 323.0000000000002 141 M 321.0000000000002 141 L 319.0000000000002 141 M 319.0000000000002 141 M 317.0000000000002 141 L 315.0000000000002 141 M 315.0000000000002 141 M 313.0000000000002 141 L 311.0000000000002 141 M 311.0000000000002 141 M 309.0000000000002 141 L 307.0000000000002 141 M 307.0000000000002 141 M 305.0000000000002 141 L 303.0000000000002 141 M 303.0000000000002 141 M 301.0000000000002 141 L 299.0000000000002 141 M 299.0000000000002 141 M 297.0000000000002 141 L 295.0000000000002 141 M 295.0000000000002 141 M 293.0000000000002 141 L 291.0000000000002 141 M 291.0000000000002 141 M 289.0000000000002 141 L 287.0000000000002 141 M 287.0000000000002 141 M 285.0000000000002 141 L 283.0000000000002 141 M 283.0000000000002 141 M 281.0000000000002 141 L 279.0000000000002 141 M 279.0000000000002 141 M 277.0000000000002 141 L 275.0000000000002 141 M 275.0000000000002 141 M 273.0000000000002 141 L 271.0000000000002 141 M 271.0000000000002 141 M 269.0000000000002 141 L 267.0000000000002 141 M 267.0000000000002 141 M 265.0000000000002 141 L 263.0000000000002 141 M 263.0000000000002 141 M 261.0000000000002 141 L 259.0000000000002 141 M 259.0000000000002 141 M 257.0000000000002 141 L 255.00000000000023 141 M 255.00000000000023 141 M 253.00000000000023 141 L 251.00000000000023 141 M 251.00000000000023 141 M 249.00000000000023 141 L 247.00000000000023 141 M 247.00000000000023 141 M 245.00000000000023 141 L 243.00000000000023 141 M 243.00000000000023 141 M 241.00000000000023 141 L 239.00000000000023 141 M 239.00000000000023 141 M 237.00000000000023 141 L 235.00000000000023 141 M 235.00000000000023 141 M 233.00000000000023 141 L 231.00000000000023 141 M 231.00000000000023 141 M 229.00000000000023 141 L 227.00000000000023 141 M 227.00000000000023 141 M 225.00000000000023 141 L 223.00000000000023 141 M 223.00000000000023 141 M 221.00000000000023 141 L 219.00000000000023 141 M 219.00000000000023 141 M 217.00000000000023 141 L 215.00000000000023 141 M 215.00000000000023 141 M 213.00000000000023 141 L 211.00000000000023 141 M 211.00000000000023 141 M 209.00000000000023 141 L 207.00000000000023 141 M 207.00000000000023 141 M 205.00000000000023 141 L 203.00000000000023 141 M 203.00000000000023 141 M 201.00000000000023 141 L 199.00000000000023 141 M 199.00000000000023 141 M 197.00000000000023 141 L 195.00000000000023 141 M 195.00000000000023 141 M 193.00000000000023 141 L 191.00000000000023 141 M 191.00000000000023 141 M 189.00000000000023 141 L 187.00000000000023 141 M 187.00000000000023 141 M 185.00000000000023 141 L 183.00000000000023 141 M 183.00000000000023 141 M 181.00000000000023 141 L 179.00000000000023 141 M 179.00000000000023 141 M 177.00000000000023 141 L 175.00000000000023 141 M 175.00000000000023 141 M 173.00000000000023 141 L 171.00000000000023 141 M 171.00000000000023 141 M 169.00000000000023 141 L 167.00000000000023 141 M 167.00000000000023 141 M 165.00000000000023 141 L 163.00000000000023 141 M 163.00000000000023 141 M 161.00000000000023 141 L 159.00000000000023 141 M 159.00000000000023 141 M 157.00000000000023 141 L 155.00000000000023 141 M 155.00000000000023 141 M 153.00000000000023 141 L 151.00000000000023 141 M 151.00000000000023 141 M 149.00000000000023 141 L 147.00000000000023 141 M 147.00000000000023 141 M 145.00000000000023 141 L 143.00000000000023 141 M 143.00000000000023 141 M 141.00000000000023 141 L 139.00000000000023 141 M 139.00000000000023 141 M 137.00000000000023 141 L 135.00000000000023 141 M 135.00000000000023 141 M 133.00000000000023 141 L 131.00000000000023 141 M 131.00000000000023 141 M 129.00000000000023 141 L 127.00000000000023 141 M 127.00000000000023 141 M 125.00000000000023 141 L 123.00000000000023 141 M 123.00000000000023 141 M 121.00000000000023 141 L 119.00000000000023 141 M 119.00000000000023 141 M 117.00000000000023 141 L 115.00000000000023 141 M 115.00000000000023 141 M 113.00000000000023 141 L 111.00000000000023 141 M 111.00000000000023 141 M 109.00000000000023 141 L 107.00000000000023 141 M 107.00000000000023 141 M 105.00000000000023 141 L 103.00000000000023 141 M 103.00000000000023 141 M 101.00000000000023 141 L 99.00000000000023 141 M 99.00000000000023 141 M 97.00000000000023 141 L 95.00000000000023 141 M 95.00000000000023 141 M 93.00000000000023 141 L 91.00000000000023 141 M 91.00000000000023 141 M 89.00000000000023 141 L 87.00000000000023 141 M 87.00000000000023 141 M 85.00000000000023 141 L 83.00000000000023 141 M 83.00000000000023 141 M 81.00000000000023 141 L 79.00000000000023 141 M 79.00000000000023 141 M 77.00000000000023 141 L 75.00000000000023 141 M 75.00000000000023 141 M 73.00000000000023 141 L 71.00000000000023 141 M 71.00000000000023 141 M 69.00000000000023 141 L 67.00000000000023 141 M 67.00000000000023 141 M 65.00000000000023 141 L 63.00000000000023 141 M 63.00000000000023 141 M 61.00000000000023 141 L 59.00000000000023 141 M 59.00000000000023 141 M 57.00000000000023 141 L 55.00000000000023 141 M 55.00000000000023 141 M 53.00000000000023 141 L 51.00000000000023 141 M 51.00000000000023 141 M 49.00000000000023 141 L 47.00000000000023 141 M 47.00000000000023 141 M 45.00000000000023 141 L 43.00000000000023 141 M 43.00000000000023 141 M 41.00000000000023 141 L 39.00000000000023 141 M 39.00000000000023 141 M 37.00000000000023 141 L 35.00000000000023 141 M 35.00000000000023 141 M 33.00000000000023 141 L 31.000000000000227 141 M 31.000000000000227 141 M 29.000000000000227 141 L 27.000000000000227 141 M 27.000000000000227 141 M 25.000000000000227 141 L 23.000000000000227 141 M 23.000000000000227 141 M 21.000000000000227 141 L 19.000000000000227 141 M 19.000000000000227 141 M 17.000000000000227 141 L 15.000000000000227 141 M 15.000000000000227 141 M 13.000000000000227 141 L 11.000000000000227 141 M 11.000000000000227 141 M 9.000000000000227 141 L 7.000000000000227 141 M 7.000000000000227 141 M 5.000000000000227 141 L 3.0000000000002274 141 M 3.0000000000002274 141 M 1.0000000000002274 141 L 0 141 Q 0 141 0 141 L 0 140.00000000000023 M 0 140.00000000000023 M 0 138.00000000000023 L 0 136.00000000000023 M 0 136.00000000000023 M 0 134.00000000000023 L 0 132.00000000000023 M 0 132.00000000000023 M 0 130.00000000000023 L 0 128.00000000000023 M 0 128.00000000000023 M 0 126.00000000000023 L 0 124.00000000000023 M 0 124.00000000000023 M 0 122.00000000000023 L 0 120.00000000000023 M 0 120.00000000000023 M 0 118.00000000000023 L 0 116.00000000000023 M 0 116.00000000000023 M 0 114.00000000000023 L 0 112.00000000000023 M 0 112.00000000000023 M 0 110.00000000000023 L 0 108.00000000000023 M 0 108.00000000000023 M 0 106.00000000000023 L 0 104.00000000000023 M 0 104.00000000000023 M 0 102.00000000000023 L 0 100.00000000000023 M 0 100.00000000000023 M 0 98.00000000000023 L 0 96.00000000000023 M 0 96.00000000000023 M 0 94.00000000000023 L 0 92.00000000000023 M 0 92.00000000000023 M 0 90.00000000000023 L 0 88.00000000000023 M 0 88.00000000000023 M 0 86.00000000000023 L 0 84.00000000000023 M 0 84.00000000000023 M 0 82.00000000000023 L 0 80.00000000000023 M 0 80.00000000000023 M 0 78.00000000000023 L 0 76.00000000000023 M 0 76.00000000000023 M 0 74.00000000000023 L 0 72.00000000000023 M 0 72.00000000000023 M 0 70.00000000000023 L 0 68.00000000000023 M 0 68.00000000000023 M 0 66.00000000000023 L 0 64.00000000000023 M 0 64.00000000000023 M 0 62.00000000000023 L 0 60.00000000000023 M 0 60.00000000000023 M 0 58.00000000000023 L 0 56.00000000000023 M 0 56.00000000000023 M 0 54.00000000000023 L 0 52.00000000000023 M 0 52.00000000000023 M 0 50.00000000000023 L 0 48.00000000000023 M 0 48.00000000000023 M 0 46.00000000000023 L 0 44.00000000000023 M 0 44.00000000000023 M 0 42.00000000000023 L 0 40.00000000000023 M 0 40.00000000000023 M 0 38.00000000000023 L 0 36.00000000000023 M 0 36.00000000000023 M 0 34.00000000000023 L 0 32.00000000000023 M 0 32.00000000000023 M 0 30.000000000000227 L 0 28.000000000000227 M 0 28.000000000000227 M 0 26.000000000000227 L 0 24.000000000000227 M 0 24.000000000000227 M 0 22.000000000000227 L 0 20.000000000000227 M 0 20.000000000000227 M 0 18.000000000000227 L 0 16.000000000000227 M 0 16.000000000000227 M 0 14.000000000000227 L 0 12.000000000000227 M 0 12.000000000000227 M 0 10.000000000000227 L 0 8.000000000000227 M 0 8.000000000000227 M 0 6.000000000000227 L 0 4.000000000000227 M 0 4.000000000000227 M 0 2.0000000000002274 L 0 2.2737367544323206e-13 M 0 2.2737367544323206e-13 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,3.4999999999999716,107.5)"><g><g transform="translate(0,0) scale(4.11,1.41)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.24330900243309,0.7092198581560284)"><path fill="none" stroke="none" d="M 0 0 L 411.00000000000006 0 Q 411.00000000000006 0 411.00000000000006 0 L 411.00000000000006 141 Q 411.00000000000006 141 411.00000000000006 141 L 0 141 Q 0 141 0 141 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 340 0 L 342 0 M 342 0 M 344 0 L 346 0 M 346 0 M 348 0 L 350 0 M 350 0 M 352 0 L 354 0 M 354 0 M 356 0 L 358 0 M 358 0 M 360 0 L 362 0 M 362 0 M 364 0 L 366 0 M 366 0 M 368 0 L 370 0 M 370 0 M 372 0 L 374 0 M 374 0 M 376 0 L 378 0 M 378 0 M 380 0 L 382 0 M 382 0 M 384 0 L 386 0 M 386 0 M 388 0 L 390 0 M 390 0 M 392 0 L 394 0 M 394 0 M 396 0 L 398 0 M 398 0 M 400 0 L 402 0 M 402 0 M 404 0 L 406 0 M 406 0 M 408 0 L 410 0 M 410 0 M 411.00000000000006 0.9999999999999432 L 411.00000000000006 2.999999999999943 M 411.00000000000006 2.999999999999943 M 411.00000000000006 4.999999999999943 L 411.00000000000006 6.999999999999943 M 411.00000000000006 6.999999999999943 M 411.00000000000006 8.999999999999943 L 411.00000000000006 10.999999999999943 M 411.00000000000006 10.999999999999943 M 411.00000000000006 12.999999999999943 L 411.00000000000006 14.999999999999943 M 411.00000000000006 14.999999999999943 M 411.00000000000006 16.999999999999943 L 411.00000000000006 18.999999999999943 M 411.00000000000006 18.999999999999943 M 411.00000000000006 20.999999999999943 L 411.00000000000006 22.999999999999943 M 411.00000000000006 22.999999999999943 M 411.00000000000006 24.999999999999943 L 411.00000000000006 26.999999999999943 M 411.00000000000006 26.999999999999943 M 411.00000000000006 28.999999999999943 L 411.00000000000006 30.999999999999943 M 411.00000000000006 30.999999999999943 M 411.00000000000006 32.99999999999994 L 411.00000000000006 34.99999999999994 M 411.00000000000006 34.99999999999994 M 411.00000000000006 36.99999999999994 L 411.00000000000006 38.99999999999994 M 411.00000000000006 38.99999999999994 M 411.00000000000006 40.99999999999994 L 411.00000000000006 42.99999999999994 M 411.00000000000006 42.99999999999994 M 411.00000000000006 44.99999999999994 L 411.00000000000006 46.99999999999994 M 411.00000000000006 46.99999999999994 M 411.00000000000006 48.99999999999994 L 411.00000000000006 50.99999999999994 M 411.00000000000006 50.99999999999994 M 411.00000000000006 52.99999999999994 L 411.00000000000006 54.99999999999994 M 411.00000000000006 54.99999999999994 M 411.00000000000006 56.99999999999994 L 411.00000000000006 58.99999999999994 M 411.00000000000006 58.99999999999994 M 411.00000000000006 60.99999999999994 L 411.00000000000006 62.99999999999994 M 411.00000000000006 62.99999999999994 M 411.00000000000006 64.99999999999994 L 411.00000000000006 66.99999999999994 M 411.00000000000006 66.99999999999994 M 411.00000000000006 68.99999999999994 L 411.00000000000006 70.99999999999994 M 411.00000000000006 70.99999999999994 M 411.00000000000006 72.99999999999994 L 411.00000000000006 74.99999999999994 M 411.00000000000006 74.99999999999994 M 411.00000000000006 76.99999999999994 L 411.00000000000006 78.99999999999994 M 411.00000000000006 78.99999999999994 M 411.00000000000006 80.99999999999994 L 411.00000000000006 82.99999999999994 M 411.00000000000006 82.99999999999994 M 411.00000000000006 84.99999999999994 L 411.00000000000006 86.99999999999994 M 411.00000000000006 86.99999999999994 M 411.00000000000006 88.99999999999994 L 411.00000000000006 90.99999999999994 M 411.00000000000006 90.99999999999994 M 411.00000000000006 92.99999999999994 L 411.00000000000006 94.99999999999994 M 411.00000000000006 94.99999999999994 M 411.00000000000006 96.99999999999994 L 411.00000000000006 98.99999999999994 M 411.00000000000006 98.99999999999994 M 411.00000000000006 100.99999999999994 L 411.00000000000006 102.99999999999994 M 411.00000000000006 102.99999999999994 M 411.00000000000006 104.99999999999994 L 411.00000000000006 106.99999999999994 M 411.00000000000006 106.99999999999994 M 411.00000000000006 108.99999999999994 L 411.00000000000006 110.99999999999994 M 411.00000000000006 110.99999999999994 M 411.00000000000006 112.99999999999994 L 411.00000000000006 114.99999999999994 M 411.00000000000006 114.99999999999994 M 411.00000000000006 116.99999999999994 L 411.00000000000006 118.99999999999994 M 411.00000000000006 118.99999999999994 M 411.00000000000006 120.99999999999994 L 411.00000000000006 122.99999999999994 M 411.00000000000006 122.99999999999994 M 411.00000000000006 124.99999999999994 L 411.00000000000006 126.99999999999994 M 411.00000000000006 126.99999999999994 M 411.00000000000006 128.99999999999994 L 411.00000000000006 130.99999999999994 M 411.00000000000006 130.99999999999994 M 411.00000000000006 132.99999999999994 L 411.00000000000006 134.99999999999994 M 411.00000000000006 134.99999999999994 M 411.00000000000006 136.99999999999994 L 411.00000000000006 138.99999999999994 M 411.00000000000006 138.99999999999994 M 411.00000000000006 140.99999999999994 L 411.00000000000006 141 Q 411.00000000000006 141 411.00000000000006 141 L 409.0000000000001 141 M 409.0000000000001 141 M 407.0000000000001 141 L 405.0000000000001 141 M 405.0000000000001 141 M 403.0000000000001 141 L 401.0000000000001 141 M 401.0000000000001 141 M 399.0000000000001 141 L 397.0000000000001 141 M 397.0000000000001 141 M 395.0000000000001 141 L 393.0000000000001 141 M 393.0000000000001 141 M 391.0000000000001 141 L 389.0000000000001 141 M 389.0000000000001 141 M 387.0000000000001 141 L 385.0000000000001 141 M 385.0000000000001 141 M 383.0000000000001 141 L 381.0000000000001 141 M 381.0000000000001 141 M 379.0000000000001 141 L 377.0000000000001 141 M 377.0000000000001 141 M 375.0000000000001 141 L 373.0000000000001 141 M 373.0000000000001 141 M 371.0000000000001 141 L 369.0000000000001 141 M 369.0000000000001 141 M 367.0000000000001 141 L 365.0000000000001 141 M 365.0000000000001 141 M 363.0000000000001 141 L 361.0000000000001 141 M 361.0000000000001 141 M 359.0000000000001 141 L 357.0000000000001 141 M 357.0000000000001 141 M 355.0000000000001 141 L 353.0000000000001 141 M 353.0000000000001 141 M 351.0000000000001 141 L 349.0000000000001 141 M 349.0000000000001 141 M 347.0000000000001 141 L 345.0000000000001 141 M 345.0000000000001 141 M 343.0000000000001 141 L 341.0000000000001 141 M 341.0000000000001 141 M 339.0000000000001 141 L 337.0000000000001 141 M 337.0000000000001 141 M 335.0000000000001 141 L 333.0000000000001 141 M 333.0000000000001 141 M 331.0000000000001 141 L 329.0000000000001 141 M 329.0000000000001 141 M 327.0000000000001 141 L 325.0000000000001 141 M 325.0000000000001 141 M 323.0000000000001 141 L 321.0000000000001 141 M 321.0000000000001 141 M 319.0000000000001 141 L 317.0000000000001 141 M 317.0000000000001 141 M 315.0000000000001 141 L 313.0000000000001 141 M 313.0000000000001 141 M 311.0000000000001 141 L 309.0000000000001 141 M 309.0000000000001 141 M 307.0000000000001 141 L 305.0000000000001 141 M 305.0000000000001 141 M 303.0000000000001 141 L 301.0000000000001 141 M 301.0000000000001 141 M 299.0000000000001 141 L 297.0000000000001 141 M 297.0000000000001 141 M 295.0000000000001 141 L 293.0000000000001 141 M 293.0000000000001 141 M 291.0000000000001 141 L 289.0000000000001 141 M 289.0000000000001 141 M 287.0000000000001 141 L 285.0000000000001 141 M 285.0000000000001 141 M 283.0000000000001 141 L 281.0000000000001 141 M 281.0000000000001 141 M 279.0000000000001 141 L 277.0000000000001 141 M 277.0000000000001 141 M 275.0000000000001 141 L 273.0000000000001 141 M 273.0000000000001 141 M 271.0000000000001 141 L 269.0000000000001 141 M 269.0000000000001 141 M 267.0000000000001 141 L 265.0000000000001 141 M 265.0000000000001 141 M 263.0000000000001 141 L 261.0000000000001 141 M 261.0000000000001 141 M 259.0000000000001 141 L 257.0000000000001 141 M 257.0000000000001 141 M 255.0000000000001 141 L 253.0000000000001 141 M 253.0000000000001 141 M 251.0000000000001 141 L 249.0000000000001 141 M 249.0000000000001 141 M 247.0000000000001 141 L 245.0000000000001 141 M 245.0000000000001 141 M 243.0000000000001 141 L 241.0000000000001 141 M 241.0000000000001 141 M 239.0000000000001 141 L 237.0000000000001 141 M 237.0000000000001 141 M 235.0000000000001 141 L 233.0000000000001 141 M 233.0000000000001 141 M 231.0000000000001 141 L 229.0000000000001 141 M 229.0000000000001 141 M 227.0000000000001 141 L 225.0000000000001 141 M 225.0000000000001 141 M 223.0000000000001 141 L 221.0000000000001 141 M 221.0000000000001 141 M 219.0000000000001 141 L 217.0000000000001 141 M 217.0000000000001 141 M 215.0000000000001 141 L 213.0000000000001 141 M 213.0000000000001 141 M 211.0000000000001 141 L 209.0000000000001 141 M 209.0000000000001 141 M 207.0000000000001 141 L 205.0000000000001 141 M 205.0000000000001 141 M 203.0000000000001 141 L 201.0000000000001 141 M 201.0000000000001 141 M 199.0000000000001 141 L 197.0000000000001 141 M 197.0000000000001 141 M 195.0000000000001 141 L 193.0000000000001 141 M 193.0000000000001 141 M 191.0000000000001 141 L 189.0000000000001 141 M 189.0000000000001 141 M 187.0000000000001 141 L 185.0000000000001 141 M 185.0000000000001 141 M 183.0000000000001 141 L 181.0000000000001 141 M 181.0000000000001 141 M 179.0000000000001 141 L 177.0000000000001 141 M 177.0000000000001 141 M 175.0000000000001 141 L 173.0000000000001 141 M 173.0000000000001 141 M 171.0000000000001 141 L 169.0000000000001 141 M 169.0000000000001 141 M 167.0000000000001 141 L 165.0000000000001 141 M 165.0000000000001 141 M 163.0000000000001 141 L 161.0000000000001 141 M 161.0000000000001 141 M 159.0000000000001 141 L 157.0000000000001 141 M 157.0000000000001 141 M 155.0000000000001 141 L 153.0000000000001 141 M 153.0000000000001 141 M 151.0000000000001 141 L 149.0000000000001 141 M 149.0000000000001 141 M 147.0000000000001 141 L 145.0000000000001 141 M 145.0000000000001 141 M 143.0000000000001 141 L 141.0000000000001 141 M 141.0000000000001 141 M 139.0000000000001 141 L 137.0000000000001 141 M 137.0000000000001 141 M 135.0000000000001 141 L 133.0000000000001 141 M 133.0000000000001 141 M 131.0000000000001 141 L 129.0000000000001 141 M 129.0000000000001 141 M 127.00000000000011 141 L 125.00000000000011 141 M 125.00000000000011 141 M 123.00000000000011 141 L 121.00000000000011 141 M 121.00000000000011 141 M 119.00000000000011 141 L 117.00000000000011 141 M 117.00000000000011 141 M 115.00000000000011 141 L 113.00000000000011 141 M 113.00000000000011 141 M 111.00000000000011 141 L 109.00000000000011 141 M 109.00000000000011 141 M 107.00000000000011 141 L 105.00000000000011 141 M 105.00000000000011 141 M 103.00000000000011 141 L 101.00000000000011 141 M 101.00000000000011 141 M 99.00000000000011 141 L 97.00000000000011 141 M 97.00000000000011 141 M 95.00000000000011 141 L 93.00000000000011 141 M 93.00000000000011 141 M 91.00000000000011 141 L 89.00000000000011 141 M 89.00000000000011 141 M 87.00000000000011 141 L 85.00000000000011 141 M 85.00000000000011 141 M 83.00000000000011 141 L 81.00000000000011 141 M 81.00000000000011 141 M 79.00000000000011 141 L 77.00000000000011 141 M 77.00000000000011 141 M 75.00000000000011 141 L 73.00000000000011 141 M 73.00000000000011 141 M 71.00000000000011 141 L 69.00000000000011 141 M 69.00000000000011 141 M 67.00000000000011 141 L 65.00000000000011 141 M 65.00000000000011 141 M 63.000000000000114 141 L 61.000000000000114 141 M 61.000000000000114 141 M 59.000000000000114 141 L 57.000000000000114 141 M 57.000000000000114 141 M 55.000000000000114 141 L 53.000000000000114 141 M 53.000000000000114 141 M 51.000000000000114 141 L 49.000000000000114 141 M 49.000000000000114 141 M 47.000000000000114 141 L 45.000000000000114 141 M 45.000000000000114 141 M 43.000000000000114 141 L 41.000000000000114 141 M 41.000000000000114 141 M 39.000000000000114 141 L 37.000000000000114 141 M 37.000000000000114 141 M 35.000000000000114 141 L 33.000000000000114 141 M 33.000000000000114 141 M 31.000000000000114 141 L 29.000000000000114 141 M 29.000000000000114 141 M 27.000000000000114 141 L 25.000000000000114 141 M 25.000000000000114 141 M 23.000000000000114 141 L 21.000000000000114 141 M 21.000000000000114 141 M 19.000000000000114 141 L 17.000000000000114 141 M 17.000000000000114 141 M 15.000000000000114 141 L 13.000000000000114 141 M 13.000000000000114 141 M 11.000000000000114 141 L 9.000000000000114 141 M 9.000000000000114 141 M 7.000000000000114 141 L 5.000000000000114 141 M 5.000000000000114 141 M 3.0000000000001137 141 L 1.0000000000001137 141 M 1.0000000000001137 141 M 0 140.0000000000001 L 0 138.0000000000001 M 0 138.0000000000001 M 0 136.0000000000001 L 0 134.0000000000001 M 0 134.0000000000001 M 0 132.0000000000001 L 0 130.0000000000001 M 0 130.0000000000001 M 0 128.0000000000001 L 0 126.00000000000011 M 0 126.00000000000011 M 0 124.00000000000011 L 0 122.00000000000011 M 0 122.00000000000011 M 0 120.00000000000011 L 0 118.00000000000011 M 0 118.00000000000011 M 0 116.00000000000011 L 0 114.00000000000011 M 0 114.00000000000011 M 0 112.00000000000011 L 0 110.00000000000011 M 0 110.00000000000011 M 0 108.00000000000011 L 0 106.00000000000011 M 0 106.00000000000011 M 0 104.00000000000011 L 0 102.00000000000011 M 0 102.00000000000011 M 0 100.00000000000011 L 0 98.00000000000011 M 0 98.00000000000011 M 0 96.00000000000011 L 0 94.00000000000011 M 0 94.00000000000011 M 0 92.00000000000011 L 0 90.00000000000011 M 0 90.00000000000011 M 0 88.00000000000011 L 0 86.00000000000011 M 0 86.00000000000011 M 0 84.00000000000011 L 0 82.00000000000011 M 0 82.00000000000011 M 0 80.00000000000011 L 0 78.00000000000011 M 0 78.00000000000011 M 0 76.00000000000011 L 0 74.00000000000011 M 0 74.00000000000011 M 0 72.00000000000011 L 0 70.00000000000011 M 0 70.00000000000011 M 0 68.00000000000011 L 0 66.00000000000011 M 0 66.00000000000011 M 0 64.00000000000011 L 0 62.000000000000114 M 0 62.000000000000114 M 0 60.000000000000114 L 0 58.000000000000114 M 0 58.000000000000114 M 0 56.000000000000114 L 0 54.000000000000114 M 0 54.000000000000114 M 0 52.000000000000114 L 0 50.000000000000114 M 0 50.000000000000114 M 0 48.000000000000114 L 0 46.000000000000114 M 0 46.000000000000114 M 0 44.000000000000114 L 0 42.000000000000114 M 0 42.000000000000114 M 0 40.000000000000114 L 0 38.000000000000114 M 0 38.000000000000114 M 0 36.000000000000114 L 0 34.000000000000114 M 0 34.000000000000114 M 0 32.000000000000114 L 0 30.000000000000114 M 0 30.000000000000114 M 0 28.000000000000114 L 0 26.000000000000114 M 0 26.000000000000114 M 0 24.000000000000114 L 0 22.000000000000114 M 0 22.000000000000114 M 0 20.000000000000114 L 0 18.000000000000114 M 0 18.000000000000114 M 0 16.000000000000114 L 0 14.000000000000114 M 0 14.000000000000114 M 0 12.000000000000114 L 0 10.000000000000114 M 0 10.000000000000114 M 0 8.000000000000114 L 0 6.000000000000114 M 0 6.000000000000114 M 0 4.000000000000114 L 0 2.0000000000001137 M 0 2.0000000000001137 M 0 1.1368683772161603e-13 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,447.75,268.5)"><g><g transform="translate(0,0) scale(4.160000000000001,1.63)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.24038461538461534,0.6134969325153374)"><path fill="none" stroke="none" d="M 0 0 L 416.0000000000001 0 Q 416.0000000000001 0 416.0000000000001 0 L 416.0000000000001 163 Q 416.0000000000001 163 416.0000000000001 163 L 0 163 Q 0 163 0 163 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 340 0 L 342 0 M 342 0 M 344 0 L 346 0 M 346 0 M 348 0 L 350 0 M 350 0 M 352 0 L 354 0 M 354 0 M 356 0 L 358 0 M 358 0 M 360 0 L 362 0 M 362 0 M 364 0 L 366 0 M 366 0 M 368 0 L 370 0 M 370 0 M 372 0 L 374 0 M 374 0 M 376 0 L 378 0 M 378 0 M 380 0 L 382 0 M 382 0 M 384 0 L 386 0 M 386 0 M 388 0 L 390 0 M 390 0 M 392 0 L 394 0 M 394 0 M 396 0 L 398 0 M 398 0 M 400 0 L 402 0 M 402 0 M 404 0 L 406 0 M 406 0 M 408 0 L 410 0 M 410 0 M 412 0 L 414 0 M 414 0 M 416 0 L 416.0000000000001 0 Q 416.0000000000001 0 416.0000000000001 0 L 416.0000000000001 1.9999999999998863 M 416.0000000000001 1.9999999999998863 M 416.0000000000001 3.9999999999998863 L 416.0000000000001 5.999999999999886 M 416.0000000000001 5.999999999999886 M 416.0000000000001 7.999999999999886 L 416.0000000000001 9.999999999999886 M 416.0000000000001 9.999999999999886 M 416.0000000000001 11.999999999999886 L 416.0000000000001 13.999999999999886 M 416.0000000000001 13.999999999999886 M 416.0000000000001 15.999999999999886 L 416.0000000000001 17.999999999999886 M 416.0000000000001 17.999999999999886 M 416.0000000000001 19.999999999999886 L 416.0000000000001 21.999999999999886 M 416.0000000000001 21.999999999999886 M 416.0000000000001 23.999999999999886 L 416.0000000000001 25.999999999999886 M 416.0000000000001 25.999999999999886 M 416.0000000000001 27.999999999999886 L 416.0000000000001 29.999999999999886 M 416.0000000000001 29.999999999999886 M 416.0000000000001 31.999999999999886 L 416.0000000000001 33.999999999999886 M 416.0000000000001 33.999999999999886 M 416.0000000000001 35.999999999999886 L 416.0000000000001 37.999999999999886 M 416.0000000000001 37.999999999999886 M 416.0000000000001 39.999999999999886 L 416.0000000000001 41.999999999999886 M 416.0000000000001 41.999999999999886 M 416.0000000000001 43.999999999999886 L 416.0000000000001 45.999999999999886 M 416.0000000000001 45.999999999999886 M 416.0000000000001 47.999999999999886 L 416.0000000000001 49.999999999999886 M 416.0000000000001 49.999999999999886 M 416.0000000000001 51.999999999999886 L 416.0000000000001 53.999999999999886 M 416.0000000000001 53.999999999999886 M 416.0000000000001 55.999999999999886 L 416.0000000000001 57.999999999999886 M 416.0000000000001 57.999999999999886 M 416.0000000000001 59.999999999999886 L 416.0000000000001 61.999999999999886 M 416.0000000000001 61.999999999999886 M 416.0000000000001 63.999999999999886 L 416.0000000000001 65.99999999999989 M 416.0000000000001 65.99999999999989 M 416.0000000000001 67.99999999999989 L 416.0000000000001 69.99999999999989 M 416.0000000000001 69.99999999999989 M 416.0000000000001 71.99999999999989 L 416.0000000000001 73.99999999999989 M 416.0000000000001 73.99999999999989 M 416.0000000000001 75.99999999999989 L 416.0000000000001 77.99999999999989 M 416.0000000000001 77.99999999999989 M 416.0000000000001 79.99999999999989 L 416.0000000000001 81.99999999999989 M 416.0000000000001 81.99999999999989 M 416.0000000000001 83.99999999999989 L 416.0000000000001 85.99999999999989 M 416.0000000000001 85.99999999999989 M 416.0000000000001 87.99999999999989 L 416.0000000000001 89.99999999999989 M 416.0000000000001 89.99999999999989 M 416.0000000000001 91.99999999999989 L 416.0000000000001 93.99999999999989 M 416.0000000000001 93.99999999999989 M 416.0000000000001 95.99999999999989 L 416.0000000000001 97.99999999999989 M 416.0000000000001 97.99999999999989 M 416.0000000000001 99.99999999999989 L 416.0000000000001 101.99999999999989 M 416.0000000000001 101.99999999999989 M 416.0000000000001 103.99999999999989 L 416.0000000000001 105.99999999999989 M 416.0000000000001 105.99999999999989 M 416.0000000000001 107.99999999999989 L 416.0000000000001 109.99999999999989 M 416.0000000000001 109.99999999999989 M 416.0000000000001 111.99999999999989 L 416.0000000000001 113.99999999999989 M 416.0000000000001 113.99999999999989 M 416.0000000000001 115.99999999999989 L 416.0000000000001 117.99999999999989 M 416.0000000000001 117.99999999999989 M 416.0000000000001 119.99999999999989 L 416.0000000000001 121.99999999999989 M 416.0000000000001 121.99999999999989 M 416.0000000000001 123.99999999999989 L 416.0000000000001 125.99999999999989 M 416.0000000000001 125.99999999999989 M 416.0000000000001 127.99999999999989 L 416.0000000000001 129.9999999999999 M 416.0000000000001 129.9999999999999 M 416.0000000000001 131.9999999999999 L 416.0000000000001 133.9999999999999 M 416.0000000000001 133.9999999999999 M 416.0000000000001 135.9999999999999 L 416.0000000000001 137.9999999999999 M 416.0000000000001 137.9999999999999 M 416.0000000000001 139.9999999999999 L 416.0000000000001 141.9999999999999 M 416.0000000000001 141.9999999999999 M 416.0000000000001 143.9999999999999 L 416.0000000000001 145.9999999999999 M 416.0000000000001 145.9999999999999 M 416.0000000000001 147.9999999999999 L 416.0000000000001 149.9999999999999 M 416.0000000000001 149.9999999999999 M 416.0000000000001 151.9999999999999 L 416.0000000000001 153.9999999999999 M 416.0000000000001 153.9999999999999 M 416.0000000000001 155.9999999999999 L 416.0000000000001 157.9999999999999 M 416.0000000000001 157.9999999999999 M 416.0000000000001 159.9999999999999 L 416.0000000000001 161.9999999999999 M 416.0000000000001 161.9999999999999 M 415.0000000000002 163 L 413.0000000000002 163 M 413.0000000000002 163 M 411.0000000000002 163 L 409.0000000000002 163 M 409.0000000000002 163 M 407.0000000000002 163 L 405.0000000000002 163 M 405.0000000000002 163 M 403.0000000000002 163 L 401.0000000000002 163 M 401.0000000000002 163 M 399.0000000000002 163 L 397.0000000000002 163 M 397.0000000000002 163 M 395.0000000000002 163 L 393.0000000000002 163 M 393.0000000000002 163 M 391.0000000000002 163 L 389.0000000000002 163 M 389.0000000000002 163 M 387.0000000000002 163 L 385.0000000000002 163 M 385.0000000000002 163 M 383.0000000000002 163 L 381.0000000000002 163 M 381.0000000000002 163 M 379.0000000000002 163 L 377.0000000000002 163 M 377.0000000000002 163 M 375.0000000000002 163 L 373.0000000000002 163 M 373.0000000000002 163 M 371.0000000000002 163 L 369.0000000000002 163 M 369.0000000000002 163 M 367.0000000000002 163 L 365.0000000000002 163 M 365.0000000000002 163 M 363.0000000000002 163 L 361.0000000000002 163 M 361.0000000000002 163 M 359.0000000000002 163 L 357.0000000000002 163 M 357.0000000000002 163 M 355.0000000000002 163 L 353.0000000000002 163 M 353.0000000000002 163 M 351.0000000000002 163 L 349.0000000000002 163 M 349.0000000000002 163 M 347.0000000000002 163 L 345.0000000000002 163 M 345.0000000000002 163 M 343.0000000000002 163 L 341.0000000000002 163 M 341.0000000000002 163 M 339.0000000000002 163 L 337.0000000000002 163 M 337.0000000000002 163 M 335.0000000000002 163 L 333.0000000000002 163 M 333.0000000000002 163 M 331.0000000000002 163 L 329.0000000000002 163 M 329.0000000000002 163 M 327.0000000000002 163 L 325.0000000000002 163 M 325.0000000000002 163 M 323.0000000000002 163 L 321.0000000000002 163 M 321.0000000000002 163 M 319.0000000000002 163 L 317.0000000000002 163 M 317.0000000000002 163 M 315.0000000000002 163 L 313.0000000000002 163 M 313.0000000000002 163 M 311.0000000000002 163 L 309.0000000000002 163 M 309.0000000000002 163 M 307.0000000000002 163 L 305.0000000000002 163 M 305.0000000000002 163 M 303.0000000000002 163 L 301.0000000000002 163 M 301.0000000000002 163 M 299.0000000000002 163 L 297.0000000000002 163 M 297.0000000000002 163 M 295.0000000000002 163 L 293.0000000000002 163 M 293.0000000000002 163 M 291.0000000000002 163 L 289.0000000000002 163 M 289.0000000000002 163 M 287.0000000000002 163 L 285.0000000000002 163 M 285.0000000000002 163 M 283.0000000000002 163 L 281.0000000000002 163 M 281.0000000000002 163 M 279.0000000000002 163 L 277.0000000000002 163 M 277.0000000000002 163 M 275.0000000000002 163 L 273.0000000000002 163 M 273.0000000000002 163 M 271.0000000000002 163 L 269.0000000000002 163 M 269.0000000000002 163 M 267.0000000000002 163 L 265.0000000000002 163 M 265.0000000000002 163 M 263.0000000000002 163 L 261.0000000000002 163 M 261.0000000000002 163 M 259.0000000000002 163 L 257.0000000000002 163 M 257.0000000000002 163 M 255.00000000000023 163 L 253.00000000000023 163 M 253.00000000000023 163 M 251.00000000000023 163 L 249.00000000000023 163 M 249.00000000000023 163 M 247.00000000000023 163 L 245.00000000000023 163 M 245.00000000000023 163 M 243.00000000000023 163 L 241.00000000000023 163 M 241.00000000000023 163 M 239.00000000000023 163 L 237.00000000000023 163 M 237.00000000000023 163 M 235.00000000000023 163 L 233.00000000000023 163 M 233.00000000000023 163 M 231.00000000000023 163 L 229.00000000000023 163 M 229.00000000000023 163 M 227.00000000000023 163 L 225.00000000000023 163 M 225.00000000000023 163 M 223.00000000000023 163 L 221.00000000000023 163 M 221.00000000000023 163 M 219.00000000000023 163 L 217.00000000000023 163 M 217.00000000000023 163 M 215.00000000000023 163 L 213.00000000000023 163 M 213.00000000000023 163 M 211.00000000000023 163 L 209.00000000000023 163 M 209.00000000000023 163 M 207.00000000000023 163 L 205.00000000000023 163 M 205.00000000000023 163 M 203.00000000000023 163 L 201.00000000000023 163 M 201.00000000000023 163 M 199.00000000000023 163 L 197.00000000000023 163 M 197.00000000000023 163 M 195.00000000000023 163 L 193.00000000000023 163 M 193.00000000000023 163 M 191.00000000000023 163 L 189.00000000000023 163 M 189.00000000000023 163 M 187.00000000000023 163 L 185.00000000000023 163 M 185.00000000000023 163 M 183.00000000000023 163 L 181.00000000000023 163 M 181.00000000000023 163 M 179.00000000000023 163 L 177.00000000000023 163 M 177.00000000000023 163 M 175.00000000000023 163 L 173.00000000000023 163 M 173.00000000000023 163 M 171.00000000000023 163 L 169.00000000000023 163 M 169.00000000000023 163 M 167.00000000000023 163 L 165.00000000000023 163 M 165.00000000000023 163 M 163.00000000000023 163 L 161.00000000000023 163 M 161.00000000000023 163 M 159.00000000000023 163 L 157.00000000000023 163 M 157.00000000000023 163 M 155.00000000000023 163 L 153.00000000000023 163 M 153.00000000000023 163 M 151.00000000000023 163 L 149.00000000000023 163 M 149.00000000000023 163 M 147.00000000000023 163 L 145.00000000000023 163 M 145.00000000000023 163 M 143.00000000000023 163 L 141.00000000000023 163 M 141.00000000000023 163 M 139.00000000000023 163 L 137.00000000000023 163 M 137.00000000000023 163 M 135.00000000000023 163 L 133.00000000000023 163 M 133.00000000000023 163 M 131.00000000000023 163 L 129.00000000000023 163 M 129.00000000000023 163 M 127.00000000000023 163 L 125.00000000000023 163 M 125.00000000000023 163 M 123.00000000000023 163 L 121.00000000000023 163 M 121.00000000000023 163 M 119.00000000000023 163 L 117.00000000000023 163 M 117.00000000000023 163 M 115.00000000000023 163 L 113.00000000000023 163 M 113.00000000000023 163 M 111.00000000000023 163 L 109.00000000000023 163 M 109.00000000000023 163 M 107.00000000000023 163 L 105.00000000000023 163 M 105.00000000000023 163 M 103.00000000000023 163 L 101.00000000000023 163 M 101.00000000000023 163 M 99.00000000000023 163 L 97.00000000000023 163 M 97.00000000000023 163 M 95.00000000000023 163 L 93.00000000000023 163 M 93.00000000000023 163 M 91.00000000000023 163 L 89.00000000000023 163 M 89.00000000000023 163 M 87.00000000000023 163 L 85.00000000000023 163 M 85.00000000000023 163 M 83.00000000000023 163 L 81.00000000000023 163 M 81.00000000000023 163 M 79.00000000000023 163 L 77.00000000000023 163 M 77.00000000000023 163 M 75.00000000000023 163 L 73.00000000000023 163 M 73.00000000000023 163 M 71.00000000000023 163 L 69.00000000000023 163 M 69.00000000000023 163 M 67.00000000000023 163 L 65.00000000000023 163 M 65.00000000000023 163 M 63.00000000000023 163 L 61.00000000000023 163 M 61.00000000000023 163 M 59.00000000000023 163 L 57.00000000000023 163 M 57.00000000000023 163 M 55.00000000000023 163 L 53.00000000000023 163 M 53.00000000000023 163 M 51.00000000000023 163 L 49.00000000000023 163 M 49.00000000000023 163 M 47.00000000000023 163 L 45.00000000000023 163 M 45.00000000000023 163 M 43.00000000000023 163 L 41.00000000000023 163 M 41.00000000000023 163 M 39.00000000000023 163 L 37.00000000000023 163 M 37.00000000000023 163 M 35.00000000000023 163 L 33.00000000000023 163 M 33.00000000000023 163 M 31.000000000000227 163 L 29.000000000000227 163 M 29.000000000000227 163 M 27.000000000000227 163 L 25.000000000000227 163 M 25.000000000000227 163 M 23.000000000000227 163 L 21.000000000000227 163 M 21.000000000000227 163 M 19.000000000000227 163 L 17.000000000000227 163 M 17.000000000000227 163 M 15.000000000000227 163 L 13.000000000000227 163 M 13.000000000000227 163 M 11.000000000000227 163 L 9.000000000000227 163 M 9.000000000000227 163 M 7.000000000000227 163 L 5.000000000000227 163 M 5.000000000000227 163 M 3.0000000000002274 163 L 1.0000000000002274 163 M 1.0000000000002274 163 M 0 162.00000000000023 L 0 160.00000000000023 M 0 160.00000000000023 M 0 158.00000000000023 L 0 156.00000000000023 M 0 156.00000000000023 M 0 154.00000000000023 L 0 152.00000000000023 M 0 152.00000000000023 M 0 150.00000000000023 L 0 148.00000000000023 M 0 148.00000000000023 M 0 146.00000000000023 L 0 144.00000000000023 M 0 144.00000000000023 M 0 142.00000000000023 L 0 140.00000000000023 M 0 140.00000000000023 M 0 138.00000000000023 L 0 136.00000000000023 M 0 136.00000000000023 M 0 134.00000000000023 L 0 132.00000000000023 M 0 132.00000000000023 M 0 130.00000000000023 L 0 128.00000000000023 M 0 128.00000000000023 M 0 126.00000000000023 L 0 124.00000000000023 M 0 124.00000000000023 M 0 122.00000000000023 L 0 120.00000000000023 M 0 120.00000000000023 M 0 118.00000000000023 L 0 116.00000000000023 M 0 116.00000000000023 M 0 114.00000000000023 L 0 112.00000000000023 M 0 112.00000000000023 M 0 110.00000000000023 L 0 108.00000000000023 M 0 108.00000000000023 M 0 106.00000000000023 L 0 104.00000000000023 M 0 104.00000000000023 M 0 102.00000000000023 L 0 100.00000000000023 M 0 100.00000000000023 M 0 98.00000000000023 L 0 96.00000000000023 M 0 96.00000000000023 M 0 94.00000000000023 L 0 92.00000000000023 M 0 92.00000000000023 M 0 90.00000000000023 L 0 88.00000000000023 M 0 88.00000000000023 M 0 86.00000000000023 L 0 84.00000000000023 M 0 84.00000000000023 M 0 82.00000000000023 L 0 80.00000000000023 M 0 80.00000000000023 M 0 78.00000000000023 L 0 76.00000000000023 M 0 76.00000000000023 M 0 74.00000000000023 L 0 72.00000000000023 M 0 72.00000000000023 M 0 70.00000000000023 L 0 68.00000000000023 M 0 68.00000000000023 M 0 66.00000000000023 L 0 64.00000000000023 M 0 64.00000000000023 M 0 62.00000000000023 L 0 60.00000000000023 M 0 60.00000000000023 M 0 58.00000000000023 L 0 56.00000000000023 M 0 56.00000000000023 M 0 54.00000000000023 L 0 52.00000000000023 M 0 52.00000000000023 M 0 50.00000000000023 L 0 48.00000000000023 M 0 48.00000000000023 M 0 46.00000000000023 L 0 44.00000000000023 M 0 44.00000000000023 M 0 42.00000000000023 L 0 40.00000000000023 M 0 40.00000000000023 M 0 38.00000000000023 L 0 36.00000000000023 M 0 36.00000000000023 M 0 34.00000000000023 L 0 32.00000000000023 M 0 32.00000000000023 M 0 30.000000000000227 L 0 28.000000000000227 M 0 28.000000000000227 M 0 26.000000000000227 L 0 24.000000000000227 M 0 24.000000000000227 M 0 22.000000000000227 L 0 20.000000000000227 M 0 20.000000000000227 M 0 18.000000000000227 L 0 16.000000000000227 M 0 16.000000000000227 M 0 14.000000000000227 L 0 12.000000000000227 M 0 12.000000000000227 M 0 10.000000000000227 L 0 8.000000000000227 M 0 8.000000000000227 M 0 6.000000000000227 L 0 4.000000000000227 M 0 4.000000000000227 M 0 2.0000000000002274 L 0 2.2737367544323206e-13 M 0 2.2737367544323206e-13 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,3.5000000000000284,268.5)"><g><g transform="translate(0,0) scale(4.11,1.63)"><g><path fill="#FFFFFF" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(0.24330900243309,0.6134969325153374)"><path fill="none" stroke="none" d="M 0 0 L 411.00000000000006 0 Q 411.00000000000006 0 411.00000000000006 0 L 411.00000000000006 163 Q 411.00000000000006 163 411.00000000000006 163 L 0 163 Q 0 163 0 163 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="#333333" d="M 0 0 M 0 0 L 2 0 M 2 0 M 4 0 L 6 0 M 6 0 M 8 0 L 10 0 M 10 0 M 12 0 L 14 0 M 14 0 M 16 0 L 18 0 M 18 0 M 20 0 L 22 0 M 22 0 M 24 0 L 26 0 M 26 0 M 28 0 L 30 0 M 30 0 M 32 0 L 34 0 M 34 0 M 36 0 L 38 0 M 38 0 M 40 0 L 42 0 M 42 0 M 44 0 L 46 0 M 46 0 M 48 0 L 50 0 M 50 0 M 52 0 L 54 0 M 54 0 M 56 0 L 58 0 M 58 0 M 60 0 L 62 0 M 62 0 M 64 0 L 66 0 M 66 0 M 68 0 L 70 0 M 70 0 M 72 0 L 74 0 M 74 0 M 76 0 L 78 0 M 78 0 M 80 0 L 82 0 M 82 0 M 84 0 L 86 0 M 86 0 M 88 0 L 90 0 M 90 0 M 92 0 L 94 0 M 94 0 M 96 0 L 98 0 M 98 0 M 100 0 L 102 0 M 102 0 M 104 0 L 106 0 M 106 0 M 108 0 L 110 0 M 110 0 M 112 0 L 114 0 M 114 0 M 116 0 L 118 0 M 118 0 M 120 0 L 122 0 M 122 0 M 124 0 L 126 0 M 126 0 M 128 0 L 130 0 M 130 0 M 132 0 L 134 0 M 134 0 M 136 0 L 138 0 M 138 0 M 140 0 L 142 0 M 142 0 M 144 0 L 146 0 M 146 0 M 148 0 L 150 0 M 150 0 M 152 0 L 154 0 M 154 0 M 156 0 L 158 0 M 158 0 M 160 0 L 162 0 M 162 0 M 164 0 L 166 0 M 166 0 M 168 0 L 170 0 M 170 0 M 172 0 L 174 0 M 174 0 M 176 0 L 178 0 M 178 0 M 180 0 L 182 0 M 182 0 M 184 0 L 186 0 M 186 0 M 188 0 L 190 0 M 190 0 M 192 0 L 194 0 M 194 0 M 196 0 L 198 0 M 198 0 M 200 0 L 202 0 M 202 0 M 204 0 L 206 0 M 206 0 M 208 0 L 210 0 M 210 0 M 212 0 L 214 0 M 214 0 M 216 0 L 218 0 M 218 0 M 220 0 L 222 0 M 222 0 M 224 0 L 226 0 M 226 0 M 228 0 L 230 0 M 230 0 M 232 0 L 234 0 M 234 0 M 236 0 L 238 0 M 238 0 M 240 0 L 242 0 M 242 0 M 244 0 L 246 0 M 246 0 M 248 0 L 250 0 M 250 0 M 252 0 L 254 0 M 254 0 M 256 0 L 258 0 M 258 0 M 260 0 L 262 0 M 262 0 M 264 0 L 266 0 M 266 0 M 268 0 L 270 0 M 270 0 M 272 0 L 274 0 M 274 0 M 276 0 L 278 0 M 278 0 M 280 0 L 282 0 M 282 0 M 284 0 L 286 0 M 286 0 M 288 0 L 290 0 M 290 0 M 292 0 L 294 0 M 294 0 M 296 0 L 298 0 M 298 0 M 300 0 L 302 0 M 302 0 M 304 0 L 306 0 M 306 0 M 308 0 L 310 0 M 310 0 M 312 0 L 314 0 M 314 0 M 316 0 L 318 0 M 318 0 M 320 0 L 322 0 M 322 0 M 324 0 L 326 0 M 326 0 M 328 0 L 330 0 M 330 0 M 332 0 L 334 0 M 334 0 M 336 0 L 338 0 M 338 0 M 340 0 L 342 0 M 342 0 M 344 0 L 346 0 M 346 0 M 348 0 L 350 0 M 350 0 M 352 0 L 354 0 M 354 0 M 356 0 L 358 0 M 358 0 M 360 0 L 362 0 M 362 0 M 364 0 L 366 0 M 366 0 M 368 0 L 370 0 M 370 0 M 372 0 L 374 0 M 374 0 M 376 0 L 378 0 M 378 0 M 380 0 L 382 0 M 382 0 M 384 0 L 386 0 M 386 0 M 388 0 L 390 0 M 390 0 M 392 0 L 394 0 M 394 0 M 396 0 L 398 0 M 398 0 M 400 0 L 402 0 M 402 0 M 404 0 L 406 0 M 406 0 M 408 0 L 410 0 M 410 0 M 411.00000000000006 0.9999999999999432 L 411.00000000000006 2.999999999999943 M 411.00000000000006 2.999999999999943 M 411.00000000000006 4.999999999999943 L 411.00000000000006 6.999999999999943 M 411.00000000000006 6.999999999999943 M 411.00000000000006 8.999999999999943 L 411.00000000000006 10.999999999999943 M 411.00000000000006 10.999999999999943 M 411.00000000000006 12.999999999999943 L 411.00000000000006 14.999999999999943 M 411.00000000000006 14.999999999999943 M 411.00000000000006 16.999999999999943 L 411.00000000000006 18.999999999999943 M 411.00000000000006 18.999999999999943 M 411.00000000000006 20.999999999999943 L 411.00000000000006 22.999999999999943 M 411.00000000000006 22.999999999999943 M 411.00000000000006 24.999999999999943 L 411.00000000000006 26.999999999999943 M 411.00000000000006 26.999999999999943 M 411.00000000000006 28.999999999999943 L 411.00000000000006 30.999999999999943 M 411.00000000000006 30.999999999999943 M 411.00000000000006 32.99999999999994 L 411.00000000000006 34.99999999999994 M 411.00000000000006 34.99999999999994 M 411.00000000000006 36.99999999999994 L 411.00000000000006 38.99999999999994 M 411.00000000000006 38.99999999999994 M 411.00000000000006 40.99999999999994 L 411.00000000000006 42.99999999999994 M 411.00000000000006 42.99999999999994 M 411.00000000000006 44.99999999999994 L 411.00000000000006 46.99999999999994 M 411.00000000000006 46.99999999999994 M 411.00000000000006 48.99999999999994 L 411.00000000000006 50.99999999999994 M 411.00000000000006 50.99999999999994 M 411.00000000000006 52.99999999999994 L 411.00000000000006 54.99999999999994 M 411.00000000000006 54.99999999999994 M 411.00000000000006 56.99999999999994 L 411.00000000000006 58.99999999999994 M 411.00000000000006 58.99999999999994 M 411.00000000000006 60.99999999999994 L 411.00000000000006 62.99999999999994 M 411.00000000000006 62.99999999999994 M 411.00000000000006 64.99999999999994 L 411.00000000000006 66.99999999999994 M 411.00000000000006 66.99999999999994 M 411.00000000000006 68.99999999999994 L 411.00000000000006 70.99999999999994 M 411.00000000000006 70.99999999999994 M 411.00000000000006 72.99999999999994 L 411.00000000000006 74.99999999999994 M 411.00000000000006 74.99999999999994 M 411.00000000000006 76.99999999999994 L 411.00000000000006 78.99999999999994 M 411.00000000000006 78.99999999999994 M 411.00000000000006 80.99999999999994 L 411.00000000000006 82.99999999999994 M 411.00000000000006 82.99999999999994 M 411.00000000000006 84.99999999999994 L 411.00000000000006 86.99999999999994 M 411.00000000000006 86.99999999999994 M 411.00000000000006 88.99999999999994 L 411.00000000000006 90.99999999999994 M 411.00000000000006 90.99999999999994 M 411.00000000000006 92.99999999999994 L 411.00000000000006 94.99999999999994 M 411.00000000000006 94.99999999999994 M 411.00000000000006 96.99999999999994 L 411.00000000000006 98.99999999999994 M 411.00000000000006 98.99999999999994 M 411.00000000000006 100.99999999999994 L 411.00000000000006 102.99999999999994 M 411.00000000000006 102.99999999999994 M 411.00000000000006 104.99999999999994 L 411.00000000000006 106.99999999999994 M 411.00000000000006 106.99999999999994 M 411.00000000000006 108.99999999999994 L 411.00000000000006 110.99999999999994 M 411.00000000000006 110.99999999999994 M 411.00000000000006 112.99999999999994 L 411.00000000000006 114.99999999999994 M 411.00000000000006 114.99999999999994 M 411.00000000000006 116.99999999999994 L 411.00000000000006 118.99999999999994 M 411.00000000000006 118.99999999999994 M 411.00000000000006 120.99999999999994 L 411.00000000000006 122.99999999999994 M 411.00000000000006 122.99999999999994 M 411.00000000000006 124.99999999999994 L 411.00000000000006 126.99999999999994 M 411.00000000000006 126.99999999999994 M 411.00000000000006 128.99999999999994 L 411.00000000000006 130.99999999999994 M 411.00000000000006 130.99999999999994 M 411.00000000000006 132.99999999999994 L 411.00000000000006 134.99999999999994 M 411.00000000000006 134.99999999999994 M 411.00000000000006 136.99999999999994 L 411.00000000000006 138.99999999999994 M 411.00000000000006 138.99999999999994 M 411.00000000000006 140.99999999999994 L 411.00000000000006 142.99999999999994 M 411.00000000000006 142.99999999999994 M 411.00000000000006 144.99999999999994 L 411.00000000000006 146.99999999999994 M 411.00000000000006 146.99999999999994 M 411.00000000000006 148.99999999999994 L 411.00000000000006 150.99999999999994 M 411.00000000000006 150.99999999999994 M 411.00000000000006 152.99999999999994 L 411.00000000000006 154.99999999999994 M 411.00000000000006 154.99999999999994 M 411.00000000000006 156.99999999999994 L 411.00000000000006 158.99999999999994 M 411.00000000000006 158.99999999999994 M 411.00000000000006 160.99999999999994 L 411.00000000000006 162.99999999999994 M 411.00000000000006 162.99999999999994 M 409.0000000000001 163 L 407.0000000000001 163 M 407.0000000000001 163 M 405.0000000000001 163 L 403.0000000000001 163 M 403.0000000000001 163 M 401.0000000000001 163 L 399.0000000000001 163 M 399.0000000000001 163 M 397.0000000000001 163 L 395.0000000000001 163 M 395.0000000000001 163 M 393.0000000000001 163 L 391.0000000000001 163 M 391.0000000000001 163 M 389.0000000000001 163 L 387.0000000000001 163 M 387.0000000000001 163 M 385.0000000000001 163 L 383.0000000000001 163 M 383.0000000000001 163 M 381.0000000000001 163 L 379.0000000000001 163 M 379.0000000000001 163 M 377.0000000000001 163 L 375.0000000000001 163 M 375.0000000000001 163 M 373.0000000000001 163 L 371.0000000000001 163 M 371.0000000000001 163 M 369.0000000000001 163 L 367.0000000000001 163 M 367.0000000000001 163 M 365.0000000000001 163 L 363.0000000000001 163 M 363.0000000000001 163 M 361.0000000000001 163 L 359.0000000000001 163 M 359.0000000000001 163 M 357.0000000000001 163 L 355.0000000000001 163 M 355.0000000000001 163 M 353.0000000000001 163 L 351.0000000000001 163 M 351.0000000000001 163 M 349.0000000000001 163 L 347.0000000000001 163 M 347.0000000000001 163 M 345.0000000000001 163 L 343.0000000000001 163 M 343.0000000000001 163 M 341.0000000000001 163 L 339.0000000000001 163 M 339.0000000000001 163 M 337.0000000000001 163 L 335.0000000000001 163 M 335.0000000000001 163 M 333.0000000000001 163 L 331.0000000000001 163 M 331.0000000000001 163 M 329.0000000000001 163 L 327.0000000000001 163 M 327.0000000000001 163 M 325.0000000000001 163 L 323.0000000000001 163 M 323.0000000000001 163 M 321.0000000000001 163 L 319.0000000000001 163 M 319.0000000000001 163 M 317.0000000000001 163 L 315.0000000000001 163 M 315.0000000000001 163 M 313.0000000000001 163 L 311.0000000000001 163 M 311.0000000000001 163 M 309.0000000000001 163 L 307.0000000000001 163 M 307.0000000000001 163 M 305.0000000000001 163 L 303.0000000000001 163 M 303.0000000000001 163 M 301.0000000000001 163 L 299.0000000000001 163 M 299.0000000000001 163 M 297.0000000000001 163 L 295.0000000000001 163 M 295.0000000000001 163 M 293.0000000000001 163 L 291.0000000000001 163 M 291.0000000000001 163 M 289.0000000000001 163 L 287.0000000000001 163 M 287.0000000000001 163 M 285.0000000000001 163 L 283.0000000000001 163 M 283.0000000000001 163 M 281.0000000000001 163 L 279.0000000000001 163 M 279.0000000000001 163 M 277.0000000000001 163 L 275.0000000000001 163 M 275.0000000000001 163 M 273.0000000000001 163 L 271.0000000000001 163 M 271.0000000000001 163 M 269.0000000000001 163 L 267.0000000000001 163 M 267.0000000000001 163 M 265.0000000000001 163 L 263.0000000000001 163 M 263.0000000000001 163 M 261.0000000000001 163 L 259.0000000000001 163 M 259.0000000000001 163 M 257.0000000000001 163 L 255.0000000000001 163 M 255.0000000000001 163 M 253.0000000000001 163 L 251.0000000000001 163 M 251.0000000000001 163 M 249.0000000000001 163 L 247.0000000000001 163 M 247.0000000000001 163 M 245.0000000000001 163 L 243.0000000000001 163 M 243.0000000000001 163 M 241.0000000000001 163 L 239.0000000000001 163 M 239.0000000000001 163 M 237.0000000000001 163 L 235.0000000000001 163 M 235.0000000000001 163 M 233.0000000000001 163 L 231.0000000000001 163 M 231.0000000000001 163 M 229.0000000000001 163 L 227.0000000000001 163 M 227.0000000000001 163 M 225.0000000000001 163 L 223.0000000000001 163 M 223.0000000000001 163 M 221.0000000000001 163 L 219.0000000000001 163 M 219.0000000000001 163 M 217.0000000000001 163 L 215.0000000000001 163 M 215.0000000000001 163 M 213.0000000000001 163 L 211.0000000000001 163 M 211.0000000000001 163 M 209.0000000000001 163 L 207.0000000000001 163 M 207.0000000000001 163 M 205.0000000000001 163 L 203.0000000000001 163 M 203.0000000000001 163 M 201.0000000000001 163 L 199.0000000000001 163 M 199.0000000000001 163 M 197.0000000000001 163 L 195.0000000000001 163 M 195.0000000000001 163 M 193.0000000000001 163 L 191.0000000000001 163 M 191.0000000000001 163 M 189.0000000000001 163 L 187.0000000000001 163 M 187.0000000000001 163 M 185.0000000000001 163 L 183.0000000000001 163 M 183.0000000000001 163 M 181.0000000000001 163 L 179.0000000000001 163 M 179.0000000000001 163 M 177.0000000000001 163 L 175.0000000000001 163 M 175.0000000000001 163 M 173.0000000000001 163 L 171.0000000000001 163 M 171.0000000000001 163 M 169.0000000000001 163 L 167.0000000000001 163 M 167.0000000000001 163 M 165.0000000000001 163 L 163.0000000000001 163 M 163.0000000000001 163 M 161.0000000000001 163 L 159.0000000000001 163 M 159.0000000000001 163 M 157.0000000000001 163 L 155.0000000000001 163 M 155.0000000000001 163 M 153.0000000000001 163 L 151.0000000000001 163 M 151.0000000000001 163 M 149.0000000000001 163 L 147.0000000000001 163 M 147.0000000000001 163 M 145.0000000000001 163 L 143.0000000000001 163 M 143.0000000000001 163 M 141.0000000000001 163 L 139.0000000000001 163 M 139.0000000000001 163 M 137.0000000000001 163 L 135.0000000000001 163 M 135.0000000000001 163 M 133.0000000000001 163 L 131.0000000000001 163 M 131.0000000000001 163 M 129.0000000000001 163 L 127.00000000000011 163 M 127.00000000000011 163 M 125.00000000000011 163 L 123.00000000000011 163 M 123.00000000000011 163 M 121.00000000000011 163 L 119.00000000000011 163 M 119.00000000000011 163 M 117.00000000000011 163 L 115.00000000000011 163 M 115.00000000000011 163 M 113.00000000000011 163 L 111.00000000000011 163 M 111.00000000000011 163 M 109.00000000000011 163 L 107.00000000000011 163 M 107.00000000000011 163 M 105.00000000000011 163 L 103.00000000000011 163 M 103.00000000000011 163 M 101.00000000000011 163 L 99.00000000000011 163 M 99.00000000000011 163 M 97.00000000000011 163 L 95.00000000000011 163 M 95.00000000000011 163 M 93.00000000000011 163 L 91.00000000000011 163 M 91.00000000000011 163 M 89.00000000000011 163 L 87.00000000000011 163 M 87.00000000000011 163 M 85.00000000000011 163 L 83.00000000000011 163 M 83.00000000000011 163 M 81.00000000000011 163 L 79.00000000000011 163 M 79.00000000000011 163 M 77.00000000000011 163 L 75.00000000000011 163 M 75.00000000000011 163 M 73.00000000000011 163 L 71.00000000000011 163 M 71.00000000000011 163 M 69.00000000000011 163 L 67.00000000000011 163 M 67.00000000000011 163 M 65.00000000000011 163 L 63.000000000000114 163 M 63.000000000000114 163 M 61.000000000000114 163 L 59.000000000000114 163 M 59.000000000000114 163 M 57.000000000000114 163 L 55.000000000000114 163 M 55.000000000000114 163 M 53.000000000000114 163 L 51.000000000000114 163 M 51.000000000000114 163 M 49.000000000000114 163 L 47.000000000000114 163 M 47.000000000000114 163 M 45.000000000000114 163 L 43.000000000000114 163 M 43.000000000000114 163 M 41.000000000000114 163 L 39.000000000000114 163 M 39.000000000000114 163 M 37.000000000000114 163 L 35.000000000000114 163 M 35.000000000000114 163 M 33.000000000000114 163 L 31.000000000000114 163 M 31.000000000000114 163 M 29.000000000000114 163 L 27.000000000000114 163 M 27.000000000000114 163 M 25.000000000000114 163 L 23.000000000000114 163 M 23.000000000000114 163 M 21.000000000000114 163 L 19.000000000000114 163 M 19.000000000000114 163 M 17.000000000000114 163 L 15.000000000000114 163 M 15.000000000000114 163 M 13.000000000000114 163 L 11.000000000000114 163 M 11.000000000000114 163 M 9.000000000000114 163 L 7.000000000000114 163 M 7.000000000000114 163 M 5.000000000000114 163 L 3.0000000000001137 163 M 3.0000000000001137 163 M 1.0000000000001137 163 L 0 163 Q 0 163 0 163 L 0 162.0000000000001 M 0 162.0000000000001 M 0 160.0000000000001 L 0 158.0000000000001 M 0 158.0000000000001 M 0 156.0000000000001 L 0 154.0000000000001 M 0 154.0000000000001 M 0 152.0000000000001 L 0 150.0000000000001 M 0 150.0000000000001 M 0 148.0000000000001 L 0 146.0000000000001 M 0 146.0000000000001 M 0 144.0000000000001 L 0 142.0000000000001 M 0 142.0000000000001 M 0 140.0000000000001 L 0 138.0000000000001 M 0 138.0000000000001 M 0 136.0000000000001 L 0 134.0000000000001 M 0 134.0000000000001 M 0 132.0000000000001 L 0 130.0000000000001 M 0 130.0000000000001 M 0 128.0000000000001 L 0 126.00000000000011 M 0 126.00000000000011 M 0 124.00000000000011 L 0 122.00000000000011 M 0 122.00000000000011 M 0 120.00000000000011 L 0 118.00000000000011 M 0 118.00000000000011 M 0 116.00000000000011 L 0 114.00000000000011 M 0 114.00000000000011 M 0 112.00000000000011 L 0 110.00000000000011 M 0 110.00000000000011 M 0 108.00000000000011 L 0 106.00000000000011 M 0 106.00000000000011 M 0 104.00000000000011 L 0 102.00000000000011 M 0 102.00000000000011 M 0 100.00000000000011 L 0 98.00000000000011 M 0 98.00000000000011 M 0 96.00000000000011 L 0 94.00000000000011 M 0 94.00000000000011 M 0 92.00000000000011 L 0 90.00000000000011 M 0 90.00000000000011 M 0 88.00000000000011 L 0 86.00000000000011 M 0 86.00000000000011 M 0 84.00000000000011 L 0 82.00000000000011 M 0 82.00000000000011 M 0 80.00000000000011 L 0 78.00000000000011 M 0 78.00000000000011 M 0 76.00000000000011 L 0 74.00000000000011 M 0 74.00000000000011 M 0 72.00000000000011 L 0 70.00000000000011 M 0 70.00000000000011 M 0 68.00000000000011 L 0 66.00000000000011 M 0 66.00000000000011 M 0 64.00000000000011 L 0 62.000000000000114 M 0 62.000000000000114 M 0 60.000000000000114 L 0 58.000000000000114 M 0 58.000000000000114 M 0 56.000000000000114 L 0 54.000000000000114 M 0 54.000000000000114 M 0 52.000000000000114 L 0 50.000000000000114 M 0 50.000000000000114 M 0 48.000000000000114 L 0 46.000000000000114 M 0 46.000000000000114 M 0 44.000000000000114 L 0 42.000000000000114 M 0 42.000000000000114 M 0 40.000000000000114 L 0 38.000000000000114 M 0 38.000000000000114 M 0 36.000000000000114 L 0 34.000000000000114 M 0 34.000000000000114 M 0 32.000000000000114 L 0 30.000000000000114 M 0 30.000000000000114 M 0 28.000000000000114 L 0 26.000000000000114 M 0 26.000000000000114 M 0 24.000000000000114 L 0 22.000000000000114 M 0 22.000000000000114 M 0 20.000000000000114 L 0 18.000000000000114 M 0 18.000000000000114 M 0 16.000000000000114 L 0 14.000000000000114 M 0 14.000000000000114 M 0 12.000000000000114 L 0 10.000000000000114 M 0 10.000000000000114 M 0 8.000000000000114 L 0 6.000000000000114 M 0 6.000000000000114 M 0 4.000000000000114 L 0 2.0000000000001137 M 0 2.0000000000001137 M 0 1.1368683772161603e-13 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="matrix(1,0,0,1,515,213)"><g transform="translate(0,0)"><g transform="translate(-100.5,-90.5) translate(-414.5,-122.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 519.5 217.5 L 551.5 298" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,535.7106781186548,213)"><g transform="translate(0,0)"><g transform="translate(-139.5,-86.5) translate(-396.21067811865476,-126.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 540.2106781186548 217.5 L 744.5 298" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,444.71067811865476,77.5)"><g transform="translate(0,0)"><g transform="translate(-231.28932188134524,-95) translate(-213.4213562373095,17.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 498.78932188134524 142.5 L 449.21067811865476 82" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,354.21067811865476,77.49999999999999)"><g transform="translate(0,0)"><g transform="translate(-237,-54) translate(-117.21067811865476,-23.499999999999986) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 407.78932188134524 81.99999999999999 L 358.71067811865476 142.5" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,289.5,213)"><g transform="translate(0,0)"><g transform="translate(-174,-217.5) translate(-115.5,4.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 338 217.5 L 294 299" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="matrix(1,0,0,1,103.5,213)"><g transform="translate(0,0)"><g transform="translate(-187,-206.5) translate(83.5,-6.5) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#cccccc" d="M 317.28932188134524 217.5 L 108 298" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g><g transform="translate(0,0) matrix(1,0,0,1,378.5,7)"><g transform="translate(4,4) scale(1.01,1.0133333333333334)"><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="url(#kKfylQdvRaFI)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#kKfylQdvRaFI)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,380.5,7) translate(8,31)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0.6484375" y="11">Layer 2 Switch</text></g><g transform="translate(0,0) matrix(1,0,0,1,288,142.5)"><g transform="translate(4,4) scale(1.01,1.0133333333333334)"><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="url(#okETWrHtmeUv)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#okETWrHtmeUv)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,290,142.5) translate(8,31)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="24.326171875" y="11">Host1</text></g><g transform="translate(0,0) matrix(1,0,0,1,469.5,142.5)"><g transform="translate(4,4) scale(1.01,1.0133333333333334)"><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,0.75)"><g><path fill="url(#WLczqDFsktCx)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1.3333333333333333)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#WLczqDFsktCx)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 75 Q 100 75 100 75 L 0 75 Q 0 75 0 75 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,471.5,142.5) translate(8,31)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="24.326171875" y="11">Host2</text></g><g transform="scale(1,1) matrix(1,0,0,1,278,126) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="11">eth0 2001:db8:0::1/64</text></g><g transform="scale(1,1) matrix(1,0,0,1,460.5,128) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="11">eth0 2001:db8:0::2/64</text></g><g transform="scale(1,1) matrix(1,0,0,1,444.5,223) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="23.2978515625" y="11">docker0 fe80::1/64</text></g><g transform="scale(1,1) matrix(1,0,0,1,262,224) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="23.2978515625" y="11">docker0 fe80::1/64</text></g><g transform="translate(0,0) matrix(1,0,0,1,58,298)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#EOtrFaZZJZro)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#EOtrFaZZJZro)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,60,298) translate(8,43.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="5.3125" y="11">Container1-1</text></g><g transform="translate(0,0) matrix(1,0,0,1,244,299)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#itkkpRbaglyb)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#itkkpRbaglyb)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,246,299) translate(8,43.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="5.3125" y="11">Container1-2</text></g><g transform="scale(1,1) matrix(1,0,0,1,34.000000000000014,283) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="14.614257812499986" y="11">eth0 2001:db8:1::1/64</text></g><g transform="translate(0,0) matrix(1,0,0,1,501.5,298)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#MpRBXNFHBTHf)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#MpRBXNFHBTHf)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,503.5,298) translate(8,43.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="5.3125" y="11">Container2-1</text></g><g transform="translate(0,0) matrix(1,0,0,1,694.5,298)"><g transform="translate(4,4) scale(1.01,1.01)"><g><g transform="translate(0,0) scale(1,1)"><g><path fill="#000000" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" opacity="0.294117647"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="none" stroke="rgb(0,0,0)" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-opacity="0" stroke-miterlimit="10" stroke-width="2" opacity="0.294117647"/></g></g></g></g></g><g><g transform="translate(0,0) scale(1,1)"><g><path fill="url(#tVySeLJzhfuC)" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><g transform="scale(1,1)"><path fill="none" stroke="none" d="M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z"/><path fill="url(#tVySeLJzhfuC)" stroke="#333333" d="M 0 0 M 0 0 L 100 0 Q 100 0 100 0 L 100 100 Q 100 100 100 100 L 0 100 Q 0 100 0 100 L 0 0 Q 0 0 0 0 Z" stroke-miterlimit="10" stroke-width="2"/></g></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,696.5,298) translate(8,43.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="5.3125" y="11">Container2-2</text></g><g transform="scale(1,1) matrix(1,0,0,1,17.5,148) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="11">ip -6 route add 2001:db8:0::/64 dev eth0</text><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="25">ip -6 route add 2001:db8:2::/64 via 2001:db8:0::2</text></g><g transform="scale(1,1) matrix(1,0,0,1,488.75,408) translate(2,1.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="55.462890625" y="11">ip -6 route add default via fe80::1 dev eth0</text></g><g transform="scale(1,1) matrix(1,0,0,1,33.50000000000003,409) translate(2,1.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="58.83789062499997" y="11">ip -6 route add default via fe80::1 dev eth0</text></g><g transform="matrix(1,0,0,1,-17.000680271168676,190.75)"><g transform="translate(0,0)"><g transform="translate(-785,-195) translate(802.0006802711687,4.25) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#000000" d="M 863.7500000000001 194.75 M 863.7500000000001 194.75 L 855.750000325228 194.75228115011907 M 855.750000325228 194.75228115011907 M 847.750000650456 194.75456230023815 L 839.7500009756839 194.75684345035722 M 839.7500009756839 194.75684345035722 M 831.7500013009118 194.7591246004763 L 823.7500016261397 194.76140575059537 M 823.7500016261397 194.76140575059537 M 815.7500019513676 194.76368690071445 L 807.7500022765955 194.76596805083352 M 807.7500022765955 194.76596805083352 M 799.7500026018234 194.7682492009526 L 791.7500029270514 194.77053035107167 M 791.7500029270514 194.77053035107167 M 783.7500032522793 194.77281150119074 L 775.7500035775072 194.77509265130982 M 775.7500035775072 194.77509265130982 M 767.7500039027351 194.7773738014289 L 759.750004227963 194.77965495154797 M 759.750004227963 194.77965495154797 M 751.7500045531909 194.78193610166704 L 743.7500048784188 194.78421725178612 M 743.7500048784188 194.78421725178612 M 735.7500052036468 194.7864984019052 L 727.7500055288747 194.78877955202427 M 727.7500055288747 194.78877955202427 M 719.7500058541026 194.79106070214334 L 711.7500061793305 194.79334185226242 M 711.7500061793305 194.79334185226242 M 703.7500065045584 194.7956230023815 L 695.7500068297863 194.79790415250056 M 695.7500068297863 194.79790415250056 M 687.7500071550143 194.80018530261964 L 679.7500074802422 194.8024664527387 M 679.7500074802422 194.8024664527387 M 671.7500078054701 194.8047476028578 L 663.750008130698 194.80702875297686 M 663.750008130698 194.80702875297686 M 655.7500084559259 194.80930990309594 L 647.7500087811538 194.811591053215 M 647.7500087811538 194.811591053215 M 639.7500091063818 194.81387220333409 L 631.7500094316097 194.81615335345316 M 631.7500094316097 194.81615335345316 M 623.7500097568376 194.81843450357223 L 615.7500100820655 194.8207156536913 M 615.7500100820655 194.8207156536913 M 607.7500104072934 194.82299680381038 L 599.7500107325213 194.82527795392943 M 599.7500107325213 194.82527795392943 M 591.7500110577492 194.8275591040485 L 583.7500113829772 194.82984025416755 M 583.7500113829772 194.82984025416755 M 575.7500117082051 194.83212140428662 L 567.750012033433 194.83440255440567 M 567.750012033433 194.83440255440567 M 559.7500123586609 194.83668370452475 L 551.7500126838888 194.8389648546438 M 551.7500126838888 194.8389648546438 M 543.7500130091167 194.84124600476287 L 535.7500133343447 194.8435271548819 M 535.7500133343447 194.8435271548819 M 527.7500136595726 194.845808305001 L 519.7500139848005 194.84808945512003 M 519.7500139848005 194.84808945512003 M 511.75001431002835 194.8503706052391 L 503.7500146352562 194.85265175535815 M 503.7500146352562 194.85265175535815 M 495.75001496048407 194.85493290547723 L 487.7500152857119 194.85721405559627 M 487.7500152857119 194.85721405559627 M 479.7500156109398 194.85949520571535 L 471.75001593616764 194.8617763558344 M 471.75001593616764 194.8617763558344 M 463.7500162613955 194.86405750595347 L 455.75001658662336 194.86633865607251 M 455.75001658662336 194.86633865607251 M 447.7500169118512 194.8686198061916 L 439.7500172370791 194.87090095631063 M 439.7500172370791 194.87090095631063 M 431.75001756230694 194.8731821064297 L 423.7500178875348 194.87546325654876 M 423.7500178875348 194.87546325654876 M 415.75001821276265 194.87774440666783 L 407.7500185379905 194.88002555678688 M 407.7500185379905 194.88002555678688 M 399.7500188632184 194.88230670690595 L 391.75001918844623 194.884587857025 M 391.75001918844623 194.884587857025 M 383.7500195136741 194.88686900714407 L 375.75001983890195 194.88915015726312 M 375.75001983890195 194.88915015726312 M 367.7500201641298 194.8914313073822 L 359.75002048935767 194.89371245750124 M 359.75002048935767 194.89371245750124 M 351.7500208145855 194.8959936076203 L 343.7500211398134 194.89827475773936 M 343.7500211398134 194.89827475773936 M 335.75002146504124 194.90055590785843 L 327.7500217902691 194.90283705797748 M 327.7500217902691 194.90283705797748 M 319.75002211549696 194.90511820809655 L 311.7500224407248 194.9073993582156 M 311.7500224407248 194.9073993582156 M 303.7500227659527 194.90968050833467 L 295.75002309118054 194.91196165845372 M 295.75002309118054 194.91196165845372 M 287.7500234164084 194.9142428085728 L 279.75002374163626 194.91652395869184 M 279.75002374163626 194.91652395869184 M 271.7500240668641 194.91880510881091 L 263.750024392092 194.92108625892996 M 263.750024392092 194.92108625892996 M 255.75002471731983 194.92336740904904 L 247.75002504254772 194.92564855916808 M 247.75002504254772 194.92564855916808 M 239.75002536777558 194.92792970928716 L 231.75002569300347 194.9302108594062 M 231.75002569300347 194.9302108594062 M 223.75002601823135 194.93249200952528 L 215.7500263434592 194.93477315964432 M 215.7500263434592 194.93477315964432 M 207.7500266686871 194.9370543097634 L 199.75002699391496 194.93933545988244 M 199.75002699391496 194.93933545988244 M 191.75002731914284 194.94161661000152 L 183.75002764437073 194.94389776012056 M 183.75002764437073 194.94389776012056 M 175.75002796959862 194.94617891023964 L 167.75002829482648 194.94846006035868 M 167.75002829482648 194.94846006035868 M 159.75002862005437 194.95074121047776 L 151.75002894528222 194.9530223605968 M 151.75002894528222 194.9530223605968 M 143.7500292705101 194.95530351071588 L 135.750029595738 194.95758466083493 M 135.750029595738 194.95758466083493 M 127.75002992096587 194.959865810954 L 119.75003024619375 194.96214696107305 M 119.75003024619375 194.96214696107305 M 111.75003057142162 194.96442811119212 L 103.75003089664949 194.96670926131117 M 103.75003089664949 194.96670926131117 M 95.75003122187736 194.96899041143024 L 87.75003154710524 194.9712715615493 M 87.75003154710524 194.9712715615493 M 79.75003187233311 194.97355271166836 L 71.75003219756098 194.9758338617874 M 71.75003219756098 194.9758338617874 M 63.75003252278886 194.97811501190648 L 55.75003284801673 194.98039616202553 M 55.75003284801673 194.98039616202553 M 47.7500331732446 194.9826773121446 L 39.750033498472476 194.98495846226365 M 39.750033498472476 194.98495846226365 M 31.75003382370035 194.98723961238272 L 23.750034148928222 194.98952076250177 M 23.750034148928222 194.98952076250177 M 15.750034474156095 194.99180191262084 L 7.750034799383968 194.9940830627399 M 7.750034799383968 194.9940830627399 M -0.2499648753881587 194.99636421285896 L -8.249964550160286 194.998645362978 M -8.249964550160286 194.998645362978" stroke-miterlimit="10"/></g></g></g></g><g transform="scale(1,1) matrix(1,0,0,1,582.5,151) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="11">ip -6 route add 2001:db8:0::/64 dev eth0</text><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="25">ip -6 route add 2001:db8:1::/64 via 2001:db8:0::1</text><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="259.53515625" y="25">&#160;</text></g><g transform="scale(1,1) matrix(1,0,0,1,221,283) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="14.6142578125" y="11">eth0 2001:db8:1::2/64</text></g><g transform="scale(1,1) matrix(1,0,0,1,479,284) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="14.6142578125" y="11">eth0 2001:db8:2::1/64</text></g><g transform="scale(1,1) matrix(1,0,0,1,670,284) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="14.6142578125" y="11">eth0 2001:db8:2::2/64</text></g><g transform="scale(1,1) matrix(1,0,0,1,317,436.5) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="10px" font-style="italic" font-weight="normal" text-decoration="none" x="0.7084960937500142" y="9">containers' link-local addresses are not displayed</text></g><g transform="scale(1,1) matrix(1,0,0,1,17.5,205.5) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="11">ip -6 route add 2001:db8:1::/64&#160;dev docker0</text></g><g transform="scale(1,1) matrix(1,0,0,1,583,204) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="0" y="11">ip -6 route add 2001:db8:2::/64&#160;dev docker0</text></g><g transform="matrix(1,0,0,1,859.7500000000001,176)"><image width="40" height="275" preserveAspectRatio="none" xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAETCAYAAABENES3AAADD0lEQVR4Xu3d7U3rQBCF4UMBfHQAVAKlQCVAJVAKdEAHQAd8FACaq/WVY8KP4zNIEXotIZLgdYZnJ+txRoQ97fi2t+PxiQDTGUIQwVQgHU8OIpgKpOPJQQRTgXQ8OYhgKpCO787BixHQXRrYNL47wGdJn5JOdzHA0rsdgV1KalHsFCy94xFg3W5R7ArwWtLVYlpvJNXj0dYR4JGkEjuQ/l/GVh6+SzqR9JpE2BHgNr0pplgxDbD0niTV921b6VUurlZMAzyXVF/Tdjam+X72WN2e37dmPA1w+WT1Qqljxi+O31qoCdDKjy07I4igK8Ay44ot90cQQVOAhdoE+7Y7ggi6AizUrhinOi6awpxhoQ4B/73DxYV7oohgoldjEUTQFaDccsUotzjVhTnDQh0CcqpLARFE0BagWLDJFgMQRNAUoFgwwWhDtP9FDjlIDroCnOpcMd5Z4J2FMGdYqENArotTQAQRtAUoFmwy3lmg454lDcVC5kfHPfVDEEFfgGLBN9scgSCCpgDFgglGG4I2RJoyCCLoCrBQu2LL/RFE0BWgonbF6HbS7QxzhoU6BKRXlwIiiKAtQLFgk9HtpNuZJQ3FQuZHry71QxBBX4BiwTej28mHkURZQ7EQ8fFhJCkfggiuEKBYWIG2MQRBBE0BigUT7NvuCCLoCrBQu2J03Om4hznDQh0C0i9OARFE0BagWLDJ6LjTcc+ShmIh86NfnPohiKAvQLHgm9Fxp+MeZQ3FQsRHvzjlQxDBFQIUCyvQ6Ljzv8KStKFYSPRqLIIIugKc6lwxOu503MOcYaEOATnVpYAI/j3Bc0lns1+rbtf5/X722MPivqWQFgtHkp4lHf7wrG+STiS9WlHNdk4DrENdj0p6Www34+dr42v57LdSfJG0P6a3gvmU9CHpONGrA3UI/qQY63UGWIqPQ6yOW6KVe/HWJViBXEi6HRFdSrqLo2uc4imWekXX1qLXOcVTgKVYW4vebwTYMasbx+jMwfbgEOwgZYpTRQQRTAXS8eQggqlAOp4cRDAVSMeTg6ngF4jfAyNg0jvuAAAAAElFTkSuQmCC" transform="translate(0,0)"/></g><g transform="scale(1,1) matrix(-1.8369701987210297e-16,-1,1,-1.8369701987210297e-16,863.7500000000001,389.5) translate(2,0.5)"><text fill="#000000" stroke="none" font-family="Arial" font-size="12px" font-style="normal" font-weight="normal" text-decoration="none" x="19.3046875" y="11">managed by Docker</text></g><g transform="matrix(1,0,0,1,853.5000000000001,428)"><g transform="translate(0,0)"><g transform="translate(-898.7500000000001,-432) translate(45.25,4) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#000000" d="M 884.7683424505416 432 L 857.5000000000001 432" stroke-miterlimit="10"/></g></g></g></g><g transform="matrix(1,0,0,1,856.5000000000001,191)"><g transform="translate(0,0)"><g transform="translate(-901.7500000000001,-195) translate(45.25,4) matrix(1,0,0,1,0,0)"><g><path fill="none" stroke="#000000" d="M 888.767693574114 195 L 860.5000000000001 195" stroke-miterlimit="10"/></g></g></g></g></g></svg>
\ No newline at end of file
diff --git a/docs/sources/articles/ambassador_pattern_linking.md b/docs/sources/articles/ambassador_pattern_linking.md
index 9b1b032..2f16826 100644
--- a/docs/sources/articles/ambassador_pattern_linking.md
+++ b/docs/sources/articles/ambassador_pattern_linking.md
@@ -1,8 +1,8 @@
-page_title: Link via an Ambassador Container
+page_title: Link via an ambassador container
 page_description: Using the Ambassador pattern to abstract (network) services
 page_keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
 
-# Link via an Ambassador Container
+# Link via an ambassador container
 
 ## Introduction
 
@@ -30,27 +30,27 @@
 Using the `svendowideit/ambassador` container, the link wiring is
 controlled entirely from the `docker run` parameters.
 
-## Two host Example
+## Two host example
 
 Start actual Redis server on one Docker host
 
-    big-server $ sudo docker run -d --name redis crosbymichael/redis
+    big-server $ docker run -d --name redis crosbymichael/redis
 
 Then add an ambassador linked to the Redis server, mapping a port to the
 outside world
 
-    big-server $ sudo docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador
+    big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador
 
 On the other host, you can set up another ambassador setting environment
 variables for each remote port we want to proxy to the `big-server`
 
-    client-server $ sudo docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
+    client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
 
 Then on the `client-server` host, you can use a Redis client container
 to talk to the remote Redis server, just by linking to the local Redis
 ambassador.
 
-    client-server $ sudo docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
+    client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
     redis 172.17.0.160:6379> ping
     PONG
 
@@ -62,19 +62,19 @@
 On the Docker host (192.168.1.52) that Redis will run on:
 
     # start actual redis server
-    $ sudo docker run -d --name redis crosbymichael/redis
+    $ docker run -d --name redis crosbymichael/redis
 
     # get a redis-cli container for connection testing
-    $ sudo docker pull relateiq/redis-cli
+    $ docker pull relateiq/redis-cli
 
     # test the redis server by talking to it directly
-    $ sudo docker run -t -i --rm --link redis:redis relateiq/redis-cli
+    $ docker run -t -i --rm --link redis:redis relateiq/redis-cli
     redis 172.17.0.136:6379> ping
     PONG
     ^D
 
     # add redis ambassador
-    $ sudo docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh
+    $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh
 
 In the `redis_ambassador` container, you can see the linked Redis
 containers `env`:
@@ -96,9 +96,9 @@
 This environment is used by the ambassador `socat` script to expose Redis
 to the world (via the `-p 6379:6379` port mapping):
 
-    $ sudo docker rm redis_ambassador
+    $ docker rm redis_ambassador
     $ sudo ./contrib/mkimage-unittest.sh
-    $ sudo docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh
+    $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh
 
     $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
 
@@ -107,14 +107,14 @@
 Now go to a different server:
 
     $ sudo ./contrib/mkimage-unittest.sh
-    $ sudo docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh
+    $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh
 
     $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
 
 And get the `redis-cli` image so we can talk over the ambassador bridge.
 
-    $ sudo docker pull relateiq/redis-cli
-    $ sudo docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
+    $ docker pull relateiq/redis-cli
+    $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
     redis 172.17.0.160:6379> ping
     PONG
 
diff --git a/docs/sources/articles/b2d_volume_resize.md b/docs/sources/articles/b2d_volume_resize.md
index 1b39b49..53c8590 100644
--- a/docs/sources/articles/b2d_volume_resize.md
+++ b/docs/sources/articles/b2d_volume_resize.md
@@ -1,5 +1,5 @@
-page_title: Resizing a Boot2Docker Volume	
-page_description: Resizing a Boot2Docker Volume in VirtualBox with GParted
+page_title: Resizing a Boot2Docker volume	
+page_description: Resizing a Boot2Docker volume in VirtualBox with GParted
 page_keywords: boot2docker, volume, virtualbox
 
 # Getting “no space left on device” errors with Boot2Docker?
@@ -60,7 +60,7 @@
 ## 5. Add the new VDI image 
 
 In the settings for the Boot2Docker image in VirtualBox, remove the VMDK image 
-from the SATA contoller and add the VDI image.
+from the SATA controller and add the VDI image.
 
 <img src="/articles/b2d_volume_images/add_volume.png">
 
diff --git a/docs/sources/articles/baseimages.md b/docs/sources/articles/baseimages.md
index 5a5addd..a1a7665 100644
--- a/docs/sources/articles/baseimages.md
+++ b/docs/sources/articles/baseimages.md
@@ -1,8 +1,8 @@
-page_title: Create a Base Image
+page_title: Create a base image
 page_description: How to create base images
 page_keywords: Examples, Usage, base image, docker, documentation, examples
 
-# Create a Base Image
+# Create a base image
 
 So you want to create your own [*Base Image*](
 /terms/image/#base-image)? Great!
@@ -22,9 +22,9 @@
 It can be as simple as this to create an Ubuntu base image:
 
     $ sudo debootstrap raring raring > /dev/null
-    $ sudo tar -C raring -c . | sudo docker import - raring
+    $ sudo tar -C raring -c . | docker import - raring
     a29c15f1bf7a
-    $ sudo docker run raring cat /etc/lsb-release
+    $ docker run raring cat /etc/lsb-release
     DISTRIB_ID=Ubuntu
     DISTRIB_RELEASE=13.04
     DISTRIB_CODENAME=raring
@@ -65,4 +65,4 @@
 * There's a [complete guide to all the instructions](/reference/builder/) available for use in a `Dockerfile` in the reference section.
 * To help you write a clear, readable, maintainable `Dockerfile`, we've also
 written a [`Dockerfile` Best Practices guide](/articles/dockerfile_best-practices).
-* If you're working on an Official Repo, be sure to check out the [Official Repo Guidelines](/docker-hub/official_repos/).
+* If your goal is to create a new Official Repository, be sure to read up on Docker's [Official Repositories](/docker-hub/official_repos/).
diff --git a/docs/sources/articles/basics.md b/docs/sources/articles/basics.md
index 4cdcab4..7d7c154 100644
--- a/docs/sources/articles/basics.md
+++ b/docs/sources/articles/basics.md
@@ -4,26 +4,30 @@
 
 # First steps with Docker
 
-## Check your Docker install
-
 This guide assumes you have a working installation of Docker. To check
 your Docker install, run the following command:
 
     # Check that you have a working install
-    $ sudo docker info
+    $ docker info
 
 If you get `docker: command not found` or something like
 `/var/lib/docker/repositories: permission denied` you may have an
 incomplete Docker installation or insufficient privileges to access
-Docker on your machine.
+Docker on your machine. Please 
 
-Please refer to [*Installation*](/installation)
-for installation instructions.
+Additionally, depending on your Docker system configuration, you may be required
+to preface each `docker` command with `sudo`. To avoid having to use `sudo` with
+the `docker` command, your system administrator can create a Unix group called
+`docker` and add users to it.
+
+For more information about installing Docker or `sudo` configuration, refer to
+the [installation](/installation) instructions for your operating system.
+
 
 ## Download a pre-built image
 
     # Download an ubuntu image
-    $ sudo docker pull ubuntu
+    $ docker pull ubuntu
 
 This will find the `ubuntu` image by name on
 [*Docker Hub*](/userguide/dockerrepos/#searching-for-images)
@@ -46,7 +50,7 @@
     # To detach the tty without exiting the shell,
     # use the escape sequence Ctrl-p + Ctrl-q
     # note: This will continue to exist in a stopped state once exited (see "docker ps -a")
-    $ sudo docker run -i -t ubuntu /bin/bash
+    $ docker run -i -t ubuntu /bin/bash
 
 ## Bind Docker to another host/port or a Unix socket
 
@@ -92,7 +96,7 @@
 
 Download an `ubuntu` image:
 
-    $ sudo docker -H :5555 pull ubuntu
+    $ docker -H :5555 pull ubuntu
 
 You can use multiple `-H`, for example, if you want to listen on both
 TCP and a Unix socket
@@ -100,60 +104,60 @@
     # Run docker in daemon mode
     $ sudo <path to>/docker -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock -d &
     # Download an ubuntu image, use default Unix socket
-    $ sudo docker pull ubuntu
+    $ docker pull ubuntu
     # OR use the TCP port
-    $ sudo docker -H tcp://127.0.0.1:2375 pull ubuntu
+    $ docker -H tcp://127.0.0.1:2375 pull ubuntu
 
 ## Starting a long-running worker process
 
     # Start a very useful long-running process
-    $ JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
+    $ JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
 
     # Collect the output of the job so far
-    $ sudo docker logs $JOB
+    $ docker logs $JOB
 
     # Kill the job
-    $ sudo docker kill $JOB
+    $ docker kill $JOB
 
 ## Listing containers
 
-    $ sudo docker ps # Lists only running containers
-    $ sudo docker ps -a # Lists all containers
+    $ docker ps # Lists only running containers
+    $ docker ps -a # Lists all containers
 
 ## Controlling containers
 
     # Start a new container
-    $ JOB=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
+    $ JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
 
     # Stop the container
-    $ sudo docker stop $JOB
+    $ docker stop $JOB
 
     # Start the container
-    $ sudo docker start $JOB
+    $ docker start $JOB
 
     # Restart the container
-    $ sudo docker restart $JOB
+    $ docker restart $JOB
 
     # SIGKILL a container
-    $ sudo docker kill $JOB
+    $ docker kill $JOB
 
     # Remove a container
-    $ sudo docker stop $JOB # Container must be stopped to remove it
-    $ sudo docker rm $JOB
+    $ docker stop $JOB # Container must be stopped to remove it
+    $ docker rm $JOB
 
 ## Bind a service on a TCP port
 
     # Bind port 4444 of this container, and tell netcat to listen on it
-    $ JOB=$(sudo docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444)
+    $ JOB=$(docker run -d -p 4444 ubuntu:12.10 /bin/nc -l 4444)
 
     # Which public port is NATed to my container?
-    $ PORT=$(sudo docker port $JOB 4444 | awk -F: '{ print $2 }')
+    $ PORT=$(docker port $JOB 4444 | awk -F: '{ print $2 }')
 
     # Connect to the public port
     $ echo hello world | nc 127.0.0.1 $PORT
 
     # Verify that the network connection worked
-    $ echo "Daemon received: $(sudo docker logs $JOB)"
+    $ echo "Daemon received: $(docker logs $JOB)"
 
 ## Committing (saving) a container state
 
@@ -166,10 +170,10 @@
 `docker images` command.
 
     # Commit your container to a new named image
-    $ sudo docker commit <container_id> <some_name>
+    $ docker commit <container_id> <some_name>
 
-    # List your containers
-    $ sudo docker images
+    # List your images
+    $ docker images
 
 You now have an image state from which you can create new instances.
 
diff --git a/docs/sources/articles/cfengine_process_management.md b/docs/sources/articles/cfengine_process_management.md
index e32b266..b043726 100644
--- a/docs/sources/articles/cfengine_process_management.md
+++ b/docs/sources/articles/cfengine_process_management.md
@@ -1,8 +1,8 @@
-page_title: Process Management with CFEngine
+page_title: Process management with CFEngine
 page_description: Managing containerized processes with CFEngine
 page_keywords: cfengine, process, management, usage, docker, documentation
 
-# Process Management with CFEngine
+# Process management with CFEngine
 
 Create Docker containers with managed processes.
 
@@ -94,7 +94,7 @@
 Start the container with `apache2` and `sshd` running and managed, forwarding
 a port to our SSH instance:
 
-    $ sudo docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"
+    $ docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"
 
 We now clearly see one of the benefits of the cfe-docker integration: it
 allows to start several processes as part of a normal `docker run` command.
diff --git a/docs/sources/articles/chef.md b/docs/sources/articles/chef.md
index cb70215..84ccdff 100644
--- a/docs/sources/articles/chef.md
+++ b/docs/sources/articles/chef.md
@@ -1,4 +1,4 @@
-page_title: Chef Usage
+page_title: Using Chef
 page_description: Installation and using Docker via Chef
 page_keywords: chef, installation, usage, docker, documentation
 
@@ -43,7 +43,7 @@
 
 This is equivalent to running:
 
-    $ sudo docker pull samalba/docker-registry
+    $ docker pull samalba/docker-registry
 
 There are attributes available to control how long the cookbook will
 allow for downloading (5 minute default).
@@ -68,7 +68,7 @@
 
 This is equivalent to running the following command, but under upstart:
 
-    $ sudo docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry
+    $ docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry
 
 The resources will accept a single string or an array of values for any
 Docker flags that allow multiple values.
diff --git a/docs/sources/articles/configuring.md b/docs/sources/articles/configuring.md
new file mode 100644
index 0000000..7bd9241
--- /dev/null
+++ b/docs/sources/articles/configuring.md
@@ -0,0 +1,238 @@
+page_title: Configuring and running Docker
+page_description: Configuring and running the Docker daemon on various distributions
+page_keywords: docker, daemon, configuration, running, process managers
+
+# Configuring and running Docker on various distributions
+
+After successfully installing Docker, the `docker` daemon runs with its default
+configuration.
+
+In a production environment, system administrators typically configure the
+`docker` daemon to start and stop according to an organization's requirements. In most
+cases, the system administrator configures a process manager such as `SysVinit`, `Upstart`,
+or `systemd` to manage the `docker` daemon's start and stop.
+
+### Running the docker daemon directly
+
+The `docker` daemon can be run directly using the `-d` option. By default it listens on
+the Unix socket `unix:///var/run/docker.sock`
+
+    $ docker -d
+
+    INFO[0000] +job init_networkdriver()
+    INFO[0000] +job serveapi(unix:///var/run/docker.sock)
+    INFO[0000] Listening for HTTP on unix (/var/run/docker.sock)
+    ...
+    ...
+
+### Configuring the docker daemon directly
+
+If you're running the `docker` daemon directly by running `docker -d` instead
+of using a process manager, you can append the configuration options to the `docker` run
+command directly. Just like the `-d` option, other options can be passed to the `docker`
+daemon to configure it.
+
+Some of the daemon's options are:
+
+| Flag                  | Description                                               |
+|-----------------------|-----------------------------------------------------------|
+| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. |
+| `-H`,`--host=[]`      | Daemon socket(s) to connect to.                           |
+| `--tls=false`         | Enable or disable TLS. By default, this is false.         |
+
+
+Here is a an example of running the `docker` daemon with configuration options:
+
+    $ docker -d -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376
+
+These options :
+
+- Enable `-D` (debug) mode
+- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
+- Listen for connections on `tcp://192.168.59.3:2376`
+
+The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon)
+with explanations.
+
+## Ubuntu
+
+As of `14.04`, Ubuntu uses Upstart as a process manager. By default, Upstart jobs
+are located in  `/etc/init` and the `docker` Upstart job can be found at `/etc/init/docker.conf`.
+
+After successfully [installing Docker for Ubuntu](/installation/ubuntulinux/),
+you can check the running status using Upstart in this way:
+
+    $ sudo status docker
+
+    docker start/running, process 989
+
+### Running Docker
+
+You can start/stop/restart the `docker` daemon using
+
+    $ sudo start docker
+
+    $ sudo stop docker
+
+    $ sudo restart docker
+
+
+### Configuring Docker
+
+You configure the `docker` daemon in the `/etc/default/docker` file on your
+system. You do this by specifying values in a `DOCKER_OPTS` variable.
+
+To configure Docker options:
+
+1. Log into your host as a user with `sudo` or `root` privileges.
+
+2. If you don't have one, create the `/etc/default/docker` file on your host. Depending on how
+you installed Docker, you may already have this file.
+
+3. Open the file with your favorite editor.
+
+    ```
+    $ sudo vi /etc/default/docker
+    ```
+
+4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the
+`docker` daemon's run command.
+
+```
+    DOCKER_OPTS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376"
+```
+
+These options :
+
+- Enable `-D` (debug) mode
+- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
+- Listen for connections on `tcp://192.168.59.3:2376`
+
+The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon)
+with explanations.
+
+
+5. Save and close the file.
+
+6. Restart the `docker` daemon.
+
+    ```
+    $ sudo restart docker
+    ```
+
+7. Verify that the `docker` daemon is running as specified with the `ps` command.
+
+    ```
+    $ ps aux | grep docker | grep -v grep
+    ```
+
+### Logs
+
+By default logs for Upstart jobs are located in `/var/log/upstart` and the logs for `docker` daemon
+can be located at `/var/log/upstart/docker.log`
+
+    $ tail -f /var/log/upstart/docker.log
+    INFO[0000] Loading containers: done.
+    INFO[0000] docker daemon: 1.6.0 4749651; execdriver: native-0.2; graphdriver: aufs
+    INFO[0000] +job acceptconnections()
+    INFO[0000] -job acceptconnections() = OK (0)
+    INFO[0000] Daemon has completed initialization
+
+
+## CentOS / Red Hat Enterprise Linux / Fedora
+
+As of `7.x`, CentOS and RHEL use `systemd` as the process manager. As of `21`, Fedora uses
+`systemd` as its process manager.
+
+After successfully installing Docker for [CentOS](/installation/centos/)/[Red Hat Enterprise Linux]
+(/installation/rhel/)/[Fedora](/installation/fedora), you can check the running status in this way:
+
+    $ sudo systemctl status docker
+
+### Running Docker
+
+You can start/stop/restart the `docker` daemon using
+
+    $ sudo systemctl start docker
+
+    $ sudo systemctl stop docker
+
+    $ sudo systemctl restart docker
+
+If you want Docker to start at boot, you should also:
+
+    $ sudo systemctl enable docker
+
+### Configuring Docker
+
+You configure the `docker` daemon in the `/etc/sysconfig/docker` file on your
+host. You do this by specifying values in a variable. For CentOS 7.x and RHEL 7.x, the name
+of the variable is `OPTIONS` and for CentOS 6.x and RHEL 6.x, the name of the variable is
+`other_args`. For this section, we will use CentOS 7.x as an example to configure the `docker`
+daemon.
+
+By default, systemd services are located either in `/etc/systemd/service`, `/lib/systemd/system`
+or `/usr/lib/systemd/system`. The `docker.service` file can be found in either of these three
+directories depending on your host.
+
+To configure Docker options:
+
+1. Log into your host as a user with `sudo` or `root` privileges.
+
+2. If you don't have one, create the `/etc/sysconfig/docker` file on your host. Depending on how
+you installed Docker, you may already have this file.
+
+3. Open the file with your favorite editor.
+
+    ```
+    $ sudo vi /etc/sysconfig/docker
+    ```
+
+4. Add a `OPTIONS` variable with the following options. These options are appended to the
+command that starts the `docker` daemon.
+
+```
+    OPTIONS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376"
+```
+
+These options :
+
+- Enable `-D` (debug) mode
+- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively
+- Listen for connections on `tcp://192.168.59.3:2376`
+
+The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon)
+with explanations.
+
+5. Save and close the file.
+
+6. Restart the `docker` daemon.
+
+    ```
+    $ sudo service docker restart
+    ```
+
+7. Verify that the `docker` daemon is running as specified with the `ps` command.
+
+    ```
+    $ ps aux | grep docker | grep -v grep
+    ```
+
+### Logs
+
+systemd has its own logging system called the journal. The logs for the `docker` daemon can
+be viewed using `journalctl -u docker`
+
+    $ sudo journalctl -u docker
+    May 06 00:22:05 localhost.localdomain systemd[1]: Starting Docker Application Container Engine...
+    May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="+job serveapi(unix:///var/run/docker.sock)"
+    May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="Listening for HTTP on unix (/var/run/docker.sock)"
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job init_networkdriver()"
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)"
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start."
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done."
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="docker daemon: 1.5.0-dev fc0329b/1.5.0; execdriver: native-0.2; graphdriver: devicemapper"
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()"
+    May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)"
+
+_Note: Using and configuring journal is an advanced topic and is beyond the scope of this article._
diff --git a/docs/sources/articles/dockerfile_best-practices.md b/docs/sources/articles/dockerfile_best-practices.md
index 2ea7965..2604b22 100644
--- a/docs/sources/articles/dockerfile_best-practices.md
+++ b/docs/sources/articles/dockerfile_best-practices.md
@@ -1,4 +1,4 @@
-page_title: Best Practices for Writing Dockerfiles
+page_title: Best practices for writing Dockerfiles
 page_description: Hints, tips and guidelines for writing clean, reliable Dockerfiles
 page_keywords: Examples, Usage, base image, docker, documentation, dockerfile, best practices, hub, official repo
 
@@ -32,13 +32,14 @@
 destroyed and a new one built and put in place with an absolute minimum of
 set-up and configuration.
 
-### Use [a .dockerignore file](https://docs.docker.com/reference/builder/#the-dockerignore-file)
+### Use a .dockerignore file
 
-For faster uploading and efficiency during `docker build`, you should use
-a `.dockerignore` file to exclude files or directories from the build
-context and final image. For example, unless`.git` is needed by your build
-process or scripts, you should add it to `.dockerignore`, which can save many
-megabytes worth of upload time.
+In most cases, it's best to put each Dockerfile in an empty directory. Then,
+add to that directory only the files needed for building the Dockerfile. To
+increase the build's performance, you can exclude files and directories by
+adding a `.dockerignore` file to that directory as well. This file supports 
+exclusion patterns similar to `.gitignore` files. For information on creating one,
+see the [.dockerignore file](../../reference/builder/#dockerignore-file).
 
 ### Avoid installing unnecessary packages
 
@@ -398,7 +399,15 @@
 
 ### [`ONBUILD`](https://docs.docker.com/reference/builder/#onbuild)
 
-`ONBUILD` is only useful for images that are going to be built `FROM` a given
+An `ONBUILD` command executes after the current `Dockerfile` build completes.
+`ONBUILD` executes in any child image derived `FROM` the current image.  Think
+of the `ONBUILD` command as an instruction the parent `Dockerfile` gives
+to the child `Dockerfile`.
+
+A Docker build executes `ONBUILD` commands before any command in a child
+`Dockerfile`.
+
+`ONBUILD` is useful for images that are going to be built `FROM` a given
 image. For example, you would use `ONBUILD` for a language stack image that
 builds arbitrary user software written in that language within the
 `Dockerfile`, as you can see in [Ruby’s `ONBUILD` variants](https://github.com/docker-library/ruby/blob/master/2.1/onbuild/Dockerfile). 
@@ -411,16 +420,16 @@
 added. Adding a separate tag, as recommended above, will help mitigate this by
 allowing the `Dockerfile` author to make a choice.
 
-## Examples For Official Repositories
+## Examples for Official Repositories
 
-These Official Repos have exemplary `Dockerfile`s:
+These Official Repositories have exemplary `Dockerfile`s:
 
 * [Go](https://registry.hub.docker.com/_/golang/)
 * [Perl](https://registry.hub.docker.com/_/perl/)
 * [Hy](https://registry.hub.docker.com/_/hylang/)
 * [Rails](https://registry.hub.docker.com/_/rails)
 
-## Additional Resources:
+## Additional resources:
 
 * [Dockerfile Reference](https://docs.docker.com/reference/builder/#onbuild)
 * [More about Base Images](https://docs.docker.com/articles/baseimages/)
diff --git a/docs/sources/articles/host_integration.md b/docs/sources/articles/host_integration.md
index cbcb21a..e345176 100644
--- a/docs/sources/articles/host_integration.md
+++ b/docs/sources/articles/host_integration.md
@@ -1,8 +1,8 @@
-page_title: Automatically Start Containers
+page_title: Automatically start containers
 page_description: How to generate scripts for upstart, systemd, etc.
 page_keywords: systemd, upstart, supervisor, docker, documentation, host integration
 
-# Automatically Start Containers
+# Automatically start containers
 
 As of Docker 1.2,
 [restart policies](/reference/commandline/cli/#restart-policies) are the
@@ -18,7 +18,7 @@
 [supervisor](http://supervisord.org/) instead.
 
 
-## Using a Process Manager
+## Using a process manager
 
 Docker does not set any restart policies by default, but be aware that they will
 conflict with most process managers. So don't set restart policies if you are
diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md
index 94d9ca3..d6689bb 100644
--- a/docs/sources/articles/https.md
+++ b/docs/sources/articles/https.md
@@ -1,8 +1,8 @@
-page_title: Protecting the Docker daemon Socket with HTTPS
+page_title: Protecting the Docker daemon socket with HTTPS
 page_description: How to setup and run Docker with HTTPS
 page_keywords: docker, docs, article, example, https, daemon, tls, ca, certificate
 
-# Protecting the Docker daemon Socket with HTTPS
+# Protecting the Docker daemon socket with HTTPS
 
 By default, Docker runs via a non-networked Unix socket. It can also
 optionally communicate using a HTTP socket.
@@ -193,7 +193,7 @@
     $ export DOCKER_CERT_PATH=~/.docker/zone1/
     $ docker --tlsverify ps
 
-### Connecting to the Secure Docker port using `curl`
+### Connecting to the secure Docker port using `curl`
 
 To use `curl` to make test API requests, you need to use three extra command line
 flags:
diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md
index 7247d29..8400d1a 100644
--- a/docs/sources/articles/networking.md
+++ b/docs/sources/articles/networking.md
@@ -1,8 +1,8 @@
-page_title: Network Configuration
+page_title: Network configuration
 page_description: Docker networking
 page_keywords: network, networking, bridge, docker, documentation
 
-# Network Configuration
+# Network configuration
 
 ## TL;DR
 
@@ -41,7 +41,7 @@
 commands to tweak, supplement, or entirely replace Docker's default
 networking configuration.
 
-## Quick Guide to the Options
+## Quick guide to the options
 
 Here is a quick list of the networking-related Docker command-line
 options, in case it helps you find the section below that you are
@@ -56,6 +56,12 @@
  *  `--bip=CIDR` — see
     [Customizing docker0](#docker0)
 
+ *  `--default-gateway=IP_ADDRESS` — see
+    [How Docker networks a container](#container-networking)
+
+ *  `--default-gateway-v6=IP_ADDRESS` — see
+    [IPv6](#ipv6)
+
  *  `--fixed-cidr` — see
     [Customizing docker0](#docker0)
 
@@ -87,6 +93,9 @@
  *  `--mtu=BYTES` — see
     [Customizing docker0](#docker0)
 
+ *  `--userland-proxy=true|false` — see
+    [Binding container ports](#binding-ports)
+
 There are two networking options that can be supplied either at startup
 or when `docker run` is invoked.  When provided at startup, set the
 default value that `docker run` will later use if the options are not
@@ -121,8 +130,25 @@
  *  `-P` or `--publish-all=true|false` — see
     [Binding container ports](#binding-ports)
 
-The following sections tackle all of the above topics in an order that
-moves roughly from simplest to most complex.
+To supply networking options to the Docker server at startup, use the
+`DOCKER_OPTS` variable in the Docker upstart configuration file. For Ubuntu, edit the
+variable in `/etc/default/docker` or `/etc/sysconfig/docker` for CentOS.
+
+The following example illustrates how to configure Docker on Ubuntu to recognize a
+newly built bridge. 
+
+Edit the `/etc/default/docker` file:
+
+    $ echo 'DOCKER_OPTS="-b=bridge0"' >> /etc/default/docker 
+
+Then restart the Docker server.
+
+    $ sudo service docker start
+
+For additional information on bridges, see [building your own
+bridge](#building-your-own-bridge) later on this page.
+
+The following sections tackle all of the above topics in an order that we can move roughly from simplest to most complex.
 
 ## Configuring DNS
 
@@ -296,8 +322,7 @@
     policy to `DROP` if `--icc=false`.
 
 It is a strategic question whether to leave `--icc=true` or change it to
-`--icc=false` (on Ubuntu, by editing the `DOCKER_OPTS` variable in
-`/etc/default/docker` and restarting the Docker server) so that
+`--icc=false` so that
 `iptables` will protect other containers — and the main host — from
 having arbitrary ports probed or accessed by a container that gets
 compromised.
@@ -377,7 +402,7 @@
     ...
     Chain POSTROUTING (policy ACCEPT)
     target     prot opt source               destination
-    MASQUERADE  all  --  172.17.0.0/16       !172.17.0.0/16
+    MASQUERADE  all  --  172.17.0.0/16       0.0.0.0/0
     ...
 
 But if you want containers to accept incoming connections, you will need
@@ -426,11 +451,25 @@
 specify the external interface for one particular binding.
 
 Or if you always want Docker port forwards to bind to one specific IP
-address, you can edit your system-wide Docker server settings (on
-Ubuntu, by editing `DOCKER_OPTS` in `/etc/default/docker`) and add the
+address, you can edit your system-wide Docker server settings and add the
 option `--ip=IP_ADDRESS`.  Remember to restart your Docker server after
 editing this setting.
 
+> **Note**:
+> With hairpin NAT enabled (`--userland-proxy=false`), containers port exposure
+> is achieved purely through iptables rules, and no attempt to bind the exposed
+> port is ever made. This means that nothing prevents shadowing a previously
+> listening service outside of Docker through exposing the same port for a
+> container. In such conflicting situation, Docker created iptables rules will
+> take precedence and route to the container.
+
+The `--userland-proxy` parameter, true by default, provides a userland
+implementation for inter-container and outside-to-container communication. When
+disabled, Docker uses both an additional `MASQUERADE` iptable rule and the
+`net.ipv4.route_localnet` kernel parameter which allow the host machine to
+connect to a local container exposed port through the commonly used loopback
+address: this alternative is preferred for performance reason.
+
 Again, this topic is covered without all of these low-level networking
 details in the [Docker User Guide](/userguide/dockerlinks/) document if you
 would like to use that as your port redirection reference instead.
@@ -484,7 +523,9 @@
 ![](/article-img/ipv6_basic_host_config.svg)
 
 Every new container will get an IPv6 address from the defined subnet. Further
-a default route will be added via the gateway `fe80::1` on `eth0`:
+a default route will be added on `eth0` in the container via the address
+specified by the daemon option `--default-gateway-v6` if present, otherwise
+via `fe80::1`:
 
     docker run -it ubuntu bash -c "ip -6 addr show dev eth0; ip -6 route show"
 
@@ -551,9 +592,9 @@
 
 As soon as the router wants to send an IPv6 packet to the first container it
 will transmit a neighbor solicitation request, asking, who has
-`2001:db8::c009`? But it will get no answer because noone on this subnet has
+`2001:db8::c009`? But it will get no answer because no one on this subnet has
 this address. The container with this address is hidden behind the Docker host.
-The Docker host has to listen to neighbor solication requests for the container
+The Docker host has to listen to neighbor solicitation requests for the container
 address and send a response that itself is the device that is responsible for
 the address. This is done by a Kernel feature called `NDP Proxy`. You can
 enable it by executing
@@ -578,9 +619,9 @@
 address in your Docker subnet. Unfortunately there is no functionality for
 adding a whole subnet by executing one command.
 
-### Docker IPv6 Cluster
+### Docker IPv6 cluster
 
-#### Switched Network Environment
+#### Switched network environment
 Using routable IPv6 addresses allows you to realize communication between
 containers on different hosts. Let's have a look at a simple Docker IPv6 cluster
 example:
@@ -626,9 +667,9 @@
 containers. The configuration above the line is up to the user and can be
 adapted to the individual environment.
 
-#### Routed Network Environment
+#### Routed network environment
 
-In a routed network environment you replace the level 2 switch with a level 3
+In a routed network environment you replace the layer 2 switch with a layer 3
 router. Now the hosts just have to know their default gateway (the router) and
 the route to their own containers (managed by Docker). The router holds all
 routing information about the Docker subnets. When you add or remove a host to
@@ -652,7 +693,7 @@
 Remember the subnet for Docker containers should at least have a size of `/80`.
 This way an IPv6 address can end with the container's MAC address and you
 prevent NDP neighbor cache invalidation issues in the Docker layer. So if you
-have a `/64` for your whole environment use `/68` subnets for the hosts and
+have a `/64` for your whole environment use `/78` subnets for the hosts and
 `/80` for the containers. This way you can use 4096 hosts with 16 `/80` subnets
 each.
 
@@ -692,9 +733,6 @@
 
  *  `--mtu=BYTES` — override the maximum packet length on `docker0`.
 
-On Ubuntu you would add these to the `DOCKER_OPTS` setting in
-`/etc/default/docker` on your Docker host and restarting the Docker
-service.
 
 Once you have one or more containers up and running, you can confirm
 that Docker has properly connected them to the `docker0` bridge by
@@ -723,7 +761,7 @@
 
     # The network, as seen from a container
 
-    $ sudo docker run -i -t --rm base /bin/bash
+    $ docker run -i -t --rm base /bin/bash
 
     $$ ip addr show eth0
     24: eth0: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
@@ -853,12 +891,13 @@
     parameter or generate a random one.
 
 5.  Give the container's `eth0` a new IP address from within the
-    bridge's range of network addresses, and set its default route to
-    the IP address that the Docker host owns on the bridge. The MAC
-    address is generated from the IP address unless otherwise specified.
-    This prevents ARP cache invalidation problems, when a new container
-    comes up with an IP used in the past by another container with another
-    MAC.
+    bridge's range of network addresses. The default route is set to the
+    IP address passed to the Docker daemon using the `--default-gateway`
+    option if specified, otherwise to the IP address that the Docker host
+    owns on the bridge. The MAC address is generated from the IP address
+    unless otherwise specified. This prevents ARP cache invalidation
+    problems, when a new container comes up with an IP used in the past by
+    another container with another MAC.
 
 With these steps complete, the container now possesses an `eth0`
 (virtual) network card and will find itself able to communicate with
@@ -908,14 +947,14 @@
     # At one shell, start a container and
     # leave its shell idle and running
 
-    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    $ docker run -i -t --rm --net=none base /bin/bash
     root@63f36fc01b5f:/#
 
     # At another shell, learn the container process ID
     # and create its namespace entry in /var/run/netns/
     # for the "ip netns" command we will be using below
 
-    $ sudo docker inspect -f '{{.State.Pid}}' 63f36fc01b5f
+    $ docker inspect -f '{{.State.Pid}}' 63f36fc01b5f
     2778
     $ pid=2778
     $ sudo mkdir -p /var/run/netns
@@ -972,7 +1011,7 @@
 what let us finish up the configuration without having to take the
 dangerous step of running the container itself with `--privileged=true`.
 
-## Tools and Examples
+## Tools and examples
 
 Before diving into the following sections on custom network topologies,
 you might be interested in glancing at a few external tools or examples
@@ -1016,18 +1055,18 @@
 
     # Start up two containers in two terminal windows
 
-    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    $ docker run -i -t --rm --net=none base /bin/bash
     root@1f1f4c1f931a:/#
 
-    $ sudo docker run -i -t --rm --net=none base /bin/bash
+    $ docker run -i -t --rm --net=none base /bin/bash
     root@12e343489d2f:/#
 
     # Learn the container process IDs
     # and create their namespace entries
 
-    $ sudo docker inspect -f '{{.State.Pid}}' 1f1f4c1f931a
+    $ docker inspect -f '{{.State.Pid}}' 1f1f4c1f931a
     2989
-    $ sudo docker inspect -f '{{.State.Pid}}' 12e343489d2f
+    $ docker inspect -f '{{.State.Pid}}' 12e343489d2f
     3004
     $ sudo mkdir -p /var/run/netns
     $ sudo ln -s /proc/2989/ns/net /var/run/netns/2989
diff --git a/docs/sources/articles/puppet.md b/docs/sources/articles/puppet.md
index d9a7ceb..a1b3d27 100644
--- a/docs/sources/articles/puppet.md
+++ b/docs/sources/articles/puppet.md
@@ -1,5 +1,5 @@
-page_title: Puppet Usage
-page_description: Installating and using Puppet
+page_title: Using Puppet
+page_description: Installing and using Puppet
 page_keywords: puppet, installation, usage, docker, documentation
 
 # Using Puppet
@@ -47,7 +47,7 @@
 
 This is equivalent to running:
 
-    $ sudo docker pull ubuntu
+    $ docker pull ubuntu
 
 Note that it will only be downloaded if an image of that name does not
 already exist. This is downloading a large binary so on first run can
@@ -71,7 +71,7 @@
 
 This is equivalent to running the following command, but under upstart:
 
-    $ sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done"
+    $ docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done"
 
 Run also contains a number of optional parameters:
 
diff --git a/docs/sources/articles/registry_mirror.md b/docs/sources/articles/registry_mirror.md
index adc470d..e928af1 100644
--- a/docs/sources/articles/registry_mirror.md
+++ b/docs/sources/articles/registry_mirror.md
@@ -29,11 +29,11 @@
 You will need to pass the `--registry-mirror` option to your Docker daemon on
 startup:
 
-    sudo docker --registry-mirror=http://<my-docker-mirror-host> -d
+    docker --registry-mirror=http://<my-docker-mirror-host> -d
 
 For example, if your mirror is serving on `http://10.0.0.2:5000`, you would run:
 
-    sudo docker --registry-mirror=http://10.0.0.2:5000 -d
+    docker --registry-mirror=http://10.0.0.2:5000 -d
 
 **NOTE:**
 Depending on your local host setup, you may be able to add the
@@ -47,7 +47,7 @@
 functionality. For example, to run a local registry mirror that serves on
 port `5000` and mirrors the content at `registry-1.docker.io`:
 
-    sudo docker run -p 5000:5000 \
+    docker run -p 5000:5000 \
         -e STANDALONE=false \
         -e MIRROR_SOURCE=https://registry-1.docker.io \
         -e MIRROR_SOURCE_INDEX=https://index.docker.io \
@@ -58,7 +58,7 @@
 With your mirror running, pull an image that you haven't pulled before (using
 `time` to time it):
 
-    $ time sudo docker pull node:latest
+    $ time docker pull node:latest
     Pulling repository node
     [...]
     
@@ -68,11 +68,11 @@
 
 Now, remove the image from your local machine:
 
-    $ sudo docker rmi node:latest
+    $ docker rmi node:latest
 
 Finally, re-pull the image:
 
-    $ time sudo docker pull node:latest
+    $ time docker pull node:latest
     Pulling repository node
     [...]
     
diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md
index 3276409..a887d43 100644
--- a/docs/sources/articles/runmetrics.md
+++ b/docs/sources/articles/runmetrics.md
@@ -1,8 +1,8 @@
-page_title: Runtime Metrics
+page_title: Runtime metrics
 page_description: Measure the behavior of running containers
 page_keywords: docker, metrics, CPU, memory, disk, IO, run, runtime
 
-# Runtime Metrics
+# Runtime metrics
 
 Linux Containers rely on [control groups](
 https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt)
@@ -11,7 +11,7 @@
 obtain network usage metrics as well. This is relevant for "pure" LXC
 containers, as well as for Docker containers.
 
-## Control Groups
+## Control groups
 
 Control groups are exposed through a pseudo-filesystem. In recent
 distros, you should find this filesystem under `/sys/fs/cgroup`. Under
@@ -28,7 +28,7 @@
 
     $ grep cgroup /proc/mounts
 
-## Enumerating Cgroups
+## Enumerating cgroups
 
 You can look into `/proc/cgroups` to see the different control group subsystems
 known to the system, the hierarchy they belong to, and how many groups they contain.
@@ -39,7 +39,7 @@
 a particular group”, while `/lxc/pumpkin` means that the process is likely to be
 a member of a container named `pumpkin`.
 
-## Finding the Cgroup for a Given Container
+## Finding the cgroup for a given container
 
 For each container, one cgroup will be created in each hierarchy. On
 older systems with older versions of the LXC userland tools, the name of
@@ -55,12 +55,12 @@
 Putting everything together to look at the memory metrics for a Docker
 container, take a look at `/sys/fs/cgroup/memory/lxc/<longid>/`.
 
-## Metrics from Cgroups: Memory, CPU, Block IO
+## Metrics from cgroups: memory, CPU, block I/O
 
 For each subsystem (memory, CPU, and block I/O), you will find one or
 more pseudo-files containing statistics.
 
-### Memory Metrics: `memory.stat`
+### Memory metrics: `memory.stat`
 
 Memory metrics are found in the "memory" cgroup. Note that the memory
 control group adds a little overhead, because it does very fine-grained
@@ -262,7 +262,7 @@
    not perform more I/O, its queue size can increase just because the
    device load increases because of other devices.
 
-## Network Metrics
+## Network metrics
 
 Network metrics are not exposed directly by control groups. There is a
 good explanation for that: network interfaces exist within the context
diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md
index a26f79c..b3174c3 100644
--- a/docs/sources/articles/security.md
+++ b/docs/sources/articles/security.md
@@ -1,8 +1,8 @@
-page_title: Docker Security
+page_title: Docker security
 page_description: Review of the Docker Daemon attack surface
 page_keywords: Docker, Docker documentation, security
 
-# Docker Security
+# Docker security
 
 There are three major areas to consider when reviewing Docker security:
 
@@ -14,11 +14,11 @@
  - the "hardening" security features of the kernel and how they
    interact with containers.
 
-## Kernel Namespaces
+## Kernel namespaces
 
 Docker containers are very similar to LXC containers, and they have
-similar security features. When you start a container with `docker
-run`, behind the scenes Docker creates a set of namespaces and control
+similar security features. When you start a container with
+`docker run`, behind the scenes Docker creates a set of namespaces and control
 groups for the container.
 
 **Namespaces provide the first and most straightforward form of
@@ -53,7 +53,7 @@
 merged within the mainstream kernel. And OpenVZ was initially released
 in 2005, so both the design and the implementation are pretty mature.
 
-## Control Groups
+## Control groups
 
 Control Groups are another key component of Linux Containers. They
 implement resource accounting and limiting. They provide many
@@ -72,7 +72,7 @@
 Control Groups have been around for a while as well: the code was
 started in 2006, and initially merged in kernel 2.6.24.
 
-## Docker Daemon Attack Surface
+## Docker daemon attack surface
 
 Running containers (and applications) with Docker implies running the
 Docker daemon. This daemon currently requires `root` privileges, and you
@@ -103,7 +103,7 @@
 use traditional UNIX permission checks to limit access to the control
 socket.
 
-You can also expose the REST API over HTTP if you explicitly decide so.
+You can also expose the REST API over HTTP if you explicitly decide to do so.
 However, if you do that, being aware of the above mentioned security
 implication, you should ensure that it will be reachable only from a
 trusted network or VPN; or protected with e.g., `stunnel` and client SSL
@@ -132,7 +132,7 @@
 favorite admin tools (probably at least an SSH server), as well as
 existing monitoring/supervision processes (e.g., NRPE, collectd, etc).
 
-## Linux Kernel Capabilities
+## Linux kernel capabilities
 
 By default, Docker starts containers with a restricted set of
 capabilities. What does that mean?
@@ -206,7 +206,7 @@
 The best practice for users would be to remove all capabilities except
 those explicitly required for their processes.
 
-## Other Kernel Security Features
+## Other kernel security features
 
 Capabilities are just one of the many security features provided by
 modern Linux kernels. It is also possible to leverage existing,
@@ -249,11 +249,11 @@
 by directly using the clone syscall, or utilizing the 'unshare'
 utility. Using this, some users may find it possible to drop
 more capabilities from their process as user namespaces provide
-an artifical capabilities set. Likewise, however, this artifical
+an artificial capabilities set. Likewise, however, this artificial
 capabilities set may require use of 'capsh' to restrict the
 user-namespace capabilities set when using 'unshare'.
 
-Eventually, it is expected that Docker will direct, native support
+Eventually, it is expected that Docker will have direct, native support
 for user-namespaces, simplifying the process of hardening containers.
 
 ## Conclusions
diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md
index fddd146..18631ee 100644
--- a/docs/sources/articles/systemd.md
+++ b/docs/sources/articles/systemd.md
@@ -1,8 +1,8 @@
-page_title: Controlling and configuring Docker using Systemd
-page_description: Controlling and configuring Docker using Systemd
+page_title: Controlling and configuring Docker using systemd
+page_description: Controlling and configuring Docker using systemd
 page_keywords: docker, daemon, systemd, configuration
 
-# Controlling and configuring Docker using Systemd
+# Controlling and configuring Docker using systemd
 
 Many Linux distributions use systemd to start the Docker daemon. This document
 shows a few examples of how to customise Docker's settings.
@@ -30,8 +30,8 @@
 (often pointing to `/etc/sysconfig/docker`) then you can modify the
 referenced file.
 
-Or, you may need to edit the `docker.service` file, which can be in `/usr/lib/systemd/system`
-or `/etc/systemd/service`.
+Or, you may need to edit the `docker.service` file, which can be in
+`/usr/lib/systemd/system`, `/etc/systemd/service`, or `/lib/systemd/system`.
 
 ### Runtime directory and storage driver
 
@@ -64,7 +64,7 @@
 You can also set other environment variables in this file, for example, the
 `HTTP_PROXY` environment variables described below.
 
-### HTTP Proxy
+### HTTP proxy
 
 This example overrides the default `docker.service` file.
 
diff --git a/docs/sources/articles/using_supervisord.md b/docs/sources/articles/using_supervisord.md
index 5806707..0c55570 100644
--- a/docs/sources/articles/using_supervisord.md
+++ b/docs/sources/articles/using_supervisord.md
@@ -91,13 +91,13 @@
 
 We can now build our new image.
 
-    $ sudo docker build -t <yourname>/supervisord .
+    $ docker build -t <yourname>/supervisord .
 
 ## Running our Supervisor container
 
 Once We've got a built image we can launch a container from it.
 
-    $ sudo docker run -p 22 -p 80 -t -i <yourname>/supervisord
+    $ docker run -p 22 -p 80 -t -i <yourname>/supervisord
     2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file)
     2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing
     2013-11-25 18:53:22,342 INFO supervisord started with pid 1
diff --git a/docs/sources/docker-hub-enterprise/admin-metrics.png b/docs/sources/docker-hub-enterprise/admin-metrics.png
new file mode 100644
index 0000000..21a8f74
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/admin-metrics.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/admin-settings-http.png b/docs/sources/docker-hub-enterprise/admin-settings-http.png
new file mode 100644
index 0000000..77df71f
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/admin-settings-http.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/admin.png b/docs/sources/docker-hub-enterprise/admin.png
new file mode 100644
index 0000000..54826e5
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/admin.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/adminguide.md b/docs/sources/docker-hub-enterprise/adminguide.md
new file mode 100644
index 0000000..66f099d
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/adminguide.md
@@ -0,0 +1,103 @@
+page_title: Docker Hub Enterprise: Admin guide
+page_description: Documentation describing administration of Docker Hub Enterprise
+page_keywords: docker, documentation, about, technology, hub, enterprise
+
+# Docker Hub Enterprise Administrator's Guide
+
+This guide covers tasks and functions an administrator of Docker Hub Enterprise
+(DHE) will need to know about, such as reporting, logging, system management,
+performance metrics, etc.
+For tasks DHE users need to accomplish, such as using DHE to push and pull
+images, please visit the [User's Guide](./userguide).
+
+## Reporting
+
+### System Health
+
+![System Health page</admin/metrics/>](../assets/admin-metrics.png)
+
+The "System Health" tab displays resource utilization metrics for the DHE host
+as well as for each of its contained services. The CPU and RAM usage meters at
+the top indicate overall resource usage for the host, while detailed time-series
+charts are provided below for each service. You can mouse-over the charts or
+meters to see detailed data points.
+
+Clicking on a service name (i.e., "load_balancer", "admin_server", etc.) will
+display the network, CPU, and memory (RAM) utilization data for the specified
+service. See below for a
+[detailed explanation of the available services](#services).
+
+### Logs
+
+![System Logs page</admin/logs/>](../assets/admin-logs.png)
+
+Click the "Logs" tab to view all logs related to your DHE instance. You will see
+log sections on this page for each service in your DHE instance. Older or newer
+logs can be loaded by scrolling up or down. See below for a
+[detailed explanation of the available services](#services).
+
+DHE's log files can be found on the host in `/usr/local/etc/dhe/logs/`. The
+files are limited to a maximum size of 64mb. They are rotated every two weeks,
+when the aggregator sends logs to the collection server, or they are rotated if
+a logfile would exceed 64mb without rotation. Log files are named `<component
+name>-<timestamp at rotation>`, where the "component name" is the service it
+provides (`manager`, `admin-server`, etc.).
+
+### Usage statistics and crash reports
+
+During normal use, DHE generates usage statistics and crash reports. This
+information is collected by Docker, Inc. to help us prioritize features, fix
+bugs, and improve our products. Specifically, Docker, Inc. collects the
+following information:
+
+* Error logs
+* Crash logs
+
+## Emergency access to DHE
+
+If your authenticated or public access to the DHE web interface has stopped
+working, but your DHE admin container is still running, you can add an
+[ambassador container](https://docs.docker.com/articles/ambassador_pattern_linking/)
+to get temporary unsecure access to it by running:
+
+    $ docker run --rm -it --link docker_hub_enterprise_admin_server:admin -p 9999:80 svendowideit/ambassador
+
+> **Note:** This guide assumes you can run Docker commands from a machine where
+> you are a member of the `docker` group, or have root privileges. Otherwise,
+> you may need to add `sudo` to the example command above.
+
+This will give you access on port `9999` on your DHE server - `http://<dhe-host-ip>:9999/admin/`.
+
+## Services
+
+DHE runs several Docker services which are essential to its reliability and
+usability. The following services are included; you can see their details by
+running queries on the [System Health](#system-health) and [Logs](#logs) pages:
+
+* `admin_server`: Used for displaying system health, performing upgrades,
+configuring settings, and viewing logs.
+* `load_balancer`: Used for maintaining high availability by distributing load
+to each image storage service (`image_storage_X`).
+* `log_aggregator`: A microservice used for aggregating logs from each of the
+other services. Handles log persistence and rotation on disk.
+* `image_storage_X`: Stores Docker images using the [Docker Registry HTTP API V2](https://github.com/docker/distribution/blob/master/doc/SPEC.md). Typically,
+multiple image storage services are used in order to provide greater uptime and
+faster, more efficient resource utilization.
+
+## DHE system management
+
+The `dockerhubenterprise/manager` image is used to control the DHE system. This
+image uses the Docker socket to orchestrate the multiple services that comprise
+DHE.
+
+     $ sudo bash -c "$(sudo docker run dockerhubenterprise/manager [COMMAND])"
+
+Supported commands are: `install`, `start`, `stop`, `restart`, `status`, and
+`upgrade`.
+
+> **Note**: `sudo` is needed for `dockerhubenterprise/manager` commands to
+> ensure that the Bash script is run with full access to the Docker host.
+
+## Next Steps
+
+For information on installing DHE, take a look at the [Installation instructions](./install.md).
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-logs.png b/docs/sources/docker-hub-enterprise/assets/admin-logs.png
new file mode 100644
index 0000000..3221cc5
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-logs.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-metrics.png b/docs/sources/docker-hub-enterprise/assets/admin-metrics.png
new file mode 100644
index 0000000..965101f
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-metrics.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png
new file mode 100644
index 0000000..530a160
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png
new file mode 100644
index 0000000..02715d3
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png
new file mode 100644
index 0000000..145102a
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png
new file mode 100644
index 0000000..149be80
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png
new file mode 100644
index 0000000..8c402bb
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png
new file mode 100644
index 0000000..ab6082d
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png
new file mode 100644
index 0000000..5d837dc
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png
new file mode 100644
index 0000000..a035977
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings.png b/docs/sources/docker-hub-enterprise/assets/admin-settings.png
new file mode 100644
index 0000000..a900828
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/admin-settings.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/console-pull.png b/docs/sources/docker-hub-enterprise/assets/console-pull.png
new file mode 100755
index 0000000..db93646
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/console-pull.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/console-push.png b/docs/sources/docker-hub-enterprise/assets/console-push.png
new file mode 100755
index 0000000..e5c45ef
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/console-push.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license-CSDE-dropdown.png b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license-CSDE-dropdown.png
new file mode 100644
index 0000000..49dbfab
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license-CSDE-dropdown.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license.png b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license.png
new file mode 100644
index 0000000..3c70b74
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png b/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png
new file mode 100755
index 0000000..907146e
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png b/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png
new file mode 100755
index 0000000..be7b28d
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png
Binary files differ
diff --git a/docs/sources/docker-hub-enterprise/configuration.md b/docs/sources/docker-hub-enterprise/configuration.md
new file mode 100644
index 0000000..d537bc4
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/configuration.md
@@ -0,0 +1,356 @@
+page_title: Docker Hub Enterprise: Configuration options
+page_description: Configuration instructions for Docker Hub Enterprise
+page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry
+
+# Configuring DHE
+
+## Overview
+
+This page will help you properly configure Docker Hub Enterprise (DHE) so it can
+run in your environment. 
+
+Start with DHE loaded in your browser and click  the "Settings" tab to view
+configuration options. You'll see options for configuring:
+
+* Domains and ports
+* Security settings
+* Storage settings
+* Authentication settings
+* Your DHE license
+
+## Domains and Ports
+
+![Domain and Ports page</admin/settings#http>](../assets/admin-settings-http.png)
+
+* *Domain Name*: **required** defaults to an empty string, the fully qualified domain name assigned to the DHE host.
+* *Load Balancer HTTP Port*: defaults to 80, used as the entry point for the image storage service. To see load balancer status, you can query
+http://&lt;dhe-host&gt;/load_balancer_status.
+* *Load Balancer HTTPS Port*: defaults to 443, used as the secure entry point
+for the image storage service.
+* *HTTP_PROXY*: defaults to an empty string, proxy server for HTTP requests.
+* *HTTPS_PROXY*: defaults to an empty string, proxy server for HTTPS requests.
+* *NO_PROXY*: defaults to an empty string, proxy bypass for HTTP and HTTPS requests.
+
+
+> **Note**: If you need DHE to re-generate a self-signed certificate at some
+> point, you'll need to first delete `/usr/local/etc/dhe/ssl/server.pem`, and
+> then restart the DHE containers, either by changing and saving the "Domain Name",
+> or using `bash -c "$(docker run dockerhubenterprise/manager restart)"`.
+
+
+## Security
+
+![Security settings page</admin/settings#security>](../assets/admin-settings-security.png)
+
+* *SSL Certificate*: Used to enter the hash (string) from the SSL Certificate.
+This cert must be accompanied by its private key, entered below.
+* *Private Key*: The hash from the private key associated with the provided
+SSL Certificate (as a standard x509 key pair).
+
+In order to run, DHE requires encrypted communications via HTTPS/SSL between (a) the DHE registry and your Docker Engine(s), and (b) between your web browser and the DHE admin server. There are a few options for setting this up:
+
+1. You can use the self-signed certificate DHE generates by default.
+2. You can generate your own certificates using a public service or your enterprise's infrastructure. See the [Generating SSL certificates](#generating-ssl-certificates) section for the options available.
+
+If you are generating your own certificates, you can install them by following the instructions for
+[Adding your own registry certificates to DHE](#adding-your-own-registry-certificates-to-dhe).
+
+On the other hand, if you choose to use the DHE-generated certificates, or the
+certificates you generate yourself are not trusted by your client Docker hosts,
+you will need to do one of the following:
+
+* [Install a registry certificate on all of your client Docker daemons](#installing-registry-certificates-on-client-docker-daemons),
+
+* Set your [client Docker daemons to run with an unconfirmed connection to the registry](#if-you-cant-install-the-certificates).
+
+### Generating SSL certificates
+
+There are three basic approaches to generating certificates:
+
+1.  Most enterprises will have private key infrastructure (PKI) in place to
+generate keys. Consult with your security team or whomever manages your private
+key infrastructure. If you have this resource available, Docker recommends you
+use it.
+
+2. If your enterprise can't provide keys, you can use a public Certificate
+Authority (CA) like "InstantSSL.com" or "RapidSSL.com" to generate a
+certificate. If your certificates are generated using a globally trusted
+Certificate Authority, you won't need to install them on all of your
+client Docker daemons.
+
+3. Use the self-signed registry certificate generated by DHE, and install it
+onto the client Docker daemon hosts as shown below.
+
+### Adding your own Registry certificates to DHE
+
+Whichever method you use to generate certificates, once you have them
+you can set up your DHE server to use them by navigating to the "Settings" page,
+going to "Security," and putting the SSL Certificate text (including all
+intermediate Certificates, starting with the host) into the
+"SSL Certificate" edit box, and the previously generated Private key into
+the "SSL Private Key" edit box.
+
+Click the "Save" button, and then wait for the DHE Admin site to restart and
+reload. It should now be using the new certificate.
+
+Once the "Security" page has reloaded, it will show `#` hashes instead of the
+certificate text you pasted in.
+
+If your certificate is signed by a chain of Certificate Authorities that are
+already trusted by your Docker daemon servers, you can skip the "Installing
+registry certificates" step below.
+
+### Installing Registry certificates on client Docker daemons
+
+If your certificates do not have a trusted Certificate Authority, you will need
+to install them on each client Docker daemon host.
+
+The procedure for installing the DHE certificates on each Linux distribution has
+slightly different steps, as shown below.
+
+You can test this certificate using `curl`:
+
+```
+$ curl https://dhe.yourdomain.com/v2/
+curl: (60) SSL certificate problem: self signed certificate
+More details here: http://curl.haxx.se/docs/sslcerts.html
+
+curl performs SSL certificate verification by default, using a "bundle"
+ of Certificate Authority (CA) public keys (CA certs). If the default
+ bundle file isn't adequate, you can specify an alternate file
+ using the --cacert option.
+If this HTTPS server uses a certificate signed by a CA represented in
+ the bundle, the certificate verification probably failed due to a
+ problem with the certificate (it might be expired, or the name might
+ not match the domain name in the URL).
+If you'd like to turn off curl's verification of the certificate, use
+ the -k (or --insecure) option.
+
+$ curl --cacert /usr/local/etc/dhe/ssl/server.pem https://dhe.yourdomain.com/v2/
+{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":null}]}
+```
+
+Continue by following the steps corresponding to your chosen OS.
+
+#### Ubuntu/Debian
+
+```
+    $ export DOMAIN_NAME=dhe.yourdomain.com
+    $ openssl s_client -connect $DOMAIN_NAME:443 -showcerts </dev/null 2>/dev/null | openssl x509 -outform PEM | tee /usr/local/share/ca-certificates/$DOMAIN_NAME.crt
+    $ update-ca-certificates
+    Updating certificates in /etc/ssl/certs... 1 added, 0 removed; done.
+    Running hooks in /etc/ca-certificates/update.d....done.
+    $ service docker restart
+    docker stop/waiting
+    docker start/running, process 29291
+```
+
+#### RHEL
+
+```
+    $ export DOMAIN_NAME=dhe.yourdomain.com
+    $ openssl s_client -connect $DOMAIN_NAME:443 -showcerts </dev/null 2>/dev/null | openssl x509 -outform PEM | tee /etc/pki/ca-trust/source/anchors/$DOMAIN_NAME.crt
+    $ update-ca-trust
+    $ /bin/systemctl restart docker.service
+```
+
+#### Boot2Docker 1.6.0
+
+Install the CA cert (or the auto-generated cert) by adding the following to
+your `/var/lib/boot2docker/bootsync.sh`:
+
+```
+#!/bin/sh
+
+cat /var/lib/boot2docker/server.pem >> /etc/ssl/certs/ca-certificates.crt
+```
+
+
+Then get the certificate from the new DHE server using:
+
+```
+$ openssl s_client -connect dhe.yourdomain.com:443 -showcerts </dev/null 2>/dev/null | openssl x509 -outform PEM | sudo tee -a /var/lib/boot2docker/server.pem
+```
+
+If your certificate chain is complicated, you may want to use the changes in
+[Pull request 807](https://github.com/boot2docker/boot2docker/pull/807/files)
+
+Now you can either reboot your Boot2Docker virtual machine, or run the following to
+install the server certificate, and then restart the Docker daemon.
+
+```
+$ sudo chmod 755 /var/lib/boot2docker/bootsync.sh
+$ sudo /var/lib/boot2docker/bootsync.sh
+$ sudo /etc/init.d/docker restart`.
+```
+
+### If you can't install the certificates
+
+If for some reason you can't install the certificate chain on a client Docker host,
+or your certificates do not have a global CA, you can configure your Docker daemon to run in "insecure" mode. This is done by adding an extra flag,
+`--insecure-registry host-ip|domain-name`, to your client Docker daemon startup flags.
+You'll need to restart the Docker daemon for the change to take effect.
+
+This flag means that the communications between your Docker client and the DHE
+Registry server are still encrypted, but the client Docker daemon is not
+confirming that the Registry connection is not being hijacked or diverted.
+
+> **Note**: If you enter a "Domain Name" into the "Security" settings, it needs
+> to be DNS resolvable on any client Docker daemons that are running in
+> "insecure-registry" mode.
+
+To set the flag, follow the directions below for your operating system.
+
+#### Ubuntu
+
+On Ubuntu 14.04 LTS, you customize the Docker daemon configuration with the
+`/etc/defaults/docker` file.
+
+Open or create the `/etc/defaults/docker` file, and add the
+`--insecure-registry` flag to the `DOCKER_OPTS` setting (which may need to be
+added or uncommented) as follows:
+
+```
+DOCKER_OPTS="--insecure-registry dhe.yourdomain.com"
+```
+
+Then restart the Docker daemon with `sudo service docker restart`.
+
+#### RHEL
+
+On RHEL, you customize the Docker daemon configuration with the
+`/etc/sysconfig/docker` file.
+
+Open or create the `/etc/sysconfig/docker` file, and add the
+`--insecure-registry` flag to the `OPTIONS` setting (which may need to be
+added or uncommented) as follows:
+
+```
+OPTIONS="--insecure-registry dhe.yourdomain.com"
+```
+
+Then restart the Docker daemon with `sudo service docker restart`.
+
+### Boot2Docker
+
+On Boot2Docker, you customize the Docker daemon configuration with the
+`/var/lib/boot2docker/profile` file.
+
+Open or create the `/var/lib/boot2docker/profile` file, and add an `EXTRA_ARGS`
+setting as follows:
+
+```
+EXTRA_ARGS="--insecure-registry dhe.yourdomain.com"
+```
+
+Then restart the Docker daemon with `sudo /etc/init.d/docker restart`.
+
+## Image Storage Configuration
+
+DHE offers multiple methods for image storage, which are defined using specific
+storage drivers. Image storage can be local, remote, or on a cloud service such
+as S3. Storage drivers can be added or customized via the DHE storage driver
+API.
+
+![Storage settings page</admin/settings#storage>](../assets/admin-settings-storage.png)
+
+* *Yaml configuration file*: This file (`/usr/local/etc/dhe/storage.yml`) is
+used to configure the image storage services. The editable text of the file is
+displayed in the dialog box. The schema of this file is identical to that used
+by the [Registry 2.0](http://docs.docker.com/registry/configuration/).
+* If you are using the file system driver to provide local image storage, you will need to specify a root directory which will get mounted as a sub-path of
+`/var/local/dhe/image-storage`. The default value of this root directory is
+`/local`, so the full path to it is `/var/local/dhe/image-storage/local`.
+
+> **Note:**
+> Saving changes you've made to settings will restart the Docker Hub Enterprise
+> instance. The restart may cause a brief interruption for users of the image
+> storage system.
+
+## Authentication
+
+The "Authentication" settings tab lets DHE administrators control access
+to the DHE web admin tool and to the DHE Registry.
+
+The current authentication methods are `None`, `Basic` and `LDAP`.
+
+> **Note**: if you have issues logging into the DHE admin web interface after changing the authentication
+> settings, you may need to use the [emergency access to the DHE admin web interface](./adminguide.md#Emergency-access-to-the-dhe-admin-web-interface).
+
+### No authentication
+
+No authentication means that everyone that can access your DHE web administration
+site. This is not recommended for any use other than testing.
+
+
+### Basic authentication
+
+The `Basic` authentication setting allows the admin to provide username/password pairs local to DHE.
+Any user who can successfully authenticate can use DHE to push and pull Docker images.
+You can optionally filter the list of users to a subset of just those users with access to the DHE
+admin web interface.
+
+![Basic authentication settings page</admin/settings#auth>](../assets/admin-settings-authentication-basic.png)
+
+* A button to add one user, or to upload a CSV file containing username,
+password pairs
+* A DHE website Administrator Filter, allowing you to either
+* * *Allow all authenticated users*: to log into the DHE admin web interface, or
+* * *Whitelist usernames*: which allows you to restrict access to the web interface to a listed set of users.
+
+### LDAP authentication
+
+Using LDAP authentication allows you to integrate your DHE registry into your
+organization's existing user and authentication database.
+
+As this involves existing infrastructure external to DHE and Docker, you will need to
+gather the details required to configure DHE for your organization's particular LDAP
+implementation.
+
+You can test that you have the necessary LDAP server information by using it from
+inside a Docker container running on the same server as your DHE:
+
+> **Note**: if the LDAP server is configured to use *StartTLS*, then you need to add `-Z` to the
+> `ldapsearch` command examples below.
+
+```
+docker run --rm -it svendowideit/ldapsearch -h <LDAP Server hostname> -b <User Base DN> -D <Search User DN> -w <Search User Password>
+```
+
+or if the LDAP server is set up to allow anonymous access (which means your *Search User DN* and *Search User Password* settings can remain empty):
+
+```
+docker run --rm -it svendowideit/ldapsearch -h <LDAP Server hostname> -b <User Base DN> -x
+```
+
+The result of these queries should be a (very) long list - if you get an authentication error,
+then the details you have been given are not sufficient.
+
+The *User Login Attribute* key setting must match the field used in the LDAP server
+for the user's login-name. On OpenLDAP, it's generally `uid`, and on Microsoft Active Directory
+servers, it's `sAMAccountName`. The `ldapsearch` output above should allow you to
+confirm which setting you need.
+
+![LDAP authentication settings page</admin/settings#auth>](../assets/admin-settings-authentication-ldap.png)
+
+* *Use StartTLS*: defaults to unchecked, check to enable StartTLS
+* *LDAP Server URL*: **required** defaults to null, LDAP server URL (e.g., - ldap://example.com)
+* *User Base DN*: **required** defaults to null, user base DN in the form (e.g., - dc=example,dc=com)
+* *User Login Attribute*: **required** defaults to null, user login attribute (e.g., - uid or sAMAccountName)
+* *Search User DN*: **required** defaults to null, search user DN (e.g., - domain\username)
+* *Search User Password*: **required** defaults to null, search user password
+* A *DHE Registry User filter*: allowing you to either
+* * *Allow all authenticated users* to push or pull any images, or
+* * *Filter LDAP search results*: which allows you to restrict DHE registry pull and push to users matching the LDAP filter,
+* * *Whitelist usernames*: which allows you to restrict DHE registry pull and push to the listed set of users.
+* A *DHE website Administrator filter*, allowing you to either
+* * *Allow all authenticated users*: to log into the DHE admin web interface, or
+* * *Filter LDAP search results*: which allows you to restrict DHE admin web access to users matching the LDAP filter,
+* * *Whitelist usernames*: which allows you to restrict access to the web interface to the listed set of users.
+
+
+## Next Steps
+
+For information on getting support for DHE, take a look at the
+[Support information](./support.md).
+
diff --git a/docs/sources/docker-hub-enterprise/index.md b/docs/sources/docker-hub-enterprise/index.md
new file mode 100644
index 0000000..d187531
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/index.md
@@ -0,0 +1,52 @@
+page_title: Docker Hub Enterprise: Overview
+page_description: Docker Hub Enterprise
+page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry
+
+# Welcome to Docker Hub Enterprise
+
+## Overview
+
+Docker Hub Enterprise (DHE) lets you run and manage your own Docker image
+storage service, securely on your own infrastructure behind your company
+firewall. This allows you to securely store, push, and pull the images used by
+your enterprise to build, ship, and run applications. DHE also provides
+monitoring and usage information to help you understand the workloads being
+placed on it.
+
+Specifically, DHE provides:
+
+* An image registry to store, manage, and collaborate on Docker images
+* Pluggable storage drivers
+* Configuration options to let you run DHE in your particular enterprise
+environment.
+* Easy, transparent upgrades
+* Logging, usage and system health metrics
+
+DHE is perfect for:
+
+* Providing a secure, on-premise development environment
+* Creating a streamlined build pipeline
+* Building a consistent, high-performance test/QA environment
+* Managing image deployment
+
+DHE is built on [version 2 of the Docker registry](https://github.com/docker/distribution).
+
+## Available Documentation
+
+The following documentation for DHE is available:
+
+* **Overview** This page.
+* [**Quick Start: Basic User Workflow**](./quick-start.md) Go here to learn the
+fundamentals of how DHE works and how you can set up a simple, but useful
+workflow.
+* [**User Guide**](./userguide.md) Go here to learn about using DHE from day to
+day.
+* [**Administrator Guide**](./adminguide.md) Go here if you are an administrator
+responsible for running and maintaining DHE.
+* [**Installation**](install.md) Go here for the steps you'll need to install
+DHE and get it working.
+* [**Configuration**](./configuration.md) Go here to find out details about
+setting up and configuring DHE for your particular environment.
+* [**Support**](./support.md) Go here for information on getting support for
+DHE.
+
diff --git a/docs/sources/docker-hub-enterprise/install-config.md b/docs/sources/docker-hub-enterprise/install-config.md
deleted file mode 100644
index 0b7bcfd..0000000
--- a/docs/sources/docker-hub-enterprise/install-config.md
+++ /dev/null
@@ -1,8 +0,0 @@
-page_title: Using Docker Hub Enterprise Installation
-page_description: Docker Hub Enterprise Installation
-page_keywords: docker hub enterprise
-
-# Docker Hub Enterprise Installation
-
-Documenation coming soon.
-
diff --git a/docs/sources/docker-hub-enterprise/install.md b/docs/sources/docker-hub-enterprise/install.md
new file mode 100644
index 0000000..1bfa7d1
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/install.md
@@ -0,0 +1,348 @@
+page_title: Docker Hub Enterprise: Install
+page_description: Installation instructions for Docker Hub Enterprise
+page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry
+
+# Installing Docker Hub Enterprise
+
+## Overview
+
+This document describes the process of obtaining, installing, and securing
+Docker Hub Enterprise (DHE). DHE is installed from Docker containers. Once
+installed, you will need to select a method of securing it. This doc will
+explain the options you have for security and help you find the resources needed
+to configure it according to your chosen method. More configuration details can
+be found in the [DHE Configuration page](./configuration.md).
+
+Specifically, installation requires completion of these steps, in order:
+
+1. Acquire a license by purchasing DHE or requesting a trial license.
+2. Install the commercially supported Docker Engine.
+3. Install DHE
+4. Add your license to your DHE instance
+
+## Licensing
+
+In order to run DHE, you will need to acquire a license, either by purchasing
+DHE or requesting a trial license. The license will be associated with your
+Docker Hub account or Docker Hub organization (so if you don't have an account,
+you'll need to set one up, which can be done at the same time as your license
+request). To get your license or start your trial, please contact our
+[sales department](mailto:sales@docker.com). Upon completion of your purchase or
+request, you will receive an email with further instructions for licensing your
+copy of DHE.
+
+## Prerequisites
+
+DHE 1.0.1 requires the following:
+
+* Commercially supported Docker Engine 1.6.1 or later running on an
+Ubuntu 14.04 LTS, RHEL 7.1 or RHEL 7.0 host. (See below for instructions on how
+to install the commercially supported Docker Engine.)
+
+> **Note:** In order to remain in compliance with your DHE support agreement,
+> you must use the current version of commercially supported Docker Engine.
+> Running the regular, open source version of Engine is **not** supported.
+
+* Your Docker daemon needs to be listening to the Unix socket (the default) so
+that it can be bind-mounted into the DHE management containers, allowing
+DHE to manage itself and its updates. For this reason, your DHE host will also
+need internet connectivity so it can access the updates.
+
+* Your host also needs to have TCP ports `80` and `443` available for the DHE
+container port mapping.
+
+* You will also need the Docker Hub user-name and password used when obtaining
+the DHE license (or the user-name of an administrator of the Hub organization
+that obtained an Enterprise license).
+
+## Installing the Commercially Supported Docker Engine
+
+Since DHE is installed using Docker, the commercially supported Docker Engine
+must be installed first. This is done with an RPM or DEB repository, which you
+set up using a Bash script downloaded from the [Docker Hub](https://hub.docker.com).
+
+### Download the commercially supported Docker Engine installation script
+
+To download the commercially supported Docker Engine Bash installation script,
+log in to the [Docker Hub](https://hub.docker.com) with the user-name used to
+obtain your license . Once you're logged in, go to the
+["Enterprise Licenses"](https://registry.hub.docker.com/account/licenses/) page
+in your Hub account's "Settings" section.
+
+Select your intended host operating system from the "Download CS Engine" drop-
+down at the top right of the page and then, once the Bash setup script is
+downloaded, follow the steps below appropriate for your chosen OS.
+
+![Docker Hub Docker engine install dropdown](../assets/docker-hub-org-enterprise-license-CSDE-dropdown.png)
+
+### RHEL 7.0/7.1 installation
+
+First, copy the downloaded Bash setup script to your RHEL host. Next, run the
+following to install commercially supported Docker Engine and its dependencies,
+and then start the Docker daemon:
+
+```
+$ sudo yum update && sudo yum upgrade
+$ chmod 755 docker-cs-engine-rpm.sh
+$ sudo ./docker-cs-engine-rpm.sh
+$ sudo yum install docker-engine-cs
+$ sudo systemctl enable docker.service
+$ sudo systemctl start docker.service
+```
+
+In order to simplify using Docker, you can get non-sudo access to the Docker
+socket by adding your user to the `docker` group, then logging out and back in
+again:
+
+```
+$ sudo usermod -a -G docker $USER
+$ exit
+```
+
+> **Note**: you may need to reboot your server to update its RHEL kernel.
+
+### Ubuntu 14.04 LTS installation
+
+First, copy the downloaded Bash setup script to your Ubuntu host. Next, run the
+following to install commercially supported Docker Engine and its dependencies:
+
+```
+$ sudo apt-get update && sudo apt-get upgrade
+$ chmod 755 docker-cs-engine-deb.sh
+$ sudo ./docker-cs-engine-deb.sh
+$ sudo apt-get install docker-engine-cs
+```
+Lastly, confirm Docker is running with `sudo service docker start`.
+
+In order to simplify using Docker, you can get non-sudo access to the Docker
+socket by adding your user to the `docker` group, then logging out and back in
+again:
+
+```
+$ sudo usermod -a -G docker $USER
+$ exit
+```
+
+> **Note**: you may need to reboot your server to update its LTS kernel.
+
+## Upgrading the Commercially Supported Docker Engine
+
+CS Docker Engine 1.6.1 contains fixes to security vulnerabilities,
+  and customers should upgrade to it immediately.
+
+> **Note**: If you have CS Docker Engine 1.6.0 installed, it must be upgraded;
+  however, due to compatibility issues, [DHE must be upgraded](#upgrading-docker-hub-enterprise)
+  first.
+
+The CS Docker Engine installation script set up the RHEL/Ubuntu package repositories,
+so upgrading the Engine only requires you to run the update commands on your server.
+
+### RHEL 7.0/7.1 upgrade
+
+To upgrade CS Docker Engine, run the following command:
+
+```
+    $ sudo yum update
+    $ sudo systemctl daemon-reload && sudo systemctl restart docker
+```
+
+### Ubuntu 14.04 LTS upgrade
+
+To upgrade CS Docker Engine, run the following command:
+
+```
+   $ sudo apt-get update && sudo apt-get dist-upgrade docker-engine-cs
+```
+
+## Installing Docker Hub Enterprise
+
+Once the commercially supported Docker Engine is installed, you can install DHE
+itself. DHE is a self-installing application built and distributed using Docker
+and the [Docker Hub](https://registry.hub.docker.com/). It is able to restart
+and reconfigure itself using the Docker socket that is bind-mounted to its
+container.
+
+Start installing DHE by running the "dockerhubenterprise/manager" container:
+
+```
+	$ sudo bash -c "$(sudo docker run dockerhubenterprise/manager install)"
+```
+
+> **Note**: `sudo` is needed for `dockerhubenterprise/manager` commands to
+> ensure that the Bash script is run with full access to the Docker host.
+
+You can also find this command on the "Enterprise Licenses" section of your Hub
+user profile. The command will execute a shell script that creates the needed
+directories and then runs Docker to pull DHE's images and run its containers.
+
+Depending on your internet connection, this process may take several minutes to
+complete.
+
+A successful installation will pull a large number of Docker images and should
+display output similar to:
+
+```
+$ sudo bash -c "$(sudo docker run dockerhubenterprise/manager install)"
+Unable to find image 'dockerhubenterprise/manager:latest' locally
+Pulling repository dockerhubenterprise/manager
+c46d58daad7d: Pulling image (latest) from dockerhubenterprise/manager
+c46d58daad7d: Pulling image (latest) from dockerhubenterprise/manager
+c46d58daad7d: Pulling dependent layers
+511136ea3c5a: Download complete
+fa4fd76b09ce: Pulling metadata
+fa4fd76b09ce: Pulling fs layer
+ff2996b1faed: Download complete
+...
+fd7612809d57: Pulling metadata
+fd7612809d57: Pulling fs layer
+fd7612809d57: Download complete
+c46d58daad7d: Pulling metadata
+c46d58daad7d: Pulling fs layer
+c46d58daad7d: Download complete
+c46d58daad7d: Download complete
+Status: Downloaded newer image for dockerhubenterprise/manager:latest
+Unable to find image 'dockerhubenterprise/manager:1.0.0_8ce62a61e058' locally
+Pulling repository dockerhubenterprise/manager
+c46d58daad7d: Download complete
+511136ea3c5a: Download complete
+fa4fd76b09ce: Download complete
+1c8294cc5160: Download complete
+117ee323aaa9: Download complete
+2d24f826cb16: Download complete
+33bfc1956932: Download complete
+48f0dd6c9414: Download complete
+65c30f72ecb2: Download complete
+d4b29764d0d3: Download complete
+5654f4fe5384: Download complete
+9b9faa6ecd11: Download complete
+0c275f56ca5c: Download complete
+ff2996b1faed: Download complete
+fd7612809d57: Download complete
+Status: Image is up to date for dockerhubenterprise/manager:1.0.0_8ce62a61e058
+INFO  [1.0.0_8ce62a61e058] Attempting to connect to docker engine dockerHost="unix:///var/run/docker.sock"
+INFO  [1.0.0_8ce62a61e058] Running install command
+<...output truncated...>
+Creating container docker_hub_enterprise_load_balancer with docker daemon unix:///var/run/docker.sock
+Starting container docker_hub_enterprise_load_balancer with docker daemon unix:///var/run/docker.sock
+Bringing up docker_hub_enterprise_log_aggregator.
+Creating container docker_hub_enterprise_log_aggregator with docker daemon unix:///var/run/docker.sock
+Starting container docker_hub_enterprise_log_aggregator with docker daemon unix:///var/run/docker.sock
+$ docker ps
+CONTAINER ID        IMAGE                                                   COMMAND                CREATED             STATUS         PORTS                                      NAMES
+0168f37b6221        dockerhubenterprise/log-aggregator:1.0.0_8ce62a61e058   "log-aggregator"       4 seconds ago       Up 4 seconds                                              docker_hub_enterprise_log_aggregator
+b51c73bebe8b        dockerhubenterprise/nginx:1.0.0_8ce62a61e058            "nginxWatcher"         4 seconds ago       Up 4 seconds   0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp   docker_hub_enterprise_load_balancer
+e8327864356b        dockerhubenterprise/admin-server:1.0.0_8ce62a61e058     "server"               5 seconds ago       Up 5 seconds   80/tcp                                     docker_hub_enterprise_admin_server
+52885a6e830a        dockerhubenterprise/auth_server:alpha-a5a2af8a555e      "garant --authorizat   6 seconds ago       Up 5 seconds   8080/tcp
+```
+
+Once this process completes, you should be able to manage and configure your DHE
+instance by pointing your browser to `https://<host-ip>/`.
+
+Your browser will warn you that this is an unsafe site, with a self-signed,
+untrusted certificate. This is normal and expected; allow this connection
+temporarily.
+
+### Setting the DHE Domain Name
+
+The DHE Administrator site will also warn that the "Domain Name" is not set. Go
+to the "Settings" tab, and set the "Domain Name" to the full host-name of your
+DHE server.
+Hitting the "Save and Restart DHE Server" button will generate a new certificate, which will be used
+by both the DHE Administrator web interface and the DHE Registry server.
+
+After the server restarts, you will again need to allow the connection to the untrusted DHE web admin site.
+
+![http settings page</admin/settings#http>](../assets/admin-settings-http-unlicensed.png)
+
+Lastly, you will see a warning notifying you that this instance of DHE is
+unlicensed. You'll correct this in the next step.
+
+### Add your license
+
+The DHE registry services will not start until you add your license.
+To do that, you'll first download your license from the Docker Hub and then
+upload it to your DHE web admin server. Follow these steps:
+
+1. If needed, log back into the [Docker Hub](https://hub.docker.com)
+   using the user-name you used when obtaining your license. Go to "Settings" (in
+   the menu under your user-name, top right) to get to your account settings, and
+   then click on "Enterprise Licenses" in the side bar at left.
+
+2. You'll see a list of available licenses. Click on the download button to
+   obtain the license file you'd like to use.
+   ![Download DHE license](../assets/docker-hub-org-enterprise-license.png)
+
+3. Next, go to your DHE instance in your browser and click on the Settings tab
+   and then the "License" tab. Click on the "Upload license file" button, which
+   will open a standard file browser. Locate and select the license file you
+   downloaded in step 2, above. Approve the selection to close the dialog.
+   ![http settings page</admin/settings#license>](../assets/admin-settings-license.png)
+
+4. Click the "Save and Restart DHE" button, which will quit DHE and then restart it, registering
+   the new license.
+
+5. Verify the acceptance of the license by confirming that the "unlicensed copy"
+warning is no longer present.
+
+### Securing DHE
+
+Securing DHE is **required**. You will not be able to push or pull from DHE until you secure it.
+
+There are several options and methods for securing DHE. For more information,
+see the [configuration documentation](./configuration.md#security)
+
+### Using DHE to push and pull images
+
+Now that you have DHE configured with a "Domain Name" and have your client
+Docker daemons configured with the required security settings, you can test your
+setup by following the instructions for
+[Using DHE to Push and pull images](./userguide.md#using-dhe-to-push-and-pull-images).
+
+### DHE web interface and registry authentication
+
+By default, there is no authentication set on either the DHE web admin
+interface or the DHE registry. You can restrict access using an in-DHE
+configured set of users (and passwords), or you can configure DHE to use LDAP-
+based authentication.
+
+See [DHE Authentication settings](./configuration.md#authentication) for more
+details.
+
+## Upgrading Docker Hub Enterprise
+
+DHE has been designed to allow on-the-fly software upgrades. Start by
+clicking on the "System Health" tab. In the upper, right-hand side of the
+dashboard, below the navigation bar, you'll see the currently installed version
+(e.g., `Current Version: 0.1.12345`).
+
+If your DHE instance is the latest available, you will also see the message:
+"System Up to Date."
+
+If there is an upgrade available, you will see the message "System Update
+Available!" alongside a button labeled "Update to Version X.XX". To upgrade, DHE
+will pull new DHE container images from the Docker Hub. If you have not already
+connected to Docker Hub, DHE will prompt you to log in.
+
+The upgrade process requires a small amount of downtime to complete. To complete
+the upgrade, DHE will:
+* Connect to the Docker Hub to pull new container images with the new version of
+DHE.
+* Deploy those containers
+* Shut down the old containers
+* Resolve any necessary links/urls.
+
+Assuming you have a decent internet connection, the entire upgrade process
+should complete within a few minutes.
+
+You should now [upgrade CS Docker Engine](#upgrading-the-commercially-supported-docker-engine).
+
+> **Note**: If Docker engine is upgraded first (DHE 1.0.0 on CS Docker Engine 1.6.1),
+> DHE can still be upgraded from the command line by running:
+>
+> `sudo bash -c "$(sudo docker run dockerhubenterprise/manager:1.0.0 upgrade 1.0.1)"`
+
+## Next Steps
+
+For information on configuring DHE for your environment, take a look at the
+[Configuration instructions](./configuration.md).
+
diff --git a/docs/sources/docker-hub-enterprise/quick-start.md b/docs/sources/docker-hub-enterprise/quick-start.md
new file mode 100644
index 0000000..c935363
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/quick-start.md
@@ -0,0 +1,330 @@
+page_title: Docker Hub Enterprise: Quick-start: Basic Workflow
+page_description: Brief tutorial on the basics of Docker Hub Enterprise user workflow
+page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, image, repository
+
+
+# Docker Hub Enterprise Quick Start: Basic User Workflow
+
+## Overview
+
+This Quick Start Guide will give you a hands-on look at the basics of using
+Docker Hub Enterprise (DHE), Docker’s on-premise image storage application.
+This guide will walk you through using DHE to complete a typical, and critical,
+part of building a development pipeline: setting up a Jenkins instance. Once you
+complete the task, you should have a good idea of how DHE works and how it might
+be useful to you.
+
+Specifically, this guide demonstrates the process of retrieving the
+[official Docker image for Jenkins](https://registry.hub.docker.com/_/jenkins/),
+customizing it to suit your needs, and then hosting it on your private instance
+of DHE located inside your enterprise’s firewalled environment. Your developers
+will then be able to retrieve the custom Jenkins image in order to use it to
+build CI/CD infrastructure for their projects, no matter the platform they’re
+working from, be it a laptop, a VM, or a cloud provider.
+
+The guide will walk you through the following steps:
+
+1. Pulling the official Jenkins image from the public Docker Hub
+2. Customizing the Jenkins image to suit your needs
+3. Pushing the customized image to DHE
+4. Pulling the customized image from DHE
+4. Launching a container from the custom image
+5. Using the new Jenkins container
+
+You should be able to complete this guide in about thirty minutes.
+
+> **Note:** This guide assumes you have installed a working instance of DHE
+> reachable at dhe.yourdomain.com. If you need help installing and configuring
+> DHE, please consult the
+[installation instructions](./install.md).
+
+
+## Pulling the official Jenkins image
+
+> **Note:** This guide assumes you are familiar with basic Docker concepts such
+> as images, containers, and registries. If you need to learn more about Docker
+> fundamentals, please consult the
+> [Docker user guide](http://docs.docker.com/userguide/).
+
+First, you will retrieve a copy of the official Jenkins image from the Docker Hub. By default, if
+Docker can't find an image locally, it will attempt to pull the image from the
+Docker Hub. From the CLI of a machine running the Docker Engine on your network, use
+the
+[`docker pull`](https://docs.docker.com/reference/commandline/cli/#pull)
+command to pull the public Jenkins image.
+
+    $ docker pull jenkins
+
+> **Note:** This guide assumes you can run Docker commands from a machine where
+> you are a member of the `docker` group, or have root privileges. Otherwise, you may
+> need to add `sudo` to the example commands below.
+
+Docker will start the process of pulling the image from the Hub. Once it has completed, the Jenkins image should be visible in the output of a [`docker images`](https://docs.docker.com/reference/commandline/cli/#images) command, which lists your available images:
+
+    $ docker images
+    REPOSITORY  TAG     IMAGE ID      CREATED      VIRTUAL SIZE
+    jenkins     latest  1a7cc22b0ee9  6 days ago   662 MB
+
+> **Note:** Because the `pull` command did not specify any tags, it will pull
+> the latest version of the public Jenkins image. If your enterprise environment
+> requires you to use a specific version, add the tag for the version you need
+> (e.g., `jenkins:1.565`).
+
+## Customizing the Jenkins image
+
+Now that you have a local copy of the Jenkins image, you’ll customize it so that
+the containers it builds will integrate with your infrastructure. To do this,
+you’ll create a custom Docker image that adds a Jenkins plugin that provides
+fine grained user management. You’ll also configure Jenkins to be more secure by
+disabling HTTP access and forcing it to use HTTPS.
+You’ll do this by using a `Dockerfile` and the `docker build` command.
+
+> **Note:** These are obviously just a couple of examples of the many ways you
+> can modify and configure Jenkins. Feel free to add or substitute whatever
+> customization is necessary to run Jenkins in your environment.
+
+### Creating a `build` context
+
+In order to add the new plugin and configure HTTPS access to the custom Jenkins
+image, you need to:
+
+1. Create text file that defines the new plugin
+2. Create copies of the private key and certificate
+
+All of the above files need to be in the same directory as the Dockerfile you
+will create in the next step.
+
+1. Create a build directory called `build`, and change to that new directory:
+
+    $ mkdir build && cd build
+
+In this directory, create a new file called `plugins` and add the following
+line:
+
+    role-strategy:2.2.0
+
+(The plugin version used above was the latest version at the time of writing.)
+
+2. You will also need to make copies of the server’s private key and certificate. Give the copies the following names — `https.key` and `https.pem`.
+
+> **Note:** Because creating new keys varies widely by platform and
+> implementation, this guide won’t cover key generation. We assume you have
+> access to existing keys. If you don’t have access, or can’t generate keys
+> yourself, feel free to skip the steps involving them and HTTPS config. The
+> guide will still walk you through building a custom Jenkins image and pushing
+> and pulling that image using DHE.
+
+### Creating a Dockerfile
+
+In the same directory as the `plugins` file and the private key and certificate,
+create a new [`Dockerfile`](https://docs.docker.com/reference/builder/) with the
+following contents:
+
+     FROM jenkins
+
+     #New plugins must be placed in the plugins file
+     COPY plugins /usr/share/jenkins/plugins
+
+     #The plugins.sh script will install new plugins
+     RUN /usr/local/bin/plugins.sh /usr/share/jenkins/plugins
+
+     #Copy private key and cert to image
+     COPY https.pem /var/lib/jenkins/cert
+     COPY https.key /var/lib/jenkins/pk
+
+     #Configure HTTP off and HTTPS on, using port 1973
+    ENV JENKINS_OPTS --httpPort=-1 --httpsPort=1973 --httpsCertificate=/var/lib/jenkins/cert --httpsPrivateKey=/var/lib/jenkins/pk
+
+The first `COPY` instruction in the above will copy the `plugin` file created
+earlier into the `/usr/share/jenkins` directory within the custom image you are
+defining with the `Dockerfile`.
+
+The `RUN` instruction will execute the `/usr/local/bin/plugins.sh` script with
+the newly copied `plugins` file, which will install the listed plugin.
+
+The next two `COPY` instructions copy the server’s private key and certificate
+into the required directories within the new image.
+
+The `ENV` instruction creates an environment variable called `JENKINS_OPT` in
+the image you are about to create. This environment variable will be present in
+any containers launched form the image and contains the required settings to
+tell Jenkins to disable HTTP and operate over HTTPS.
+
+> **Note:** You can specify any valid port number as part of the `JENKINS_OPT`
+> environment variable declared above. The value `1973` used in the example is
+> arbitrary.
+
+The `Dockerfile`, the `plugins` file, as well as the private key and
+certificate, must all be in the same directory because the `docker build`
+command uses the directory that contains the `Dockerfile` as its “build
+context”. Only files contained within that “build context” will be included in
+the image being built.
+
+### Building your custom image
+
+Now that the `Dockerfile`, the `plugins` file, and the files required for HTTPS
+operation are created in your current working directory, you can build your
+custom image using the
+[`docker build` command](https://docs.docker.com/reference/commandline/cli/#build):
+
+    docker build -t dhe.yourdomain.com/ci-infrastructure/jnkns-img .
+
+> **Note:** Don’t miss the period (`.`) at the end of the command above. This
+> tells the `docker build` command to use the current working directory as the
+> "build context".
+
+This command will build a new Docker image called `jnkns-img` which is based on
+the public Jenkins image you pulled earlier, but contains all of your
+customization.
+
+Please note the use of the `-t` flag in the `docker build` command above. The
+`-t` flag lets you  tag an image so it can be pushed to a custom repository. In
+the example above, the new image is tagged so it can be pushed to the
+`ci-infrastructure` Repository within the `dhe.yourdomain.com` registry (your
+local DHE instance). This will be important when you need to `push` the
+customized image to DHE later.
+
+A `docker images` command will now show the custom image alongside the Jenkins
+image pulled earlier:
+
+    $ sudo docker images
+    REPOSITORY   TAG    IMAGE ID    CREATED    VIRTUAL SIZE
+    dhe.yourdomain.com/ci-infrastructure/jnkns-img    latest    fc0ab3008d40    2 minutes ago    674.5 MB
+    jenkins    latest    1a7cc22b0ee9    6 days ago    662 MB
+
+## Pushing to Docker Hub Enterprise
+
+> **Note**: If your DHE instance has authentication enabled, you will need to
+> use your command line to `docker login <dhe-hostname>` (e.g., `docker login
+> dhe.yourdomain.com`).
+>
+> Failures due to unauthenticated `docker push` and `docker pull` commands will
+> look like :
+>
+>     $ docker pull dhe.yourdomain.com/hello-world
+>     Pulling repository dhe.yourdomain.com/hello-world
+>     FATA[0001] Error: image hello-world:latest not found
+>
+>     $ docker push dhe.yourdomain.com/hello-world
+>     The push refers to a repository [dhe.yourdomain.com/hello-world] (len: 1)
+>     e45a5af57b00: Image push failed
+>     FATA[0001] Error pushing to registry: token auth attempt for registry
+>     https://dhe.yourdomain.com/v2/:
+>     https://dhe.yourdomain.com/auth/v2/token/
+>     ?scope=repository%3Ahello-world%3Apull%2Cpush&service=dhe.yourdomain.com
+>     request failed with status: 401 Unauthorized
+
+Now that you’ve created the custom image, it can be pushed to DHE using the
+[`docker push`command](https://docs.docker.com/reference/commandline/cli/#push):
+
+    $ docker push dhe.yourdomain.com/ci-infrastructure/jnkns-img
+    511136ea3c5a: Image successfully pushed
+    848d84b4b2ab: Image successfully pushed
+    71d9d77ae89e: Image already exists
+    <truncated ouput...>
+    492ed3875e3e: Image successfully pushed
+    fc0ab3008d40: Image successfully pushed
+
+You can view the traffic throughput while the custom image is being pushed from
+the `System Health` tab in DHE:
+
+![DHE console push throughput](../assets/console-push.png)
+
+Once the image is successfully pushed, it can be downloaded, or pulled, by any
+Docker host that has access to DHE.
+
+## Pulling from Docker Hub Enterprise
+To pull the `jnkns-img` image from DHE, run the
+[`docker pull`](https://docs.docker.com/reference/commandline/cli/#pull)
+command from any Docker Host that has access to your DHE instance:
+
+    $ docker pull dhe.yourdomain.com/ci-infrastructure/jnkns-img
+    latest: Pulling from dhe.yourdomain.com/ci-infrastructure/jnkns-img
+    511136ea3c5a: Pull complete
+    848d84b4b2ab: Pull complete
+    71d9d77ae89e: Pull complete
+    <truncated ouput...>
+    492ed3875e3e: Pull complete
+    fc0ab3008d40: Pull complete
+    dhe.yourdomain.com/ci-infrastructure/jnkns-img:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
+    Status: Downloaded newer image for dhe.yourdomain.com/ci-infrastructure/jnkns-img:latest
+
+You can view the traffic throughput while the custom image is being pulled from
+the `System Health` tab in DHE:
+
+![DHE console pull throughput](../assets/console-pull.png)
+
+Now that the `jnkns-img` image has been pulled locally from DHE, you can view it
+in the output of the `docker images` command:
+
+     $ docker images
+    REPOSITORY     TAG    IMAGE ID    CREATED    VIRTUAL SIZE
+    dhe.yourdomain.com/ci-infrastructure/jnkns-img    latest  fc0ab3008d40    8 minutes ago    674.5 MB
+
+## Launching a custom Jenkins container
+
+Now that you’ve successfully pulled the customized Jenkins image from DHE, you
+can create a container from it with the
+[`docker run` command](https://docs.docker.com/reference/commandline/cli/#run):
+
+
+    $ docker run -p 1973:1973 --name jenkins01 dhe.yourdomain.com/ci-infrastructure/jnkns-img
+    /usr/share/jenkins/ref/init.groovy.d/tcp-slave-angent-port.groovy
+     /usr/share/jenkins/ref/init.groovy.d/tcp-slave-angent-port.groovy -> init.groovy.d/tcp-slave-angent-port.groovy
+    copy init.groovy.d/tcp-slave-angent-port.groovy to JENKINS_HOME
+    /usr/share/jenkins/ref/plugins/role-strategy.hpi
+     /usr/share/jenkins/ref/plugins/role-strategy.hpi -> plugins/role-strategy.hpi
+    copy plugins/role-strategy.hpi to JENKINS_HOME
+    /usr/share/jenkins/ref/plugins/dockerhub.hpi
+     /usr/share/jenkins/ref/plugins/dockerhub.hpi -> plugins/dockerhub.hpi
+    copy plugins/dockerhub.hpi to JENKINS_HOME
+    <truncated output...>
+    INFO: Jenkins is fully up and running
+
+> **Note:** The `docker run` command above maps port 1973 in the container
+> through to port 1973 on the host. This is the HTTPS port you specified in the
+> Dockerfile earlier. If you specified a different HTTPS port in your
+> Dockerfile, you will need to substitute this with the correct port numbers for
+> your environment.
+
+You can view the newly launched a container, called `jenkins01`, using the
+[`docker ps` command](https://docs.docker.com/reference/commandline/cli/#ps):
+
+    $ docker ps
+    CONTAINER ID     IMAGE     COMMAND     CREATED      STATUS  ...PORTS     NAMES
+    2e5d2f068504    dhe.yourdomain.com/ci-infrastructure/jnkns-img:latest    "/usr/local/bin/jenk     About a minute ago     Up About a minute     50000/tcp, 0.0.0.0:1973->1973/tcp     jenkins01
+
+
+## Accessing the new Jenkins container
+
+The previous `docker run` command mapped port `1973` on the container to port
+`1973` on the Docker host, so the Jenkins Web UI can be accessed at
+`https://<docker-host>:1973` (Don’t forget the `s` at the end of `https`.)
+
+> **Note:** If you are using a self-signed certificate, you may get a security
+> warning from your browser telling you that the certificate is self-signed and
+> not trusted. You may wish to add the certificate to the trusted store in order
+> to prevent further warnings in the future.
+
+![Jenkins landing page](../assets/jenkins-ui.png)
+
+From within the Jenkins Web UI, navigate to `Manage Jenkins` (on the left-hand
+pane) > `Manage Plugins` > `Installed`. The  `Role-based Authorization Strategy`
+plugin should be present with the `Uninstall` button available to the right.
+
+![Jenkins plugin manager](../assets/jenkins-plugins.png)
+
+In another browser session, try to access Jenkins via the default HTTP port 8080
+— `http://<docker-host>:8080`. This should result in a “connection timeout,”
+showing that Jenkins is not available on its default port 8080 over HTTP.
+
+This demonstration shows your Jenkins image has been configured correctly for
+HTTPS access, your new plugin was added and is ready for use, and HTTP access
+has been disabled. At this point, any member of your team can use `docker pull`
+to access the image from your DHE instance, allowing them to access a
+configured, secured Jenkins instance that can run on any infrastructure.
+
+## Next Steps
+
+For more information on using DHE, take a look at the
+[User's Guide](./userguide.md).
diff --git a/docs/sources/docker-hub-enterprise/release-notes.md b/docs/sources/docker-hub-enterprise/release-notes.md
new file mode 100644
index 0000000..f445e2d
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/release-notes.md
@@ -0,0 +1,240 @@
+page_title: Docker Hub Enterprise: Release notes
+page_description: Release notes for Docker Hub Enterprise
+page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, release
+
+# Release Notes
+
+## Docker Hub Enterprise
+
+### DHE 1.0.1
+(11 May 2015)
+
+- Addresses compatibility issue with 1.6.1 CS Docker Engine
+
+### DHE 1.0.0
+(23 Apr 2015)
+
+- First release
+
+## Commercially Supported Docker Engine
+
+### CS Docker Engine 1.6.2-cs5
+(21 May 2015)
+
+For customers running Docker Engine on [supported versions of RedHat Enterprise
+Linux](https://www.docker.com/enterprise/support/) with [SELinux
+enabled](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/
+6/html/Security-Enhanced_Linux/sect-Security-Enhanced_Linux-Working_with_SELinux
+-Enabling_and_Disabling_SELinux.html), the `docker build` and `docker run`
+commands will not have DNS host name resolution and bind-mounted volumes may
+not be accessible.
+As a result, customers with SELinux will be unable to use hostname-based network
+access in either `docker build` or `docker run`, nor will they be able to
+`docker run` containers
+that use `--volume` or `-v` bind-mounts (with an incorrect SELinux label) in
+their environment. By installing Docker
+Engine 1.6.2-cs5, customers can use Docker as intended on RHEL with SELinux enabled.
+
+For example, you see will failures like:
+
+```
+[root@dhe ~]# docker -v
+Docker version 1.6.0-cs2, build b8dd430
+[root@dhe ~]# ping dhe.home.org.au
+PING dhe.home.org.au (10.10.10.104) 56(84) bytes of data.
+64 bytes from dhe.home.gateway (10.10.10.104): icmp_seq=1 ttl=64 time=0.663 ms
+^C
+--- dhe.home.org.au ping statistics ---
+2 packets transmitted, 2 received, 0% packet loss, time 1001ms
+rtt min/avg/max/mdev = 0.078/0.370/0.663/0.293 ms
+[root@dhe ~]# docker run --rm -it debian ping dhe.home.org.au
+ping: unknown host
+[root@dhe ~]# docker run --rm -it debian cat /etc/resolv.conf
+cat: /etc/resolv.conf: Permission denied
+[root@dhe ~]# docker run --rm -it debian apt-get update
+Err http://httpredir.debian.org jessie InRelease
+
+Err http://security.debian.org jessie/updates InRelease
+
+Err http://httpredir.debian.org jessie-updates InRelease
+
+Err http://security.debian.org jessie/updates Release.gpg
+  Could not resolve 'security.debian.org'
+Err http://httpredir.debian.org jessie Release.gpg
+  Could not resolve 'httpredir.debian.org'
+Err http://httpredir.debian.org jessie-updates Release.gpg
+  Could not resolve 'httpredir.debian.org'
+[output truncated]
+
+```
+
+or when running a `docker build`:
+
+```
+[root@dhe ~]# docker build .
+Sending build context to Docker daemon 11.26 kB
+Sending build context to Docker daemon
+Step 0 : FROM fedora
+ ---> e26efd418c48
+Step 1 : RUN yum install httpd
+ ---> Running in cf274900ea35
+
+One of the configured repositories failed (Fedora 21 - x86_64),
+and yum doesn't have enough cached data to continue. At this point the only
+safe thing yum can do is fail. There are a few ways to work "fix" this:
+
+[output truncated]
+```
+
+
+**Affected Versions**: All previous versions of Docker Engine when SELinux
+is enabled.
+
+Docker **highly recommends** that all customers running previous versions of
+Docker Engine update to this release.
+
+#### **How to workaround this issue**
+
+Customers who choose not to install this update have two options. The
+first option is to disable SELinux. This is *not recommended* for production
+systems where SELinux is typically required.
+
+The second option is to pass the following parameter in to `docker run`.
+
+  	     --security-opt=label:type:docker_t
+
+This parameter cannot be passed to the `docker build` command.
+
+#### **Upgrade notes**
+
+When upgrading, make sure you stop DHE first, perform the Engine upgrade, and
+then restart DHE.
+
+If you are running with SELinux enabled, previous Docker Engine releases allowed
+you to bind-mount additional volumes or files inside the container as follows:
+
+		$ docker run -it -v /home/user/foo.txt:/foobar.txt:ro <imagename>
+
+In the 1.6.2-cs5 release, you must ensure additional bind-mounts have the correct
+SELinux context. For example, if you want to mount `foobar.txt` as read-only
+into the container, do the following to create and test your bind-mount:
+
+1. Add the `z` option to the bind mount when you specify `docker run`.
+
+		$ docker run -it -v /home/user/foo.txt:/foobar.txt:ro,z <imagename>
+
+2. Exec into your new container.
+
+	For example, if your container is `bashful_curie`, open a shell on the
+	container:
+
+		$ docker exec -it bashful_curie bash
+
+3. Use `cat` to check the permissions on the mounted file.
+
+		$ cat /foobar.txt
+		the contents of foobar appear
+
+	If you see the file's contents, your mount succeeded. If you receive a
+	`Permission denied` message and/or the `/var/log/audit/audit.log` file on
+	your Docker host contains an AVC Denial message, the mount did not succeed.
+
+		type=AVC msg=audit(1432145409.197:7570): avc:  denied  { read } for  pid=21167 comm="cat" name="foobar.txt" dev="xvda2" ino=17704136 scontext=system_u:system_r:svirt_lxc_net_t:s0:c909,c965 tcontext=unconfined_u:object_r:user_home_t:s0 tclass=file
+
+	Recheck your command line to make sure you passed in the `z` option.
+
+
+### CS Docker Engine 1.6.2-cs4
+(13 May 2015)
+
+Fix mount regression for `/sys`.
+
+### CS Docker Engine 1.6.1-cs3
+(11 May 2015)
+
+Docker Engine version 1.6.1 has been released to address several vulnerabilities
+and is immediately available for all supported platforms. Users are advised to
+upgrade existing installations of the Docker Engine and use 1.6.1 for new installations.
+
+It should be noted that each of the vulnerabilities allowing privilege escalation
+may only be exploited by a malicious Dockerfile or image.  Users are advised to
+run their own images and/or images built by trusted parties, such as those in
+the official images library.
+
+Please send any questions to security@docker.com.
+
+
+#### **[CVE-2015-3629](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3629) Symlink traversal on container respawn allows local privilege escalation**
+
+Libcontainer version 1.6.0 introduced changes which facilitated a mount namespace
+breakout upon respawn of a container. This allowed malicious images to write
+files to the host system and escape containerization.
+
+Libcontainer and Docker Engine 1.6.1 have been released to address this
+vulnerability. Users running untrusted images are encouraged to upgrade Docker Engine.
+
+Discovered by Tõnis Tiigi.
+
+
+#### **[CVE-2015-3627](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3627) Insecure opening of file-descriptor 1 leading to privilege escalation**
+
+The file-descriptor passed by libcontainer to the pid-1 process of a container
+has been found to be opened prior to performing the chroot, allowing insecure
+open and symlink traversal. This allows malicious container images to trigger
+a local privilege escalation.
+
+Libcontainer and Docker Engine 1.6.1 have been released to address this
+vulnerability. Users running untrusted images are encouraged to upgrade
+Docker Engine.
+
+Discovered by Tõnis Tiigi.
+
+#### **[CVE-2015-3630](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3630) Read/write proc paths allow host modification & information disclosure**
+
+Several paths underneath /proc were writable from containers, allowing global
+system manipulation and configuration. These paths included `/proc/asound`,
+`/proc/timer_stats`, `/proc/latency_stats`, and `/proc/fs`.
+
+By allowing writes to `/proc/fs`, it has been noted that CIFS volumes could be
+forced into a protocol downgrade attack by a root user operating inside of a
+container. Machines having loaded the timer_stats module were vulnerable to
+having this mechanism enabled and consumed by a container.
+
+We are releasing Docker Engine 1.6.1 to address this vulnerability. All
+versions up to 1.6.1 are believed vulnerable. Users running untrusted
+images are encouraged to upgrade.
+
+Discovered by Eric Windisch of the Docker Security Team.
+
+#### **[CVE-2015-3631](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3631) Volume mounts allow LSM profile escalation**
+
+By allowing volumes to override files of `/proc` within a mount namespace, a user
+could specify arbitrary policies for Linux Security Modules, including setting
+an unconfined policy underneath AppArmor, or a `docker_t` policy for processes
+managed by SELinux. In all versions of Docker up until 1.6.1, it is possible for
+malicious images to configure volume mounts such that files of proc may be overridden.
+
+We are releasing Docker Engine 1.6.1 to address this vulnerability. All versions
+up to 1.6.1 are believed vulnerable. Users running untrusted images are encouraged
+to upgrade.
+
+Discovered by Eric Windisch of the Docker Security Team.
+
+#### **AppArmor policy improvements**
+
+The 1.6.1 release also marks preventative additions to the AppArmor policy.
+Recently, several CVEs against the kernel have been reported whereby mount
+namespaces could be circumvented through the use of the sys_mount syscall from
+inside of an unprivileged Docker container. In all reported cases, the
+AppArmor policy included in libcontainer and shipped with Docker has been
+sufficient to deflect these attacks. However, we have deemed it prudent to
+proactively tighten the policy further by outright denying the use of the
+`sys_mount` syscall.
+
+Because this addition is preventative, no CVE-ID is requested.
+
+### CS Docker Engine 1.6.0-cs2
+(23 Apr 2015)
+
+- First release, please see the [Docker Engine 1.6.0 Release notes](/release-notes/)
+  for more details.
diff --git a/docs/sources/docker-hub-enterprise/support.md b/docs/sources/docker-hub-enterprise/support.md
new file mode 100644
index 0000000..1d58f8e
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/support.md
@@ -0,0 +1,16 @@
+page_title: Docker Hub Enterprise: Support
+page_description: Commercial Support
+page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, support
+
+# Commercial Support Options
+
+## How to get support
+
+Purchasing a DHE License or Commercial Support subscription means your questions
+and issues about DHE will receive prioritized support.
+You can file a ticket through [email](mailto:support@docker.com) from your
+company email address, or visit our [support site](https://support.docker.com).
+In either case, you'll need to verify your email address, and then you can
+communicate with the support team either by email or web interface.
+
+**The availability of support depends on your [support subscription](https://www.docker.com/enterprise/support/)**
diff --git a/docs/sources/docker-hub-enterprise/usage.md b/docs/sources/docker-hub-enterprise/usage.md
deleted file mode 100644
index 252223e..0000000
--- a/docs/sources/docker-hub-enterprise/usage.md
+++ /dev/null
@@ -1,9 +0,0 @@
-page_title: Using Docker Hub Enterprise
-page_description: Docker Hub Enterprise
-page_keywords: docker hub enterprise
-
-# Docker Hub Enterprise
-
-Documenation coming soon.
-
-
diff --git a/docs/sources/docker-hub-enterprise/userguide.md b/docs/sources/docker-hub-enterprise/userguide.md
new file mode 100644
index 0000000..80908a8
--- /dev/null
+++ b/docs/sources/docker-hub-enterprise/userguide.md
@@ -0,0 +1,126 @@
+page_title: Docker Hub Enterprise: User guide
+page_description: Documentation describing basic use of Docker Hub Enterprise
+page_keywords: docker, documentation, about, technology, hub, enterprise
+
+
+# Docker Hub Enterprise User's Guide
+
+This guide covers tasks and functions a user of Docker Hub Enterprise (DHE) will
+need to know about, such as pushing or pulling images, etc. For tasks DHE
+administrators need to accomplish, such as configuring or monitoring DHE, please
+visit the [Administrator's Guide](./adminguide.md).
+
+## Overview
+
+The primary use case for DHE users is to push and pull images to and from the
+DHE image storage service. For example, you might pull an Official Image for
+Ubuntu from the Docker Hub, customize it with configuration settings for your
+infrastructure and then push it to your DHE image storage for other developers
+to pull and use for their development environments.
+
+Pushing and pulling images with DHE works very much like any other Docker
+registry: you use the `docker pull` command to retrieve images and the `docker
+push` command to add an image to a DHE repository. To learn more about Docker
+images, see
+[User Guide: Working with Docker Images](https://docs.docker.com/userguide/dockerimages/). For a step-by-step
+example of the entire process, see the
+[Quick Start: Basic Workflow Guide](./quick-start.md).
+
+> **Note**: If your DHE instance has authentication enabled, you will need to
+>use your command line to `docker login <dhe-hostname>` (e.g., `docker login
+> dhe.yourdomain.com`).
+>
+> Failures due to unauthenticated `docker push` and `docker pull` commands will
+> look like :
+>
+>     $ docker pull dhe.yourdomain.com/hello-world
+>     Pulling repository dhe.yourdomain.com/hello-world
+>     FATA[0001] Error: image hello-world:latest not found
+>
+>     $ docker push dhe.yourdomain.com/hello-world
+>     The push refers to a repository [dhe.yourdomain.com/hello-world] (len: 1)
+>     e45a5af57b00: Image push failed
+>     FATA[0001] Error pushing to registry: token auth attempt for registry
+>     https://dhe.yourdomain.com/v2/:
+>     https://dhe.yourdomain.com/auth/v2/token/?scope=
+>     repository%3Ahello-world%3Apull%2Cpush&service=dhe.yourdomain.com
+>     request failed with status: 401 Unauthorized
+
+## Pushing Images
+
+You push an image up to a DHE repository by using the
+[`docker push` command](https://docs.docker.com/reference/commandline/cli/#push).
+
+You can add a `tag` to your image so that you can more easily identify it
+amongst other variants and so that it refers to your DHE server.
+
+    `$ docker tag hello-world:latest dhe.yourdomain.com/yourusername/hello-mine:latest`
+
+The command labels a `hello-world:latest` image using a new tag in the
+`[REGISTRYHOST/][USERNAME/]NAME[:TAG]` format.  The `REGISTRYHOST` in this
+case is your DHE server, `dhe.yourdomain.com`, and the `USERNAME` is
+`yourusername`. Lastly, the image tag is set to `hello-mine:latest`.
+
+Once an image is tagged, you can push it to DHE with:
+
+    `$ docker push dhe.yourdomain.com/demouser/hello-mine:latest`
+    
+> **Note**: If the Docker daemon on which you are running `docker push` doesn't
+> have the right certificates set up, you will get an error similar to:
+>
+>     $ docker push dhe.yourdomain.com/demouser/hello-world
+>     FATA[0000] Error response from daemon: v1 ping attempt failed with error:
+>     Get https://dhe.yourdomain.com/v1/_ping: x509: certificate signed by
+>     unknown authority. If this private registry supports only HTTP or HTTPS
+>     with an unknown CA certificate, please add `--insecure-registry
+>     dhe.yourdomain.com` to the daemon's arguments. In the case of HTTPS, if
+>     you have access to the registry's CA certificate, no need for the flag;
+>     simply place the CA certificate at
+>     /etc/docker/certs.d/dhe.yourdomain.com/ca.crt
+
+## Pulling images
+
+You can retrieve an image with the
+[`docker pull` command](https://docs.docker.com/reference/commandline/cli/#run),
+or you can retrieve an image and run Docker to build the container with the
+[`docker run`command](https://docs.docker.com/reference/commandline/cli/#run).
+
+To retrieve an image from DHE and then run Docker to build the container, add
+the needed info to `docker run`:
+
+        $ docker run dhe.yourdomain.com/yourusername/hello-mine
+        latest: Pulling from dhe.yourdomain.com/yourusername/hello-mine
+        511136ea3c5a: Pull complete
+        31cbccb51277: Pull complete
+        e45a5af57b00: Already exists
+        Digest: sha256:45f0de377f861694517a1440c74aa32eecc3295ea803261d62f950b1b757bed1
+        Status: Downloaded newer image for dhe.yourdomain.com/demouser/hello-mine:latest
+
+Note that if you don't specify a version, by default the `latest` version of an
+image will be pulled.
+
+If you run `docker images` after this you'll see a `hello-mine` image.
+
+        $ docker images
+        REPOSITORY                           TAG     IMAGE ID      CREATED       VIRTUAL SIZE
+        dhe.yourdomain.com/yourusername/hello-mine  latest  e45a5af57b00  3 months ago  910 B
+
+To pull an image without building the container, use `docker pull` and specify
+your DHE registry by adding it to the command:
+
+     $ docker pull dhe.yourdomain.com/yourusername/hello-mine
+
+
+## Next Steps
+
+For information on administering DHE, take a look at the
+[Administrator's Guide](./adminguide.md).
+
+
+<!--TODO:
+
+* mention that image aliases that are not in the same repository are not updated - either on push or pull
+* but that multiple tags in one repo are pushed if you don't specify the `:tag` (ie, `imagename` does not always mean `imagename:latest`)
+* show what happens for non-latest, and when there are more than one tag in a repo
+* explain the fully-qualified repo/image name
+* explain how to remove an image from DHE -->
diff --git a/docs/sources/docker-hub/accounts.md b/docs/sources/docker-hub/accounts.md
index e4623f9..510111f 100644
--- a/docs/sources/docker-hub/accounts.md
+++ b/docs/sources/docker-hub/accounts.md
@@ -4,7 +4,7 @@
 
 # Accounts on Docker Hub
 
-## Docker Hub Accounts
+## Docker Hub accounts
 
 You can `search` for Docker images and `pull` them from [Docker
 Hub](https://hub.docker.com) without signing in or even having an
@@ -12,7 +12,7 @@
 a repository, you are going to need a [Docker
 Hub](https://hub.docker.com) account.
 
-### Registration for a Docker Hub Account
+### Registration for a Docker Hub account
 
 You can get a [Docker Hub](https://hub.docker.com) account by
 [signing up for one here](https://hub.docker.com/account/signup/). A valid
@@ -32,23 +32,52 @@
 from the [*Password Reset*](https://hub.docker.com/account/forgot-password/)
 page.
 
-## Organizations & Groups
+## Organizations and groups
 
-Also available on the Docker Hub are organizations and groups that allow
-you to collaborate across your organization or team. You can see what
-organizations [you belong to and add new organizations](
+A Docker Hub organization contains public and private repositories just like
+a user account. Access to push, pull or create these organisation owned repositories
+is allocated by defining groups of users and then assigning group rights to
+specific repositories. This allows you to distribute limited access
+Docker images, and to select which Docker Hub users can publish new images.
+
+### Creating and viewing organizations
+
+You can see what organizations [you belong to and add new organizations](
 https://hub.docker.com/account/organizations/) from the Account Settings
-tab. They are also listed below your user name on your repositories page and in your account profile.
+tab. They are also listed below your user name on your repositories page
+and in your account profile.
 
 ![organizations](/docker-hub/hub-images/orgs.png)
 
-From within your organizations you can create groups that allow you to
-further manage who can interact with your repositories.
+### Organization groups
+
+Users in the `Owners` group of an organization can create and modify the
+membership of groups.
+
+Unless they are the organization's `Owner`, users can only see groups of which they
+are members.
 
 ![groups](/docker-hub/hub-images/groups.png)
 
-You can add or invite users to join groups by clicking on the organization and then clicking the edit button for the group to which you want to add members. Enter a user-name (for current Hub users) or email address (if they are not yet Hub users) for the person you want to invite. They will receive an email invitation to join the group.
+### Repository group permissions
 
-![invite members](/docker-hub/hub-images/invite.png)
+Use organization groups to manage who can interact with your repositories.
+
+You need to be a member of the organization's `Owners` group to create a new group,
+Hub repository or automated build. As an `Owner`, you then delegate the following
+repository access rights to groups:
+
+- `Read` access allows a user to view, search, and pull a private repository in the
+  same way as they can a public repository.
+- `Write` access users are able to push to non-automated repositories on the Docker
+  Hub.
+- `Admin` access allows the user to modify the repositories "Description", "Collaborators" rights,
+  "Mark as unlisted", "Public/Private" status and "Delete".
+
+> **Note**: A User who has not yet verified their email address will only have
+> `Read` access to the repository, regardless of the rights their group membership
+>  gives them.
+
+![Organization repository collaborators](/docker-hub/hub-images/org-repo-collaborators.png)
 
 
diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md
index 1613ad1..541bc15 100644
--- a/docs/sources/docker-hub/builds.md
+++ b/docs/sources/docker-hub/builds.md
@@ -8,20 +8,18 @@
 
 *Automated Builds* are a special feature of Docker Hub which allow you to
 use [Docker Hub's](https://hub.docker.com) build clusters to automatically
-create images from a specified `Dockerfile` and a GitHub or Bitbucket repository
-(or "context"). The system will clone your repository and build the image
-described by the `Dockerfile` using the repository as the context. The
-resulting automated image will then be uploaded to the Docker Hub registry
-and marked as an *Automated Build*.
+create images from a GitHub or Bitbucket repository containing a `Dockerfile`
+The system will clone your repository and build the image described by the
+`Dockerfile` using the directory the `Dockerfile` is in (and subdirectories)
+as the build context. The resulting automated image will then be uploaded
+to the Docker Hub registry and marked as an *Automated Build*.
 
 Automated Builds have several advantages:
 
 * Users of *your* Automated Build can trust that the resulting
 image was built exactly as specified.
-
 * The `Dockerfile` will be available to anyone with access to
-your repository on the Docker Hub registry. 
-
+your repository on the Docker Hub registry.
 * Because the process is automated, Automated Builds help to
 make sure that your repository is always up to date.
 
@@ -33,16 +31,26 @@
 and on GitHub and/or Bitbucket. In either case, the account needs
 to be properly validated and activated before you can link to it.
 
-## Setting up Automated Builds with GitHub
-
-In order to set up an Automated Build, you need to first link your
-[Docker Hub](https://hub.docker.com) account with a GitHub account.
+The first time you to set up an Automated Build, your
+[Docker Hub](https://hub.docker.com) account will need to be linked to
+a GitHub or Bitbucket account.
 This will allow the registry to see your repositories.
 
-> *Note:* 
+If you have previously linked your Docker Hub account, and want to view or modify
+that link, click on the "Manage - Settings" link in the sidebar, and then
+"Linked Accounts" in your Settings sidebar.
+
+## Automated Builds from GitHub
+
+If you've previously linked your Docker Hub account to your GitHub account,
+you'll be able to skip to the [Creating an Automated Build](#creating-an-automated-build).
+
+### Linking your Docker Hub account to a GitHub account
+
+> *Note:*
 > Automated Builds currently require *read* and *write* access since
 > [Docker Hub](https://hub.docker.com) needs to setup a GitHub service
-> hook. We have no choice here, this is how GitHub manages permissions, sorry! 
+> hook. We have no choice here, this is how GitHub manages permissions, sorry!
 > We do guarantee nothing else will be touched in your account.
 
 To get started, log into your Docker Hub account and click the
@@ -51,17 +59,99 @@
 
 Select the [GitHub service](https://registry.hub.docker.com/associate/github/).
 
-Then follow the onscreen instructions to authorize and link your
+When linking to GitHub, you'll need to select either "Public and Private",
+or "Limited" linking.
+
+The "Public and Private" option is the easiest to use,
+as it grants the Docker Hub full access to all of your repositories. GitHub
+also allows you to grant access to repositories belonging to your GitHub
+organizations.
+
+By choosing the "Limited" linking, your Docker Hub account only gets permission
+to access your public data and public repositories.
+
+Follow the onscreen instructions to authorize and link your
 GitHub account to Docker Hub. Once it is linked, you'll be able to
-choose a repo from which to create the Automatic Build.
+choose a source repository from which to create the Automatic Build.
+
+You will be able to review and revoke Docker Hub's access by visiting the
+[GitHub User's Applications settings](https://github.com/settings/applications).
+
+> **Note**: If you delete the GitHub account linkage that is used for one of your
+> automated build repositories, the previously built images will still be available.
+> If you re-link to that GitHub account later, the automated build can be started
+> using the "Start Build" button on the Hub, or if the webhook on the GitHub repository
+> still exists, will be triggered by any subsequent commits.
+
+### Auto builds and limited linked GitHub accounts.
+
+If you selected to link your GitHub account with only a "Limited" link, then
+after creating your automated build, you will need to either manually trigger a
+Docker Hub build using the "Start a Build" button, or add the GitHub webhook
+manually, as described in [GitHub Service Hooks](#github-service-hooks).
+
+### Changing the GitHub user link
+
+If you want to remove, or change the level of linking between your GitHub account
+and the Docker Hub, you need to do this in two places.
+
+First, remove the "Linked Account" from your Docker Hub "Settings".
+Then go to your GitHub account's Personal settings, and in the "Applications"
+section, "Revoke access".
+
+You can now re-link your account at any time.
+
+### GitHub organizations
+
+GitHub organizations and private repositories forked from organizations will be
+made available to auto build using the "Docker Hub Registry" application, which
+needs to be added to the organization - and then will apply to all users.
+
+To check, or request access, go to your GitHub user's "Setting" page, select the
+"Applications" section from the left side bar, then click the "View" button for
+"Docker Hub Registry".
+
+![Check User access to GitHub](/docker-hub/hub-images/gh-check-user-org-dh-app-access.png)
+
+The organization's administrators may need to go to the Organization's "Third
+party access" screen in "Settings" to Grant or Deny access to the Docker Hub
+Registry application. This change will apply to all organization members.
+
+![Check Docker Hub application access to Organization](/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png)
+
+More detailed access controls to specific users and GitHub repositories would be
+managed using the GitHub People and Teams interfaces.
 
 ### Creating an Automated Build
 
 You can [create an Automated Build](
 https://registry.hub.docker.com/builds/github/select/) from any of your
-public or private GitHub repositories with a `Dockerfile`.
+public or private GitHub repositories that have a `Dockerfile`.
 
-### GitHub Submodules
+Once you've selected the source repository, you can then configure:
+
+- The Hub user/org the repository is built to - either your Hub account name,
+or the name of any Hub organizations your account is in
+- The Docker repository name the image is built to
+- If the Docker repository should be "Public" or "Private"
+  You can change the accessibility options after the repository has been created.
+  If you add a Private repository to a Hub user, then you can only add other users
+  as collaborators, and those users will be able to view and pull all images in that 
+  repository. To configure more granular access permissions, such as using groups of 
+  users or allow different users access to different image tags, then you need
+  to add the Private repository to a Hub organization that your user has Administrator
+  privilege on.
+- If you want the GitHub to notify the Docker Hub when a commit is made, and thus trigger
+  a rebuild of all the images in this automated build.
+
+You can also select one or more
+- The git branch/tag, which repository sub-directory to use as the context
+- The Docker image tag name
+
+You can set a description for the repository by clicking "Description" link in the righthand side bar after the automated build - note that the "Full Description" will be over-written next build from the README.md file.
+has been created.
+
+### GitHub private submodules
 
 If your GitHub repository contains links to private submodules, you'll get an
 error message in your build.
@@ -114,17 +204,14 @@
     </tr>
   </tbody>
 </table>
-     
-### GitHub Organizations
 
-GitHub organizations will appear once your membership to that organization is
-made public on GitHub. To verify, you can look at the members tab for your
-organization on GitHub.
+### GitHub service hooks
 
-### GitHub Service Hooks
+The GitHub Service hook allows GitHub to notify the Docker Hub when something has
+been committed to that git repository. You will need to add the Service Hook manually
+if your GitHub account is "Limited" linked to the Docker Hub.
 
-Follow the steps below to configure the GitHub service
-hooks for your Automated Build:
+Follow the steps below to configure the GitHub Service hooks for your Automated Build:
 
 <table class="table table-bordered">
   <thead>
@@ -146,14 +233,16 @@
       <td><img src="/docker-hub/hub-images/gh_menu.png" alt="Webhooks & Services"></td>
       <td>Click on "Webhooks & Services" on the left side of the page.</td></tr>
       <tr><td>3.</td>
-      <td><img src="/docker-hub/hub-images/gh_service_hook.png" alt="Find the service labeled Docker"></td><td>Find the service labeled "Docker" and click on it.</td></tr>
-      <tr><td>4.</td><td><img src="/docker-hub/hub-images/gh_docker-service.png" alt="Activate Service Hooks"></td>
+      <td><img src="/docker-hub/hub-images/gh_service_hook.png" alt="Find the service labeled Docker"></td>
+      <td>Find the service labeled "Docker" (or click on "Add service") and click on it.</td></tr>
+      <tr><td>4.</td>
+      <td><img src="/docker-hub/hub-images/gh_docker-service.png" alt="Activate Service Hooks"></td>
       <td>Make sure the "Active" checkbox is selected and click the "Update service" button to save your changes.</td>
     </tr>
   </tbody>
 </table>
 
-## Setting up Automated Builds with Bitbucket
+## Automated Builds with Bitbucket
 
 In order to setup an Automated Build, you need to first link your
 [Docker Hub](https://hub.docker.com) account with a Bitbucket account.
@@ -249,7 +338,7 @@
 OK
 ```
 
-> **Note:** 
+> **Note:**
 > You can only trigger one build at a time and no more than one
 > every five minutes. If you already have a build pending, or if you
 > recently submitted a build request, those requests *will be ignored*.
diff --git a/docs/sources/docker-hub/home.md b/docs/sources/docker-hub/home.md
index 15baf7b..3f81208 100644
--- a/docs/sources/docker-hub/home.md
+++ b/docs/sources/docker-hub/home.md
@@ -1,8 +1,8 @@
-page_title: The Docker Hub Registry Help
+page_title: The Docker Hub Registry help
 page_description: The Docker Registry help documentation home
 page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation
 
-# The Docker Hub Registry Help
+# The Docker Hub Registry help
 
 ## Introduction
 
diff --git a/docs/sources/docker-hub/hub-images/dashboard.png b/docs/sources/docker-hub/hub-images/dashboard.png
new file mode 100644
index 0000000..594c5d1
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/dashboard.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png b/docs/sources/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png
new file mode 100644
index 0000000..0df38c69
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/gh-check-user-org-dh-app-access.png b/docs/sources/docker-hub/hub-images/gh-check-user-org-dh-app-access.png
new file mode 100644
index 0000000..13ad646
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/gh-check-user-org-dh-app-access.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/groups.png b/docs/sources/docker-hub/hub-images/groups.png
index 0c6430e..23dbbfc 100644
--- a/docs/sources/docker-hub/hub-images/groups.png
+++ b/docs/sources/docker-hub/hub-images/groups.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/hub.png b/docs/sources/docker-hub/hub-images/hub.png
index 16840e0..489f730 100644
--- a/docs/sources/docker-hub/hub-images/hub.png
+++ b/docs/sources/docker-hub/hub-images/hub.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/invite.png b/docs/sources/docker-hub/hub-images/invite.png
index 0a157ff..f663340 100644
--- a/docs/sources/docker-hub/hub-images/invite.png
+++ b/docs/sources/docker-hub/hub-images/invite.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/org-repo-collaborators.png b/docs/sources/docker-hub/hub-images/org-repo-collaborators.png
new file mode 100644
index 0000000..fdb53f7
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/org-repo-collaborators.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/orgs.png b/docs/sources/docker-hub/hub-images/orgs.png
index 604ed95..6987cd3 100644
--- a/docs/sources/docker-hub/hub-images/orgs.png
+++ b/docs/sources/docker-hub/hub-images/orgs.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/repos.png b/docs/sources/docker-hub/hub-images/repos.png
index f25bb3a..4e83d34 100644
--- a/docs/sources/docker-hub/hub-images/repos.png
+++ b/docs/sources/docker-hub/hub-images/repos.png
Binary files differ
diff --git a/docs/sources/docker-hub/index.md b/docs/sources/docker-hub/index.md
index c29a5f7..db6694d 100644
--- a/docs/sources/docker-hub/index.md
+++ b/docs/sources/docker-hub/index.md
@@ -1,23 +1,32 @@
-page_title: The Docker Hub Help
+page_title: The Docker Hub help
 page_description: The Docker Help documentation home
 page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, accounts, organizations, repositories, groups
 
 # Docker Hub
 
+The [Docker Hub](https://hub.docker.com) provides a cloud-based platform service
+for distributed applications, including container image distribution and change
+management, user and team collaboration, and lifecycle workflow automation.
+
 ![DockerHub](/docker-hub/hub-images/hub.png)
 
-## [Accounts](accounts/)
+## [Finding and pulling images](./userguide.md)
 
-[Learn how to create](accounts/) a [Docker Hub](https://hub.docker.com)
+Find out how to [use the Docker Hub](./userguide.md) to find and pull Docker
+images to run or build upon.
+
+## [Accounts](./accounts.md)
+
+[Learn how to create](./accounts.md) a Docker Hub
 account and manage your organizations and groups.
 
-## [Repositories](repos/)
+## [Your Repositories](./repos.md)
 
 Find out how to share your Docker images in [Docker Hub
-repositories](repos/) and how to store and manage private images.
+repositories](./repos.md) and how to store and manage private images.
 
-## [Automated Builds](builds/)
+## [Automated builds](./builds.md)
 
 Learn how to automate your build and deploy pipeline with [Automated
-Builds](builds/)
+Builds](./builds.md)
 
diff --git a/docs/sources/docker-hub/official_repos.md b/docs/sources/docker-hub/official_repos.md
index 4ec4312..98c33c6 100644
--- a/docs/sources/docker-hub/official_repos.md
+++ b/docs/sources/docker-hub/official_repos.md
@@ -1,189 +1,106 @@
-page_title: Guidelines for Official Repositories on Docker Hub
+page_title: Official Repositories on Docker Hub
 page_description: Guidelines for Official Repositories on Docker Hub
 page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, official, image, documentation
 
-# Guidelines for Creating and Documenting Official Repositories
+# Official Repositories on Docker Hub
 
-## Introduction
+The Docker [Official Repositories](http://registry.hub.docker.com/official) are
+a curated set of Docker repositories that are promoted on Docker Hub. They are
+designed to:
 
-You’ve been given the job of creating an image for an Official Repository
-hosted on [Docker Hub Registry](https://registry.hub.docker.com/). These are
-our guidelines for getting that task done. Even if you’re not
-planning to create an Official Repo, you can think of these guidelines as best
-practices for image creation generally.
+* Provide essential base OS repositories (for example,
+  [`ubuntu`](https://registry.hub.docker.com/_/ubuntu/),
+  [`centos`](https://registry.hub.docker.com/_/centos/)) that serve as the
+  starting point for the majority of users.
 
-This document consists of two major sections:
+* Provide drop-in solutions for popular programming language runtimes, data
+  stores, and other services, similar to what a Platform-as-a-Service (PAAS)
+  would offer.
 
-* A list of expected files, resources and supporting items for your image,
-along with best practices for creating those items
-* Examples embodying those practices
+* Exemplify [`Dockerfile` best practices](/articles/dockerfile_best-practices)
+  and provide clear documentation to serve as a reference for other `Dockerfile`
+  authors.
 
-## Expected Files & Resources
+* Ensure that security updates are applied in a timely manner. This is
+  particularly important as many Official Repositories are some of the most
+  popular on Docker Hub.
 
-### A Git repository
+* Provide a channel for software vendors to redistribute up-to-date and
+  supported versions of their products. Organization accounts on Docker Hub can
+  also serve this purpose, without the careful review or restrictions on what
+  can be published.
 
-Your image needs to live in a Git repository, preferably on GitHub. (If you’d
-like to use a different provider, please [contact us](mailto:feedback@docker.com)
-directly.) Docker **strongly** recommends that this repo be publicly
-accessible.
+Docker, Inc. sponsors a dedicated team that is responsible for reviewing and
+publishing all Official Repositories content. This team works in collaboration
+with upstream software maintainers, security experts, and the broader Docker
+community.
 
-If the repo is private or has otherwise limited access, you must provide a
-means of at least “read-only” access for both general users and for the
-docker-library maintainers, who need access for review and building purposes.
+While it is preferrable to have upstream software authors maintaining their
+corresponding Official Repositories, this is not a strict requirement. Creating
+and maintaining images for Official Repositories is a public process. It takes
+place openly on GitHub where participation is encouraged. Anyone can provide
+feedback, contribute code, suggest process changes, or even propose a new
+Official Repository.
 
-### A Dockerfile
+## Should I use Official Repositories?
 
-Complete information on `Dockerfile`s can be found in the [Reference section](https://docs.docker.com/reference/builder/).
-We also have a page discussing [best practices for writing `Dockerfile`s](/articles/dockerfile_best-practices).
-Your `Dockerfile` should adhere to the following:
+New Docker users are encouraged to use the Official Repositories in their
+projects. These repositories have clear documentation, promote best practices,
+and are designed for the most common use cases. Advanced users are encouraged to
+review the Official Repositories as part of their `Dockerfile` learning process.
 
-* It must be written either by using `FROM scratch` or be based on another,
-established Official Image.
-* It must follow `Dockerfile` best practices. These are discussed on the
-[best practices page](/articles/dockerfile_best-practices). In addition,
-Docker engineer Michael Crosby has some good tips for `Dockerfiles` in
-this [blog post](http://crosbymichael.com/dockerfile-best-practices-take-2.html).
+A common rationale for diverging from Official Repositories is to optimize for
+image size. For instance, many of the programming language stack images contain
+a complete build toolchain to support installation of modules that depend on
+optimized code. An advanced user could build a custom image with just the
+necessary pre-compiled libraries to save space.
 
-While [`ONBUILD` triggers](https://docs.docker.com/reference/builder/#onbuild)
-are not required, if you choose to use them you should:
+A number of language stacks such as
+[`python`](https://registry.hub.docker.com/_/python/) and
+[`ruby`](https://registry.hub.docker.com/_/ruby/) have `-slim` tag variants
+designed to fill the need for optimization.  Even when these "slim" variants are
+insufficient, it is still recommended to inherit from an Official Repository
+base OS image to leverage the ongoing maintenance work, rather than duplicating
+these efforts.
 
-* Build both `ONBUILD` and non-`ONBUILD` images, with the `ONBUILD` image
-built `FROM` the non-`ONBUILD` image.
-* The `ONBUILD` image should be specifically tagged, for example, `ruby:
-latest`and `ruby:onbuild`, or `ruby:2` and  `ruby:2-onbuild`
+## How can I get involved?
 
-### A short description
+All Official Repositories contain a **User Feedback** section in their
+documentation which covers the details for that specific repository. In most
+cases, the GitHub repository which contains the Dockerfiles for an Official
+Repository also has an active issue tracker. General feedback and support
+questions should be directed to `#docker-library` on Freenode IRC.
 
-Include a brief description of your image (in plaintext). Only one description
-is required; you don’t need additional descriptions for each tag. The file
-should also: 
+## How do I create a new Official Repository?
 
-* Be named `README-short.txt`
-* Reside in the repo for the “latest” tag
-* Not exceed 100 characters
+From a high level, an Official Repository starts out as a proposal in the form
+of a set of GitHub pull requests.  You'll find detailed and objective proposal
+requirements in the following GitHub repositories:
 
-### A logo
+* [docker-library/official-images](https://github.com/docker-library/official-images)
 
-Include a logo of your company or the product (png format preferred). Only one
-logo is required; you don’t need additional logo files for each tag. The logo
-file should have the following characteristics: 
+* [docker-library/docs](https://github.com/docker-library/docs)
 
-* Be named `logo.png`
-* Should reside in the repo for the “latest” tag
-* Should fit inside a 200px square, maximized in one dimension (preferably the
-width)
-* Square or wide (landscape) is preferred over tall (portrait), but exceptions
-can be made based on the logo needed
+The Official Repositories team, with help from community contributors, formally
+review each proposal and provide feedback to the author. This initial review
+process may require a bit of back and forth before the proposal is accepted.
 
-### A long description
+There are also subjective considerations during the review process. These
+subjective concerns boil down to the basic question: "is this image generally
+useful?"  For example, the [`python`](https://registry.hub.docker.com/_/python/)
+Official Repository is "generally useful" to the large Python developer
+community, whereas an obscure text adventure game written in Python last week is
+not.
 
-Include a comprehensive description of your image (in Markdown format, GitHub 
-flavor preferred). Only one description is required; you don’t need additional
-descriptions for each tag. The file should also: 
+When a new proposal is accepted, the author becomes responsibile for keeping
+their images up-to-date and responding to user feedback.  The Official
+Repositories team becomes responsibile for publishing the images and
+documentation on Docker Hub.  Updates to the Official Repository follow the same
+pull request process, though with less review. The Official Repositories team
+ultimately acts as a gatekeeper for all changes, which helps mitigate the risk
+of quality and security issues from being introduced.
 
-* Be named `README.md`
-* Reside in the repo for the “latest” tag
-* Be no longer than absolutely necessary, while still addressing all the
-content requirements
-
-In terms of content, the long description must include the following sections:
-
-* Overview & links
-* How-to/usage
-* Issues & contributions
-
-#### Overview & links
-
-This section should provide:
-
-* an overview of the software contained in the image, similar to the
-introduction in a Wikipedia entry
-
-* a selection of links to outside resources that help to describe the software
-
-* a *mandatory* link to the `Dockerfile`
-
-#### How-to/usage
-
-A section that describes how to run and use the image, including common use
-cases and example `Dockerfile`s (if applicable). Try to provide clear, step-by-
-step instructions wherever possible.
-
-##### Issues & contributions
-
-In this section, point users to any resources that can help them contribute to
-the project. Include contribution guidelines and any specific instructions
-related to your development practices. Include a link to
-[Docker’s resources for contributors](https://docs.docker.com/contributing/contributing/).
-Be sure to include contact info, handles, etc. for official maintainers.
-
-Also include information letting users know where they can go for help and how
-they can file issues with the repo. Point them to any specific IRC channels,
-issue trackers, contacts, additional “how-to” information or other resources.
-
-### License
-
-Include a file, `LICENSE`, of any applicable license.  Docker recommends using
-the license of the software contained in the image, provided it allows Docker,
-Inc. to legally build and distribute the image. Otherwise, Docker recommends
-adopting the [Expat license](http://directory.fsf.org/wiki/License:Expat)
-(a.k.a., the MIT or X11 license).
-
-## Examples
-
-Below are sample short and long description files for an imaginary image
-containing Ruby on Rails.
-
-### Short description
-
-`README-short.txt`
-
-`Ruby on Rails is an open-source application framework written in Ruby. It emphasizes best practices such as convention over configuration, active record pattern, and the model-view-controller pattern.`
-
-### Long description
-
-`README.md`
-
-```markdown
-# What is Ruby on Rails
-
-Ruby on Rails, often simply referred to as Rails, is an open source web application framework which runs via the Ruby programming language. It is a full-stack framework: it allows creating pages and applications that gather information from the web server, talk to or query the database, and render templates out of the box. As a result, Rails features a routing system that is independent of the web server.
-
-> [wikipedia.org/wiki/Ruby_on_Rails](https://en.wikipedia.org/wiki/Ruby_on_Rails)
-
-# How to use this image
-
-## Create a `Dockerfile` in your rails app project
-
-    FROM rails:onbuild
-
-Put this file in the root of your app, next to the `Gemfile`.
-
-This image includes multiple `ONBUILD` triggers so that should be all that you need for most applications. The build will `ADD . /usr/src/app`, `RUN bundle install`, `EXPOSE 3000`, and set the default command to `rails server`.
-
-Then build and run the Docker image.
-
-    docker build -t my-rails-app .
-    docker run --name some-rails-app -d my-rails-app
-
-Test it by visiting `http://container-ip:3000` in a browser. On the other hand, if you need access outside the host on port 8080:
-
-    docker run --name some-rails-app -p 8080:3000 -d my-rails-app
-
-Then go to `http://localhost:8080` or `http://host-ip:8080` in a browser.
-```
-
-For more examples, take a look at these repos: 
-
-* [Go](https://github.com/docker-library/golang)
-* [PostgreSQL](https://github.com/docker-library/postgres)
-* [Buildpack-deps](https://github.com/docker-library/buildpack-deps)
-* ["Hello World" minimal container](https://github.com/docker-library/hello-world)
-* [Node](https://github.com/docker-library/node)
-
-## Submit your repo
-
-Once you've checked off everything in these guidelines, and are confident your
-image is ready for primetime, please contact us at
-[partners@docker.com](mailto:partners@docker.com) to have your project
-considered for the Official Repos program.
+> **Note**: If you are interested in proposing an Official Repository, but would
+> like to discuss it with Docker, Inc. privately first, please send your
+> inquiries to partners@docker.com.  There is no fast-track or pay-for-status
+> option.
diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md
index 5765835..1c4176f 100644
--- a/docs/sources/docker-hub/repos.md
+++ b/docs/sources/docker-hub/repos.md
@@ -1,42 +1,37 @@
-page_title: Repositories and Images on Docker Hub
-page_description: Repositories and Images on Docker Hub
+page_title: Your Repositories on Docker Hub
+page_description: Your Repositories on Docker Hub
 page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation
 
-# Repositories and Images on Docker Hub
+# Your Hub repositories
+
+Docker Hub repositories make it possible for you to share images with co-workers,
+customers or the Docker community at large. If you're building your images internally,
+either on your own Docker daemon, or using your own Continuous integration services,
+you can push them to a Docker Hub repository that you add to your Docker Hub user or
+organization account.
+
+Alternatively, if the source code for your Docker image is on GitHub or Bitbucket,
+you can use an "Automated build" repository, which is built by the Docker Hub
+services. See the [automated builds documentation](./builds.md) to read about
+the extra functionality provided by those services.
 
 ![repositories](/docker-hub/hub-images/repos.png)
 
-## Searching for repositories and images
-
-You can `search` for all the publicly available repositories and images using
-Docker.
-
-    $ sudo docker search ubuntu
-
-This will show you a list of the currently available repositories on the
-Docker Hub which match the provided keyword.
-
-If a repository is private it won't be listed on the repository search
-results. To see repository statuses, you can look at your [profile
-page](https://hub.docker.com) on [Docker Hub](https://hub.docker.com).
-
-## Repositories
-
 Your Docker Hub repositories have a number of useful features.
 
-### Stars
+## Stars
 
 Your repositories can be starred and you can star repositories in
 return. Stars are a way to show that you like a repository. They are
 also an easy way of bookmarking your favorites.
 
-### Comments
+## Comments
 
 You can interact with other members of the Docker community and maintainers by
 leaving comments on repositories. If you find any comments that are not
 appropriate, you can flag them for review.
 
-### Collaborators and their role
+## Collaborators and their role
 
 A collaborator is someone you want to give access to a private
 repository. Once designated, they can `push` and `pull` to your
@@ -48,26 +43,11 @@
 > A collaborator cannot add other collaborators. Only the owner of
 > the repository has administrative access.
 
-You can also collaborate on Docker Hub with organizations and groups.
-You can read more about that [here](accounts/).
+You can also assign more granular collaborator rights ("Read", "Write", or "Admin")
+on Docker Hub by using organizations and groups. For more information
+see the [accounts documentation](accounts/).
 
-## Official Repositories
-
-The Docker Hub contains a number of [official
-repositories](http://registry.hub.docker.com/official). These are
-certified repositories from vendors and contributors to Docker. They
-contain Docker images from vendors like Canonical, Oracle, and Red Hat
-that you can use to build applications and services.
-
-If you use Official Repositories you know you're using a supported,
-optimized and up-to-date image to power your applications.
-
-> **Note:**
-> If you would like to contribute an official repository for your
-> organization, product or team you can see more information
-> [here](https://github.com/docker/stackbrew).
-
-## Private Repositories
+## Private repositories
 
 Private repositories allow you to have repositories that contain images
 that you want to keep private, either to your own account or within an
@@ -100,8 +80,15 @@
 
 ## Webhooks
 
-You can configure webhooks for your repositories on the Repository
-Settings page. A webhook is called only after a successful `push` is
+A webhook is an HTTP call-back triggered by a specific event.
+You can use a Hub repository webhook to notify people, services, and other
+applications after a new image is pushed to your repository (this also happens
+for Automated builds). For example, you can trigger an automated test or
+deployment to happen as soon as the image is available.
+
+To get started adding webhooks, go to the desired repository in the Hub,
+and click "Webhooks" under the "Settings" box.
+A webhook is called only after a successful `push` is
 made. The webhook calls are HTTP POST requests with a JSON payload
 similar to the example shown below.
 
@@ -137,13 +124,9 @@
 }
 ```
 
-Webhooks allow you to notify people, services and other applications of
-new updates to your images and repositories. To get started adding webhooks,
-go to the desired repository in the Hub, and click "Webhooks" under the "Settings"
-box.
+<TODO: does it tell you what tag was updated?>
 
-> **Note:** For testing, you can try an HTTP request tool like
-> [requestb.in](http://requestb.in/).
+For testing, you can try an HTTP request tool like [requestb.in](http://requestb.in/).
 
 > **Note**: The Docker Hub servers are currently in the IP range
 > `162.242.195.64 - 162.242.195.127`, so you can restrict your webhooks to
@@ -161,7 +144,7 @@
 The first webhook in a chain will be called after a successful push. Subsequent
 URLs will be contacted after the callback has been validated.
 
-#### Validating a callback
+### Validating a callback
 
 In order to validate a callback in a webhook chain, you need to
 
@@ -195,3 +178,10 @@
       "context": "Continuous integration by Acme CI",
       "target_url": "http://ci.acme.com/results/afd339c1c3d27"
     }
+
+## Mark as unlisted
+
+By marking a repository as unlisted, you can create a publicly pullable repository
+which will not be in the Hub or commandline search. This allows you to have a limited
+release, but does not restrict access to anyone that is told, or guesses the repository
+name.
diff --git a/docs/sources/docker-hub/userguide.md b/docs/sources/docker-hub/userguide.md
new file mode 100644
index 0000000..7ace5f3
--- /dev/null
+++ b/docs/sources/docker-hub/userguide.md
@@ -0,0 +1,57 @@
+page_title: Docker Hub user guide
+page_description: Docker Hub user guide
+page_keywords: Docker, docker, registry, Docker Hub, docs, documentation
+
+# Using the Docker Hub
+
+Docker Hub is used to find and pull Docker images to run or build upon, and to
+distribute and build images for other users to use.
+
+![your profile](/docker-hub/hub-images/dashboard.png)
+
+## Finding repositories and images
+
+There are two ways you can search for public repositories and images available
+on the Docker Hub. You can use the "Search" tool on the Docker Hub website, or
+you can `search` for all the repositories and images using the Docker commandline
+tool:
+
+    $ docker search ubuntu
+
+Both will show you a list of the currently available public repositories on the
+Docker Hub which match the provided keyword.
+
+If a repository is private or marked as unlisted, it won't be in the repository
+search results. To see all the repositories you have access to and their statuses,
+you can look at your profile page on [Docker Hub](https://hub.docker.com).
+
+## Pulling, running and building images
+
+You can find more information on [working with Docker images](../userguide/dockerimages.md).
+
+## Official Repositories
+
+The Docker Hub contains a number of [Official
+Repositories](http://registry.hub.docker.com/official). These are
+certified repositories from vendors and contributors to Docker. They
+contain Docker images from vendors like Canonical, Oracle, and Red Hat
+that you can use to build applications and services.
+
+If you use Official Repositories you know you're using an optimized and
+up-to-date image to power your applications.
+
+> **Note:**
+> If you would like to contribute an Official Repository for your
+> organization, see [Official Repositories on Docker
+> Hub](/docker-hub/official_repos) for more information.
+
+## Building and shipping your own repositories and images
+
+The Docker Hub provides you and your team with a place to build and ship Docker images.
+
+Collections of Docker images are managed using repositories - 
+
+You can configure two types of repositories to manage on the Docker Hub:
+[Repositories](./repos.md), which allow you to push images to the Hub from your local Docker daemon,
+and [Automated Builds](./builds.md), which allow you to configure GitHub or Bitbucket to
+trigger the Hub to rebuild repositories when changes are made to the repository.
diff --git a/docs/sources/examples.md b/docs/sources/examples.md
index 9dcd67a..f4d5b86 100644
--- a/docs/sources/examples.md
+++ b/docs/sources/examples.md
@@ -1,9 +1,9 @@
 # Examples
 
- - [Dockerizing a Node.js Web App](nodejs_web_app/)
- - [Dockerizing a Redis Service](running_redis_service/)
- - [Dockerizing an SSH Daemon Service](running_ssh_service/)
- - [Dockerizing a CouchDB Service](couchdb_data_volumes/)
- - [Dockerizing a PostgreSQL Service](postgresql_service/)
+ - [Dockerizing a Node.js web app](nodejs_web_app/)
+ - [Dockerizing a Redis service](running_redis_service/)
+ - [Dockerizing an SSH daemon service](running_ssh_service/)
+ - [Dockerizing a CouchDB service](couchdb_data_volumes/)
+ - [Dockerizing a PostgreSQL service](postgresql_service/)
  - [Dockerizing MongoDB](mongodb/)
- - [Dockerizing a Riak Service](running_riak_service/)
+ - [Dockerizing a Riak service](running_riak_service/)
diff --git a/docs/sources/examples/apt-cacher-ng.md b/docs/sources/examples/apt-cacher-ng.md
index cd92cb5..57aa669 100644
--- a/docs/sources/examples/apt-cacher-ng.md
+++ b/docs/sources/examples/apt-cacher-ng.md
@@ -2,7 +2,7 @@
 page_description: Installing and running an apt-cacher-ng service
 page_keywords: docker, example, package installation, networking, debian, ubuntu
 
-# Dockerizing an Apt-Cacher-ng Service
+# Dockerizing an apt-cacher-ng service
 
 > **Note**: 
 > - **If you don't like sudo** then see [*Giving non-root
@@ -35,16 +35,16 @@
 
 To build the image using:
 
-    $ sudo docker build -t eg_apt_cacher_ng .
+    $ docker build -t eg_apt_cacher_ng .
 
 Then run it, mapping the exposed port to one on the host
 
-    $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
+    $ docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
 
 To see the logfiles that are `tailed` in the default command, you can
 use:
 
-    $ sudo docker logs -f test_apt_cacher_ng
+    $ docker logs -f test_apt_cacher_ng
 
 To get your Debian-based containers to use the proxy, you can do one of
 three things
@@ -68,7 +68,7 @@
 **Option 2** is good for testing, but will break other HTTP clients
 which obey `http_proxy`, such as `curl`, `wget` and others:
 
-    $ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
+    $ docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
 
 **Option 3** is the least portable, but there will be times when you
 might need to do it and you can do it from your `Dockerfile`
@@ -78,7 +78,7 @@
 and they can be used by leveraging the `VOLUME`
 instruction, and the image we built to run the service:
 
-    $ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
+    $ docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
 
     $$ /usr/lib/apt-cacher-ng/distkill.pl
     Scanning /var/cache/apt-cacher-ng, please wait...
@@ -102,6 +102,6 @@
 Finally, clean up after your test by stopping and removing the
 container, and then removing the image.
 
-    $ sudo docker stop test_apt_cacher_ng
-    $ sudo docker rm test_apt_cacher_ng
-    $ sudo docker rmi eg_apt_cacher_ng
+    $ docker stop test_apt_cacher_ng
+    $ docker rm test_apt_cacher_ng
+    $ docker rmi eg_apt_cacher_ng
diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md
index 8cd2408..27bce34 100644
--- a/docs/sources/examples/couchdb_data_volumes.md
+++ b/docs/sources/examples/couchdb_data_volumes.md
@@ -1,8 +1,8 @@
-page_title: Dockerizing a CouchDB Service
+page_title: Dockerizing a CouchDB service
 page_description: Sharing data between 2 couchdb databases
 page_keywords: docker, example, package installation, networking, couchdb, data volumes
 
-# Dockerizing a CouchDB Service
+# Dockerizing a CouchDB service
 
 > **Note**: 
 > - **If you don't like sudo** then see [*Giving non-root
@@ -16,7 +16,7 @@
 
 Note that we're marking `/var/lib/couchdb` as a data volume.
 
-    $ COUCH1=$(sudo docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)
+    $ COUCH1=$(docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03)
 
 ## Add data to the first database
 
@@ -24,19 +24,19 @@
 replace `localhost` with the public IP of your Docker host.
 
     $ HOST=localhost
-    $ URL="http://$HOST:$(sudo docker port $COUCH1 5984 | grep -o '[1-9][0-9]*$')/_utils/"
+    $ URL="http://$HOST:$(docker port $COUCH1 5984 | grep -o '[1-9][0-9]*$')/_utils/"
     $ echo "Navigate to $URL in your browser, and use the couch interface to add data"
 
 ## Create second database
 
 This time, we're requesting shared access to `$COUCH1`'s volumes.
 
-    $ COUCH2=$(sudo docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)
+    $ COUCH2=$(docker run -d -p 5984 --volumes-from $COUCH1 shykes/couchdb:2013-05-03)
 
 ## Browse data on the second database
 
     $ HOST=localhost
-    $ URL="http://$HOST:$(sudo docker port $COUCH2 5984 | grep -o '[1-9][0-9]*$')/_utils/"
+    $ URL="http://$HOST:$(docker port $COUCH2 5984 | grep -o '[1-9][0-9]*$')/_utils/"
     $ echo "Navigate to $URL in your browser. You should see the same data as in the first database"'!'
 
 Congratulations, you are now running two Couchdb containers, completely
diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md
index c4fbf57..b7b423b 100644
--- a/docs/sources/examples/mongodb.md
+++ b/docs/sources/examples/mongodb.md
@@ -10,6 +10,11 @@
 MongoDB pre-installed.  We'll also see how to `push` that image to the
 [Docker Hub registry](https://hub.docker.com) and share it with others!
 
+> **Note:**
+>
+> This guide will show the mechanics of building a MongoDB container, but
+> you will probably want to use the official image on [Docker Hub]( https://registry.hub.docker.com/_/mongo/)
+
 Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/)
 instances will bring several benefits, such as:
 
@@ -59,8 +64,8 @@
 
     # Installation:
     # Import MongoDB public GPG key AND create a MongoDB list file
-    RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv 7F0CEB10
-    RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
+    RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+    RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
 
 After this initial preparation we can update our packages and install MongoDB.
 
@@ -70,7 +75,7 @@
 > **Tip:** You can install a specific version of MongoDB by using a list
 > of required packages with versions, e.g.:
 > 
->     RUN apt-get update && apt-get install -y mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1
+>     RUN apt-get update && apt-get install -y mongodb-org=3.0.1 mongodb-org-server=3.0.1 mongodb-org-shell=3.0.1 mongodb-org-mongos=3.0.1 mongodb-org-tools=3.0.1
 
 MongoDB requires a data directory. Let's create it as the final step of our
 installation instructions.
@@ -86,7 +91,7 @@
     EXPOSE 27017
 
     # Set usr/bin/mongod as the dockerized entry-point application
-    ENTRYPOINT usr/bin/mongod
+    ENTRYPOINT ["/usr/bin/mongod"]
 
 Now save the file and let's build our image.
 
@@ -100,9 +105,9 @@
 experimenting, it is always a good practice to tag Docker images by passing the
 `--tag` option to `docker build` command.
 
-    # Format: sudo docker build --tag/-t <user-name>/<repository> .
+    # Format: docker build --tag/-t <user-name>/<repository> .
     # Example:
-    $ sudo docker build --tag my/repo .
+    $ docker build --tag my/repo .
 
 Once this command is issued, Docker will go through the `Dockerfile` and build
 the image. The final image will be tagged `my/repo`.
@@ -114,13 +119,13 @@
 you need to be logged-in.
 
     # Log-in
-    $ sudo docker login
+    $ docker login
     Username:
     ..
 
     # Push the image
-    # Format: sudo docker push <user-name>/<repository>
-    $ sudo docker push my/repo
+    # Format: docker push <user-name>/<repository>
+    $ docker push my/repo
     The push refers to a repository [my/repo] (len: 1)
     Sending image list
     Pushing repository my/repo (1 tags)
@@ -132,20 +137,36 @@
 as daemon process(es).
 
     # Basic way
-    # Usage: sudo docker run --name <name for container> -d <user-name>/<repository>
-    $ sudo docker run --name mongo_instance_001 -d my/repo
+    # Usage: docker run --name <name for container> -d <user-name>/<repository>
+    $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo
 
     # Dockerized MongoDB, lean and mean!
-    # Usage: sudo docker run --name <name for container> -d <user-name>/<repository> --noprealloc --smallfiles
-    $ sudo docker run --name mongo_instance_001 -d my/repo --noprealloc --smallfiles
+    # Usage: docker run --name <name for container> -d <user-name>/<repository> --noprealloc --smallfiles
+    $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo --noprealloc --smallfiles
 
     # Checking out the logs of a MongoDB container
-    # Usage: sudo docker logs <name for container>
-    $ sudo docker logs mongo_instance_001
+    # Usage: docker logs <name for container>
+    $ docker logs mongo_instance_001
 
     # Playing with MongoDB
     # Usage: mongo --port <port you get from `docker ps`> 
-    $ mongo --port 12345
+    $ mongo --port 27017
+
+    # If using boot2docker
+    # Usage: mongo --port <port you get from `docker ps`>  --host <ip address from `boot2docker ip`>
+    $ mongo --port 27017 --host 192.168.59.103
+
+> **Tip:**
+If you want to run two containers on the same engine, then you will need to map
+the exposed port to two different ports on the host
+
+    # Start two containers and map the ports
+    $ docker run -p 28001:27017 --name mongo_instance_001 -d my/repo
+    $ docker run -p 28002:27017 --name mongo_instance_002 -d my/repo
+
+    # Now you can connect to each MongoDB instance on the two ports
+    $ mongo --port 28001
+    $ mongo --port 28002
 
  - [Linking containers](/userguide/dockerlinks)
  - [Cross-host linking containers](/articles/ambassador_pattern_linking/)
diff --git a/docs/sources/examples/mongodb/Dockerfile b/docs/sources/examples/mongodb/Dockerfile
index c17a636..3513da4 100644
--- a/docs/sources/examples/mongodb/Dockerfile
+++ b/docs/sources/examples/mongodb/Dockerfile
@@ -7,9 +7,8 @@
 
 # Installation:
 # Import MongoDB public GPG key AND create a MongoDB list file
-RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv 7F0CEB10
-RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
-
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
 # Update apt-get sources AND install MongoDB
 RUN apt-get update && apt-get install -y mongodb-org
 
diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md
index 56f7687..ff7179a 100644
--- a/docs/sources/examples/nodejs_web_app.md
+++ b/docs/sources/examples/nodejs_web_app.md
@@ -1,8 +1,8 @@
-page_title: Dockerizing a Node.js Web App
+page_title: Dockerizing a Node.js web app
 page_description: Installing and running a Node.js app with Docker
 page_keywords: docker, example, package installation, node, centos
 
-# Dockerizing a Node.js Web App
+# Dockerizing a Node.js web app
 
 > **Note**: 
 > - **If you don't like sudo** then see [*Giving non-root
@@ -125,11 +125,11 @@
 to build a Docker image. The `-t` flag lets you tag your image so it's easier
 to find later using the `docker images` command:
 
-    $ sudo docker build -t <your username>/centos-node-hello .
+    $ docker build -t <your username>/centos-node-hello .
 
 Your image will now be listed by Docker:
 
-    $ sudo docker images
+    $ docker images
 
     # Example
     REPOSITORY                          TAG        ID              CREATED
@@ -142,15 +142,15 @@
 container running in the background. The `-p` flag redirects a public port to
 a private port in the container. Run the image you previously built:
 
-    $ sudo docker run -p 49160:8080 -d <your username>/centos-node-hello
+    $ docker run -p 49160:8080 -d <your username>/centos-node-hello
 
 Print the output of your app:
 
     # Get container ID
-    $ sudo docker ps
+    $ docker ps
 
     # Print app output
-    $ sudo docker logs <container id>
+    $ docker logs <container id>
 
     # Example
     Running on http://localhost:8080
@@ -159,7 +159,7 @@
 
 To test your app, get the port of your app that Docker mapped:
 
-    $ sudo docker ps
+    $ docker ps
 
     # Example
     ID            IMAGE                                     COMMAND              ...   PORTS
diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile
index 9c0c0d4..740f180 100644
--- a/docs/sources/examples/postgresql_service.Dockerfile
+++ b/docs/sources/examples/postgresql_service.Dockerfile
@@ -6,7 +6,7 @@
 MAINTAINER SvenDowideit@docker.com
 
 # Add the PostgreSQL PGP key to verify their Debian packages.
-# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc 
+# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
 RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
 
 # Add PostgreSQL's repository. It contains the most recent stable release
@@ -33,7 +33,7 @@
     createdb -O docker docker
 
 # Adjust PostgreSQL configuration so that remote connections to the
-# database are possible. 
+# database are possible.
 RUN echo "host all  all    0.0.0.0/0  md5" >> /etc/postgresql/9.3/main/pg_hba.conf
 
 # And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf``
diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md
index 8796009..0911795 100644
--- a/docs/sources/examples/postgresql_service.md
+++ b/docs/sources/examples/postgresql_service.md
@@ -72,11 +72,11 @@
 
 Build an image from the Dockerfile assign it a name.
 
-    $ sudo docker build -t eg_postgresql .
+    $ docker build -t eg_postgresql .
 
 And run the PostgreSQL server container (in the foreground):
 
-    $ sudo docker run --rm -P --name pg_test eg_postgresql
+    $ docker run --rm -P --name pg_test eg_postgresql
 
 There are 2 ways to connect to the PostgreSQL server. We can use [*Link
 Containers*](/userguide/dockerlinks), or we can access it from our host
@@ -93,7 +93,7 @@
 `docker run`. This will set a number of environment
 variables that can then be used to connect:
 
-    $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash
+    $ docker run --rm -t -i --link pg_test:pg eg_postgresql bash
 
     postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
 
@@ -104,7 +104,7 @@
 to find out what local host port the container is mapped to
 first:
 
-    $ sudo docker ps
+    $ docker ps
     CONTAINER ID        IMAGE                  COMMAND                CREATED             STATUS              PORTS                                      NAMES
     5e24362f27f6        eg_postgresql:latest   /usr/lib/postgresql/   About an hour ago   Up About an hour    0.0.0.0:49153->5432/tcp                    pg_test
     $ psql -h localhost -p 49153 -d docker -U docker --password
@@ -135,7 +135,7 @@
 You can use the defined volumes to inspect the PostgreSQL log files and
 to backup your configuration and data:
 
-    $ sudo docker run --rm --volumes-from pg_test -t -i busybox sh
+    $ docker run --rm --volumes-from pg_test -t -i busybox sh
 
     / # ls
     bin      etc      lib      linuxrc  mnt      proc     run      sys      usr
diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md
index 99036a0..c46bb09 100644
--- a/docs/sources/examples/running_redis_service.md
+++ b/docs/sources/examples/running_redis_service.md
@@ -2,12 +2,12 @@
 page_description: Installing and running an redis service
 page_keywords: docker, example, package installation, networking, redis
 
-# Dockerizing a Redis Service
+# Dockerizing a Redis service
 
 Very simple, no frills, Redis service attached to a web application
 using a link.
 
-## Create a docker container for Redis
+## Create a Docker container for Redis
 
 Firstly, we create a `Dockerfile` for our new Redis
 image.
@@ -20,7 +20,7 @@
 Next we build an image from our `Dockerfile`.
 Replace `<your username>` with your own user name.
 
-    $ sudo docker build -t <your username>/redis .
+    $ docker build -t <your username>/redis .
 
 ## Run the service
 
@@ -33,7 +33,7 @@
 we're going to use a container link to provide access to our Redis
 database.
 
-    $ sudo docker run --name redis -d <your username>/redis
+    $ docker run --name redis -d <your username>/redis
 
 ## Create your web application container
 
@@ -43,7 +43,7 @@
 `redis` container and expose the Redis instance running inside that
 container to only this container.
 
-    $ sudo docker run --link redis:db -i -t ubuntu:14.04 /bin/bash
+    $ docker run --link redis:db -i -t ubuntu:14.04 /bin/bash
 
 Once inside our freshly created container we need to install Redis to
 get the `redis-cli` binary to test our connection.
diff --git a/docs/sources/examples/running_riak_service.Dockerfile b/docs/sources/examples/running_riak_service.Dockerfile
new file mode 100644
index 0000000..1051c1a
--- /dev/null
+++ b/docs/sources/examples/running_riak_service.Dockerfile
@@ -0,0 +1,31 @@
+# Riak
+#
+# VERSION       0.1.1
+
+# Use the Ubuntu base image provided by dotCloud
+FROM ubuntu:trusty
+MAINTAINER Hector Castro hector@basho.com
+
+# Install Riak repository before we do apt-get update, so that update happens
+# in a single step
+RUN apt-get install -q -y curl && \
+    curl -sSL https://packagecloud.io/install/repositories/basho/riak/script.deb | sudo bash
+
+# Install and setup project dependencies
+RUN apt-get update && \
+    apt-get install -y supervisor riak=2.0.5-1
+
+RUN mkdir -p /var/log/supervisor
+
+RUN locale-gen en_US en_US.UTF-8
+
+COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
+
+# Configure Riak to accept connections from any host
+RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf
+RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf
+
+# Expose Riak Protocol Buffers and HTTP interfaces
+EXPOSE 8087 8098
+
+CMD ["/usr/bin/supervisord"]
diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md
index 0b53234..7450cd5 100644
--- a/docs/sources/examples/running_riak_service.md
+++ b/docs/sources/examples/running_riak_service.md
@@ -2,7 +2,7 @@
 page_description: Build a Docker image with Riak pre-installed
 page_keywords: docker, example, package installation, networking, riak
 
-# Dockerizing a Riak Service
+# Dockerizing a Riak service
 
 The goal of this example is to show you how to build a Docker image with
 Riak pre-installed.
@@ -15,61 +15,53 @@
 
 Next, define the parent image you want to use to build your image on top
 of. We'll use [Ubuntu](https://registry.hub.docker.com/_/ubuntu/) (tag:
-`latest`), which is available on [Docker Hub](https://hub.docker.com):
+`trusty`), which is available on [Docker Hub](https://hub.docker.com):
 
     # Riak
     #
-    # VERSION       0.1.0
-
+    # VERSION       0.1.1
+    
     # Use the Ubuntu base image provided by dotCloud
-    FROM ubuntu:latest
+    FROM ubuntu:trusty
     MAINTAINER Hector Castro hector@basho.com
 
-After that, we install and setup a few dependencies:
+After that, we install the curl which is used to download the repository setup
+script and we download the setup script and run it.
 
- - `curl` is used to download Basho's APT
-    repository key
- - `lsb-release` helps us derive the Ubuntu release
-    codename
- - `openssh-server` allows us to login to
-    containers remotely and join Riak nodes to form a cluster
- - `supervisor` is used manage the OpenSSH and Riak
-    processes
+    # Install Riak repository before we do apt-get update, so that update happens
+    # in a single step
+    RUN apt-get install -q -y curl && \
+        curl -sSL https://packagecloud.io/install/repositories/basho/riak/script.deb | sudo bash
+
+Then we install and setup a few dependencies:
+
+ - `supervisor` is used manage the Riak processes
+ - `riak=2.0.5-1` is the Riak package coded to version 2.0.5
 
 <!-- -->
 
     # Install and setup project dependencies
-    RUN apt-get update && apt-get install -y curl lsb-release supervisor openssh-server
+    RUN apt-get update && \
+        apt-get install -y supervisor riak=2.0.5-1
 
-    RUN mkdir -p /var/run/sshd
     RUN mkdir -p /var/log/supervisor
-
+    
     RUN locale-gen en_US en_US.UTF-8
-
+    
     COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
 
-    RUN echo 'root:basho' | chpasswd
+After that, we modify Riak's configuration:
 
-Next, we add Basho's APT repository:
+    # Configure Riak to accept connections from any host
+    RUN sed -i "s|listener.http.internal = 127.0.0.1:8098|listener.http.internal = 0.0.0.0:8098|" /etc/riak/riak.conf
+    RUN sed -i "s|listener.protobuf.internal = 127.0.0.1:8087|listener.protobuf.internal = 0.0.0.0:8087|" /etc/riak/riak.conf
 
-    RUN curl -sSL http://apt.basho.com/gpg/basho.apt.key | apt-key add --
-    RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list
+Then, we expose the Riak Protocol Buffers and HTTP interfaces:
 
-After that, we install Riak and alter a few defaults:
+    # Expose Riak Protocol Buffers and HTTP interfaces
+    EXPOSE 8087 8098
 
-    # Install Riak and prepare it to run
-    RUN apt-get update && apt-get install -y riak
-    RUN sed -i.bak 's/127.0.0.1/0.0.0.0/' /etc/riak/app.config
-    RUN echo "ulimit -n 4096" >> /etc/default/riak
-
-Then, we expose the Riak Protocol Buffers and HTTP interfaces, along
-with SSH:
-
-    # Expose Riak Protocol Buffers and HTTP interfaces, along with SSH
-    EXPOSE 8087 8098 22
-
-Finally, run `supervisord` so that Riak and OpenSSH
-are started:
+Finally, run `supervisord` so that Riak is started:
 
     CMD ["/usr/bin/supervisord"]
 
@@ -84,16 +76,14 @@
 
     [supervisord]
     nodaemon=true
-
-    [program:sshd]
-    command=/usr/sbin/sshd -D
-    stdout_logfile=/var/log/supervisor/%(program_name)s.log
-    stderr_logfile=/var/log/supervisor/%(program_name)s.log
-    autorestart=true
-
+    
     [program:riak]
-    command=bash -c ". /etc/default/riak && /usr/sbin/riak console"
-    pidfile=/var/log/riak/riak.pid
+    command=bash -c "/usr/sbin/riak console"
+    numprocs=1
+    autostart=true
+    autorestart=true
+    user=riak
+    environment=HOME="/var/lib/riak"
     stdout_logfile=/var/log/supervisor/%(program_name)s.log
     stderr_logfile=/var/log/supervisor/%(program_name)s.log
 
@@ -101,7 +91,7 @@
 
 Now you should be able to build a Docker image for Riak:
 
-    $ sudo docker build -t "<yourname>/riak" .
+    $ docker build -t "<yourname>/riak" .
 
 ## Next steps
 
diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md
index 445cfe5..b1000a0 100644
--- a/docs/sources/examples/running_ssh_service.md
+++ b/docs/sources/examples/running_ssh_service.md
@@ -2,7 +2,7 @@
 page_description: Installing and running an SSHd service on Docker
 page_keywords: docker, example, package installation, networking
 
-# Dockerizing an SSH Daemon Service
+# Dockerizing an SSH daemon service
 
 ## Build an `eg_sshd` image
 
@@ -33,15 +33,15 @@
 
 Build the image using:
 
-    $ sudo docker build -t eg_sshd .
+    $ docker build -t eg_sshd .
 
 ## Run a `test_sshd` container
 
 Then run it. You can then use `docker port` to find out what host port
 the container's port 22 is mapped to:
 
-    $ sudo docker run -d -P --name test_sshd eg_sshd
-    $ sudo docker port test_sshd 22
+    $ docker run -d -P --name test_sshd eg_sshd
+    $ docker port test_sshd 22
     0.0.0.0:49154
 
 And now you can ssh as `root` on the container's IP address (you can find it
@@ -72,7 +72,7 @@
 Finally, clean up after your test by stopping and removing the
 container, and then removing the image.
 
-    $ sudo docker stop test_sshd
-    $ sudo docker rm test_sshd
-    $ sudo docker rmi eg_sshd
+    $ docker stop test_sshd
+    $ docker rm test_sshd
+    $ docker rmi eg_sshd
 
diff --git a/docs/sources/examples/supervisord.conf b/docs/sources/examples/supervisord.conf
new file mode 100644
index 0000000..385fbe7
--- /dev/null
+++ b/docs/sources/examples/supervisord.conf
@@ -0,0 +1,12 @@
+[supervisord]
+nodaemon=true
+
+[program:riak]
+command=bash -c "/usr/sbin/riak console"
+numprocs=1
+autostart=true
+autorestart=true
+user=riak
+environment=HOME="/var/lib/riak"
+stdout_logfile=/var/log/supervisor/%(program_name)s.log
+stderr_logfile=/var/log/supervisor/%(program_name)s.log
diff --git a/docs/sources/experimental/experimental.md b/docs/sources/experimental/experimental.md
new file mode 100644
index 0000000..b0d72b9
--- /dev/null
+++ b/docs/sources/experimental/experimental.md
@@ -0,0 +1,51 @@
+page_title: Overview of Experimental Features
+page_keywords: experimental, Docker, feature
+
+# Experimental Features in this Release 
+
+This page contains a list of features in the Docker engine which are
+experimental as of the current release. Experimental features are **not** ready
+for production. They are provided for test and evaluation in your sandbox
+environments.  
+
+The information below describes each feature and the Github pull requests and
+issues associated with it. If necessary, links are provided to additional
+documentation on an issue.  As an active Docker user and community member,
+please feel free to provide any feedback on these features you wish.
+
+## Install Docker experimental 
+
+1. Verify that you have `wget` installed.
+
+        $ which wget
+
+    If `wget` isn't installed, install it after updating your manager:
+
+        $ sudo apt-get update
+        $ sudo apt-get install wget
+
+2. Get the latest Docker package.
+
+        $ wget -qO- https://experimental.docker.com/ | sh
+
+    The system prompts you for your `sudo` password. Then, it downloads and
+    installs Docker and its dependencies.
+
+	>**Note**: If your company is behind a filtering proxy, you may find that the
+	>`apt-key`
+	>command fails for the Docker repo during installation. To work around this,
+	>add the key directly using the following:
+	>
+	>       $ wget -qO- https://experimental.docker.com/gpg | sudo apt-key add -
+
+3. Verify `docker` is installed correctly.
+
+        $ sudo docker run hello-world
+
+    This command downloads a test image and runs it in a container.
+
+## Experimental features in this Release
+
+* [Support for Docker plugins](plugins.md)
+* [Volume plugins](plugins_volume.md)
+
diff --git a/docs/sources/experimental/plugin_api.md b/docs/sources/experimental/plugin_api.md
new file mode 100644
index 0000000..6454fc2
--- /dev/null
+++ b/docs/sources/experimental/plugin_api.md
@@ -0,0 +1,225 @@
+page_title: Plugin API documentation
+page_description: Documentation for writing a Docker plugin.
+page_keywords: docker, plugins, api, extensions
+
+# Experimental: Docker Plugin API
+
+Docker plugins are out-of-process extensions which add capabilities to the
+Docker Engine.
+
+This page is intended for people who want to develop their own Docker plugin.
+If you just want to learn about or use Docker plugins, look
+[here](/userguide/plugins).
+
+This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](experimental.md).
+
+## What plugins are
+
+A plugin is a process running on the same docker host as the docker daemon,
+which registers itself by placing a file in `/usr/share/docker/plugins` (the
+"plugin directory").
+
+Plugins have human-readable names, which are short, lowercase strings. For
+example, `flocker` or `weave`.
+
+Plugins can run inside or outside containers. Currently running them outside
+containers is recommended.
+
+## Plugin discovery
+
+Docker discovers plugins by looking for them in the plugin directory whenever a
+user or container tries to use one by name.
+
+There are two types of files which can be put in the plugin directory.
+
+* `.sock` files are UNIX domain sockets.
+* `.spec` files are text files containing a URL, such as `unix:///other.sock`.
+
+The name of the file (excluding the extension) determines the plugin name.
+
+For example, the `flocker` plugin might create a UNIX socket at
+`/usr/share/docker/plugins/flocker.sock`.
+
+Plugins must be run locally on the same machine as the Docker daemon.  UNIX
+domain sockets are strongly encouraged for security reasons.
+
+## Plugin lifecycle
+
+Plugins should be started before Docker, and stopped after Docker.  For
+example, when packaging a plugin for a platform which supports `systemd`, you
+might use [`systemd` dependencies](
+http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
+manage startup and shutdown order.
+
+When upgrading a plugin, you should first stop the Docker daemon, upgrade the
+plugin, then start Docker again.
+
+If a plugin is packaged as a container, this may cause issues. Plugins as
+containers are currently considered experimental due to these shutdown/startup
+ordering issues. These issues are mitigated by plugin retries (see below).
+
+## Plugin activation
+
+When a plugin is first referred to -- either by a user referring to it by name
+(e.g.  `docker run --volume-driver=foo`) or a container already configured to
+use a plugin being started -- Docker looks for the named plugin in the plugin
+directory and activates it with a handshake. See Handshake API below.
+
+Plugins are *not* activated automatically at Docker daemon startup. Rather,
+they are activated only lazily, or on-demand, when they are needed.
+
+## API design
+
+The Plugin API is RPC-style JSON over HTTP, much like webhooks.
+
+Requests flow *from* the Docker daemon *to* the plugin.  So the plugin needs to
+implement an HTTP server and bind this to the UNIX socket mentioned in the
+"plugin discovery" section.
+
+All requests are HTTP `POST` requests.
+
+The API is versioned via an Accept header, which currently is always set to
+`application/vnd.docker.plugins.v1+json`.
+
+## Handshake API
+
+Plugins are activated via the following "handshake" API call.
+
+### /Plugin.Activate
+
+**Request:** empty body
+
+**Response:**
+```
+{
+    "Implements": ["VolumeDriver"]
+}
+```
+
+Responds with a list of Docker subsystems which this plugin implements.
+After activation, the plugin will then be sent events from this subsystem.
+
+## Volume API
+
+If a plugin registers itself as a `VolumeDriver` (see above) then it is
+expected to provide writeable paths on the host filesystem for the Docker
+daemon to provide to containers to consume.
+
+The Docker daemon handles bind-mounting the provided paths into user
+containers.
+
+### /VolumeDriver.Create
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Instruct the plugin that the user wants to create a volume, given a user
+specified volume name.  The plugin does not need to actually manifest the
+volume on the filesystem yet (until Mount is called).
+
+**Response**:
+```
+{
+    "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+### /VolumeDriver.Remove
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Create a volume, given a user specified volume name.
+
+**Response**:
+```
+{
+    "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+### /VolumeDriver.Mount
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Docker requires the plugin to provide a volume, given a user specified volume
+name. This is called once per container start.
+
+**Response**:
+```
+{
+    "Mountpoint": "/path/to/directory/on/host",
+    "Err": null
+}
+```
+
+Respond with the path on the host filesystem where the volume has been made
+available, and/or a string error if an error occurred.
+
+### /VolumeDriver.Path
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Docker needs reminding of the path to the volume on the host.
+
+**Response**:
+```
+{
+    "Mountpoint": "/path/to/directory/on/host",
+    "Err": null
+}
+```
+
+Respond with the path on the host filesystem where the volume has been made
+available, and/or a string error if an error occurred.
+
+### /VolumeDriver.Unmount
+
+**Request**:
+```
+{
+    "Name": "volume_name"
+}
+```
+
+Indication that Docker no longer is using the named volume. This is called once
+per container stop.  Plugin may deduce that it is safe to deprovision it at
+this point.
+
+**Response**:
+```
+{
+    "Err": null
+}
+```
+
+Respond with a string error if an error occurred.
+
+## Plugin retries
+
+Attempts to call a method on a plugin are retried with an exponential backoff
+for up to 30 seconds. This may help when packaging plugins as containers, since
+it gives plugin containers a chance to start up before failing any user
+containers which depend on them.
diff --git a/docs/sources/experimental/plugins.md b/docs/sources/experimental/plugins.md
new file mode 100644
index 0000000..dbcb70c
--- /dev/null
+++ b/docs/sources/experimental/plugins.md
@@ -0,0 +1,48 @@
+page_title: Experimental feature - Plugins
+page_keywords: experimental, Docker, plugins
+
+# Experimental: Extend Docker with a plugin 
+
+You can extend the capabilities of the Docker Engine by loading third-party
+plugins. 
+
+This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](experimental.md).
+
+## Types of plugins
+
+Plugins extend Docker's functionality.  They come in specific types.  For
+example, a [volume plugin](/experimental/plugins_volume) might enable Docker
+volumes to persist across multiple Docker hosts.
+
+Currently Docker supports volume plugins. In the future it will support
+additional plugin types.
+
+## Installing a plugin
+
+Follow the instructions in the plugin's documentation.
+
+## Finding a plugin
+
+The following plugins exist:
+
+* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
+which provides multi-host portable volumes for Docker, enabling you to run
+  databases and other stateful containers and move them around across a cluster
+  of machines.
+
+## Troubleshooting a plugin
+
+If you are having problems with Docker after loading a plugin, ask the authors
+of the plugin for help. The Docker team may not be able to assist you.
+
+## Writing a plugin
+
+If you are interested in writing a plugin for Docker, or seeing how they work
+under the hood, see the [docker plugins reference](/experimental/plugin_api).
+
+# Related GitHub PRs and issues
+
+- [#13222](https://github.com/docker/docker/pull/13222) Plugins plumbing
+
+Send us feedback and comments on [#13419](https://github.com/docker/docker/issues/13419),
+or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
diff --git a/docs/sources/experimental/plugins_volume.md b/docs/sources/experimental/plugins_volume.md
new file mode 100644
index 0000000..399dda7
--- /dev/null
+++ b/docs/sources/experimental/plugins_volume.md
@@ -0,0 +1,45 @@
+page_title: Experimental feature - Volume plugins
+page_keywords: experimental, Docker, plugins, volume
+
+# Experimental: Docker volume plugins
+
+Docker volume plugins enable Docker deployments to be integrated with external
+storage systems, such as Amazon EBS, and enable data volumes to persist beyond
+the lifetime of a single Docker host. See the [plugin documentation](/experimental/plugins)
+for more information.
+
+This is an experimental feature. For information on installing and using experimental features, see [the experimental feature overview](experimental.md).
+
+# Command-line changes
+
+This experimental features introduces two changes to the `docker run` command:
+
+- The `--volume-driver` flag is introduced.
+- The `-v` syntax is changed to accept a volume name a first component.
+
+Example:
+
+    $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
+
+By specifying a volume name in conjunction with a volume driver, volume plugins
+such as [Flocker](https://clusterhq.com/docker-plugin/), once installed, can be
+used to manage volumes external to a single host, such as those on EBS. In this
+example, "volumename" is passed through to the volume plugin as a user-given
+name for the volume which allows the plugin to associate it with an external
+volume beyond the lifetime of a single container or container host. This can be
+used, for example, to move a stateful container from one server to another.
+
+The `volumename` must not begin with a `/`.
+
+# API changes
+
+The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
+field of type `string` allowing to specify the name of the driver. It's default
+value of `"local"` (the default driver for local volumes).
+
+# Related GitHub PRs and issues
+
+- [#13161](https://github.com/docker/docker/pull/13161) Volume refactor and external volume plugins
+
+Send us feedback and comments on [#13420](https://github.com/docker/docker/issues/13420),
+or on the usual Google Groups (docker-user, docker-dev) and IRC channels.
diff --git a/docs/sources/http-routingtable.md b/docs/sources/http-routingtable.md
index 07029d2..14e1dfc 100644
--- a/docs/sources/http-routingtable.md
+++ b/docs/sources/http-routingtable.md
@@ -1,4 +1,4 @@
-# HTTP Routing Table
+# HTTP routing table
 
 [**/api**](#cap-/api) | [**/auth**](#cap-/auth) |
 [**/build**](#cap-/build) | [**/commit**](#cap-/commit) |
diff --git a/docs/sources/index.md b/docs/sources/index.md
index 993603e..ef827ac 100644
--- a/docs/sources/index.md
+++ b/docs/sources/index.md
@@ -75,18 +75,18 @@
  - See how Docker compares to virtual machines
  - See some common use cases.
 
-### Installation Guides
+### Installation guides
 
 The [installation section](/installation/#installation) will show you how to
 install Docker on a variety of platforms.
 
 
-### Docker User Guide
+### Docker user guide
 
 To learn about Docker in more detail and to answer questions about usage and
 implementation, check out the [Docker User Guide](/userguide/).
 
-## Release Notes
+## Release notes
 
 A summary of the changes in each release in the current series can now be found
 on the separate [Release Notes page](/release-notes/)
diff --git a/docs/sources/installation/SUSE.md b/docs/sources/installation/SUSE.md
index 2a0aa91..106d4cb 100644
--- a/docs/sources/installation/SUSE.md
+++ b/docs/sources/installation/SUSE.md
@@ -8,7 +8,7 @@
 to its current limitations Docker is able to run only **64 bit** architecture.
 
 Docker is not part of the official repositories of openSUSE 12.3 and
-openSUSE 13.1. Hence  it is neccessary to add the [Virtualization
+openSUSE 13.1. Hence  it is necessary to add the [Virtualization
 repository](https://build.opensuse.org/project/show/Virtualization) from
 [OBS](https://build.opensuse.org/) to install the `docker` package.
 
@@ -28,7 +28,7 @@
 due to its current limitations Docker is able to run only on **64 bit**
 architecture.
 
-# Installation
+## Installation
 
 Install the Docker package.
 
@@ -76,6 +76,20 @@
 Docker runtime files, or make other customizations, read our systemd article to
 learn how to [customize your systemd Docker daemon options](/articles/systemd/).
 
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo zypper rm docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## What's next
 
 Continue with the [User Guide](/userguide/).
diff --git a/docs/sources/installation/amazon.md b/docs/sources/installation/amazon.md
index 6a28685..3fdeb72 100644
--- a/docs/sources/installation/amazon.md
+++ b/docs/sources/installation/amazon.md
@@ -2,48 +2,14 @@
 page_description: Installation instructions for Docker on Amazon EC2.
 page_keywords: amazon ec2, virtualization, cloud, docker, documentation, installation
 
-# Amazon EC2
+## Amazon EC2
 
-There are several ways to install Docker on AWS EC2. You can use Amazon Linux, which includes the Docker packages in its Software Repository, or opt for any of the other supported Linux images, for example a [*Standard Ubuntu Installation*](#standard-ubuntu-installation).
+You can install Docker on any AWS EC2 Amazon Machine Image (AMI) which runs an
+operating system that Docker supports. Amazon's website includes specific
+instructions for [installing on Amazon
+Linux](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/docker-basics.html#install_docker). To install on
+another AMI, follow the instructions for its specific operating
+system in this installation guide. 
 
-**You'll need an** [AWS account](http://aws.amazon.com/) **first, of
-course.**
-
-## Amazon QuickStart with Amazon Linux AMI 2014.09.1
-
-The latest Amazon Linux AMI, 2014.09.1, is Docker ready. Docker packages can be installed from Amazon's provided Software
-Repository.
-
-1. **Choose an image:**
-   - Launch the [Create Instance
-     Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:)
-     menu on your AWS Console.
-   - In the Quick Start menu, select the Amazon provided AMI for Amazon Linux 2014.09.1
-   - For testing you can use the default (possibly free)
-     `t2.micro` instance (more info on
-     [pricing](http://aws.amazon.com/ec2/pricing/)).
-   - Click the `Next: Configure Instance Details`
-      button at the bottom right.
-2. After a few more standard choices where defaults are probably ok,
-   your Amazon Linux instance should be running!
-3. SSH to your instance to install Docker :
-   `ssh -i <path to your private key> ec2-user@<your public IP address>`
-4. Once connected to the instance, type
-    `sudo yum install -y docker ; sudo service docker start`
- to install and start Docker
-
-**If this is your first AWS instance, you may need to set up your Security Group to allow SSH.** By default all incoming ports to your new instance will be blocked by the AWS Security Group, so you might just get timeouts when you try to connect.
-
-Once you`ve got Docker installed, you're ready to try it out – head on
-over to the [User Guide](/userguide).
-
-## Standard Ubuntu Installation
-
-If you want a more hands-on installation, then you can follow the
-[*Ubuntu*](/installation/ubuntulinux) instructions installing Docker
-on any EC2 instance running Ubuntu. Just follow Step 1 from the Amazon
-QuickStart above to pick an image (or use one of your
-own) and skip the step with the *User Data*. Then continue with the
-[*Ubuntu*](/installation/ubuntulinux) instructions.
-
-Continue with the [User Guide](/userguide/).
+For detailed information on Amazon AWS support for Docker, refer to [Amazon's
+documentation](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/docker-basics.html). 
diff --git a/docs/sources/installation/archlinux.md b/docs/sources/installation/archlinux.md
index 99849c7..570e36c 100644
--- a/docs/sources/installation/archlinux.md
+++ b/docs/sources/installation/archlinux.md
@@ -30,13 +30,13 @@
 
 For the normal package a simple
 
-    pacman -S docker
+    $ sudo pacman -S docker
 
 is all that is needed.
 
 For the AUR package execute:
 
-    yaourt -S docker-git
+    $ sudo yaourt -S docker-git
 
 The instructions here assume **yaourt** is installed. See [Arch User
 Repository](https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages)
@@ -59,3 +59,21 @@
 If you need to add an HTTP Proxy, set a different directory or partition for the
 Docker runtime files, or make other customizations, read our systemd article to
 learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo pacman -R docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+    $ sudo pacman -Rns docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/sources/installation/azure.md b/docs/sources/installation/azure.md
index a8e700f..5491022 100644
--- a/docs/sources/installation/azure.md
+++ b/docs/sources/installation/azure.md
@@ -1,4 +1,4 @@
-page_title: Installation on Microsoft Azure Platform
+page_title: Installation on Microsoft Azure platform
 page_description: Instructions for creating a Docker-ready virtual machine on Microsoft Azure cloud platform.
 page_keywords: Docker, Docker documentation, installation, azure, microsoft
 
diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md
index ef9f5ca..a9a96be 100644
--- a/docs/sources/installation/binaries.md
+++ b/docs/sources/installation/binaries.md
@@ -1,4 +1,4 @@
-page_title: Installation from Binaries
+page_title: Installation from binaries
 page_description: Instructions for installing Docker as a binary. Mostly meant for hackers who want to try out Docker on a variety of environments.
 page_keywords: binaries, installation, docker, documentation, linux
 
@@ -78,18 +78,93 @@
 > vendor for the system, and might break regulations and security
 > policies in heavily regulated environments.
 
-## Get the docker binary:
+## Get the Docker binary
 
-    $ wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O docker
+You can download either the latest release binary or a specific version.
+After downloading a binary file, you must set the file's execute bit to run it.
+
+To set the file's execute bit on Linux and OS X:
+
     $ chmod +x docker
 
-> **Note**:
-> If you have trouble downloading the binary, you can also get the smaller
-> compressed release file:
-> [https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz](
-> https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz)
+To get the list of stable release version numbers from Github, view the
+`docker/docker` [releases page](https://github.com/docker/docker/releases). 
 
-## Run the docker daemon
+> **Note**
+>
+> 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively
+>
+> 2) You can get the compressed binaries by appending .tgz to the URLs
+
+### Get the Linux binary
+
+To download the latest version for Linux, use the
+following URLs:
+
+    https://get.docker.com/builds/Linux/i386/docker-latest
+    
+    https://get.docker.com/builds/Linux/x86_64/docker-latest
+
+To download a specific version for Linux, use the
+following URL patterns:
+
+    https://get.docker.com/builds/Linux/i386/docker-<version>
+    
+    https://get.docker.com/builds/Linux/x86_64/docker-<version>
+
+For example:
+
+    https://get.docker.com/builds/Linux/i386/docker-1.6.0
+
+    https://get.docker.com/builds/Linux/x86_64/docker-1.6.0
+
+
+### Get the Mac OS X binary
+
+The Mac OS X binary is only a client. You cannot use it to run the `docker`
+daemon. To download the latest version for Mac OS X, use the following URLs:
+
+    https://get.docker.com/builds/Darwin/i386/docker-latest
+    
+    https://get.docker.com/builds/Darwin/x86_64/docker-latest
+
+To download a specific version for Mac OS X, use the
+following URL patterns:
+
+    https://get.docker.com/builds/Darwin/i386/docker-<version>
+    
+    https://get.docker.com/builds/Darwin/x86_64/docker-<version>
+
+For example:
+
+    https://get.docker.com/builds/Darwin/i386/docker-1.6.0
+
+    https://get.docker.com/builds/Darwin/x86_64/docker-1.6.0
+
+### Get the Windows binary
+ 
+You can only download the Windows client binary for version `1.6.0` onwards.
+Moreover, the binary is only a client, you cannot use it to run the `docker` daemon.
+To download the latest version for Windows, use the following URLs:
+
+    https://get.docker.com/builds/Windows/i386/docker-latest.exe
+    
+    https://get.docker.com/builds/Windows/x86_64/docker-latest.exe
+
+To download a specific version for Windows, use the following URL pattern:
+
+    https://get.docker.com/builds/Windows/i386/docker-<version>.exe
+    
+    https://get.docker.com/builds/Windows/x86_64/docker-<version>.exe
+
+For example:
+
+    https://get.docker.com/builds/Windows/i386/docker-1.6.0.exe
+
+    https://get.docker.com/builds/Windows/x86_64/docker-1.6.0.exe
+
+
+## Run the Docker daemon
 
     # start the docker in daemon mode from the directory you unpacked
     $ sudo ./docker -d &
diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md
index 862d508..efebad5 100644
--- a/docs/sources/installation/centos.md
+++ b/docs/sources/installation/centos.md
@@ -25,7 +25,10 @@
 kernel version 2.6.32-431 or higher as this has specific kernel fixes to allow
 Docker to run.
 
-## Installing Docker - CentOS-7
+## CentOS-7
+
+### Installation
+
 Docker is included by default in the CentOS-Extras repository. To install
 run the following command:
 
@@ -33,18 +36,23 @@
 
 Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
 
-### FirewallD
+### Uninstallation
 
-CentOS-7 introduced firewalld, which is a wrapper around iptables and can
-conflict with Docker.
+To uninstall the Docker package:
 
-When `firewalld` is started or restarted it will remove the `DOCKER` chain
-from iptables, preventing Docker from working properly.
+    $ sudo yum -y remove docker
 
-When using Systemd, `firewalld` is started before Docker, but if you
-start or restart `firewalld` after Docker, you will have to restart the Docker daemon.
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
 
-## Installing Docker - CentOS-6.5
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
+## CentOS-6.5
+
+### Installation
 
 For CentOS-6.5, the Docker package is part of [Extra Packages
 for Enterprise Linux (EPEL)](https://fedoraproject.org/wiki/EPEL) repository,
@@ -68,6 +76,20 @@
 
 Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
 
+### Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo yum -y remove docker-io
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## Manual installation of latest Docker release
 
 While using a package is the recommended way of installing Docker,
diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md
index ead4c27..e037150 100644
--- a/docs/sources/installation/cruxlinux.md
+++ b/docs/sources/installation/cruxlinux.md
@@ -15,19 +15,19 @@
 
 ## Installation
 
-Assuming you have contrib enabled, update your ports tree and install docker (*as root*):
+Assuming you have contrib enabled, update your ports tree and install docker:
 
-    # prt-get depinst docker
+    $ sudo prt-get depinst docker
 
 
-## Kernel Requirements
+## Kernel requirements
 
 To have a working **CRUX+Docker** Host you must ensure your Kernel has
 the necessary modules enabled for the Docker Daemon to function correctly.
 
 Please read the `README`:
 
-    $ prt-get readme docker
+    $ sudo prt-get readme docker
 
 The `docker` port installs the `contrib/check-config.sh` script
 provided by the Docker contributors for checking your kernel
@@ -39,9 +39,9 @@
 
 ## Starting Docker
 
-There is a rc script created for Docker. To start the Docker service (*as root*):
+There is a rc script created for Docker. To start the Docker service:
 
-    # /etc/rc.d/docker start
+    $ sudo /etc/rc.d/docker start
 
 To start on system boot:
 
@@ -60,6 +60,20 @@
 There are also user contributed [CRUX based image(s)](https://registry.hub.docker.com/repos/crux/) on the Docker Hub.
 
 
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo prt-get remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## Issues
 
 If you have any issues please file a bug with the
diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md
index 4644a24..883f920 100644
--- a/docs/sources/installation/debian.md
+++ b/docs/sources/installation/debian.md
@@ -11,8 +11,7 @@
 
 ## Debian Jessie 8.0 (64-bit)
 
-Debian 8 comes with a 3.14.0 Linux kernel, and a `docker.io` package which
-installs all its prerequisites from Debian's repository.
+Debian 8 comes with a 3.16.0 Linux kernel, the `docker.io` package can be found in the `jessie-backports` repository. Reasoning behind this can be found <a href="https://lists.debian.org/debian-release/2015/03/msg00685.html" target="_blank">here</a>. Instructions how to enable the backports repository can be found <a href="http://backports.debian.org/Instructions/" target="_blank">here</a>.
 
 > **Note**:
 > Debian contains a much older KDE3/GNOME2 package called ``docker``, so the
@@ -20,6 +19,8 @@
 
 ### Installation
 
+Make sure you enabled the `jessie-backports` repository, as stated above.
+
 To install the latest Debian package (may not be the latest Docker release):
 
     $ sudo apt-get update
@@ -27,19 +28,38 @@
 
 To verify that everything has worked as expected:
 
-    $ sudo docker run -i -t ubuntu /bin/bash
+    $ sudo docker run --rm hello-world
 
-Which should download the `ubuntu` image, and then start `bash` in a container.
+This command downloads and runs the `hello-world` image in a container. When the
+container runs, it prints an informational message. Then, it exits.
 
 > **Note**:
 > If you want to enable memory and swap accounting see
 > [this](/installation/ubuntulinux/#memory-and-swap-accounting).
 
+### Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo apt-get purge docker-io
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+    $ sudo apt-get autoremove --purge docker-io
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## Debian Wheezy/Stable 7.x (64-bit)
 
 Docker requires Kernel 3.8+, while Wheezy ships with Kernel 3.2 (for more details
 on why 3.8 is required, see discussion on
-[bug #407](https://github.com/docker/docker/issues/407%20kernel%20versions)).
+[bug #407](https://github.com/docker/docker/issues/407)).
 
 Fortunately, wheezy-backports currently has [Kernel 3.16
 ](https://packages.debian.org/search?suite=wheezy-backports&section=all&arch=any&searchon=names&keywords=linux-image-amd64),
@@ -62,9 +82,34 @@
 2. Restart your system. This is necessary for Debian to use your new kernel.
 
 3. Install Docker using the get.docker.com script:
- 
+
     `curl -sSL https://get.docker.com/ | sh`
 
+>**Note**: If your company is behind a filtering proxy, you may find that the
+>`apt-key`
+>command fails for the Docker repo during installation. To work around this,
+>add the key directly using the following:
+>
+>       $ wget -qO- https://get.docker.com/gpg | sudo apt-key add -
+
+### Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo apt-get purge lxc-docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+    $ sudo apt-get autoremove --purge lxc-docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## Giving non-root access
 
 The `docker` daemon always runs as the `root` user and the `docker`
diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md
index ed4e837..b3f23e4 100644
--- a/docs/sources/installation/fedora.md
+++ b/docs/sources/installation/fedora.md
@@ -13,19 +13,37 @@
 shipped by the distribution. There are kernel changes which will cause issues
 if one decides to step outside that box and run non-distribution kernel packages.
 
-## Fedora 21 and later installation
+## Fedora 21 and later
 
-Install the `docker` package which will install Docker on our host.
+### Installation
+
+Install the Docker package which will install Docker on our host.
 
     $ sudo yum -y install docker
 
-To update the `docker` package:
+To update the Docker package:
 
     $ sudo yum -y update docker
 
 Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
 
-## Fedora 20 installation
+### Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
+## Fedora 20
+
+### Installation
 
 For `Fedora 20`, there is a package name conflict with a system tray application
 and its executable, so the Docker RPM package was called `docker-io`.
@@ -36,12 +54,26 @@
     $ sudo yum -y remove docker
     $ sudo yum -y install docker-io
 
-To update the `docker` package:
+To update the Docker package:
 
     $ sudo yum -y update docker-io
 
 Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
 
+### Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo yum -y remove docker-io
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## Starting the Docker daemon
 
 Now that it's installed, let's start the Docker daemon.
diff --git a/docs/sources/installation/frugalware.md b/docs/sources/installation/frugalware.md
index 6b4db23..c700280 100644
--- a/docs/sources/installation/frugalware.md
+++ b/docs/sources/installation/frugalware.md
@@ -28,7 +28,7 @@
 
 A simple
 
-    pacman -S lxc-docker
+    $ sudo pacman -S lxc-docker
 
 is all that is needed.
 
@@ -48,3 +48,21 @@
 If you need to add an HTTP Proxy, set a different directory or partition for the
 Docker runtime files, or make other customizations, read our systemd article to
 learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo pacman -R lxc-docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+    $ sudo pacman -Rns lxc-docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md
index 716eab9..865e8eb 100644
--- a/docs/sources/installation/gentoolinux.md
+++ b/docs/sources/installation/gentoolinux.md
@@ -95,3 +95,21 @@
 If you need to add an HTTP Proxy, set a different directory or partition for the
 Docker runtime files, or make other customizations, read our systemd article to
 learn how to [customize your systemd Docker daemon options](/articles/systemd/).
+
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo emerge -cav app-emulation/docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+    $ sudo emerge -C app-emulation/docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/sources/installation/images/linux_docker_host.png b/docs/sources/installation/images/linux_docker_host.png
deleted file mode 100644
index 42895c2..0000000
--- a/docs/sources/installation/images/linux_docker_host.png
+++ /dev/null
Binary files differ
diff --git a/docs/sources/installation/images/linux_docker_host.svg b/docs/sources/installation/images/linux_docker_host.svg
new file mode 100644
index 0000000..0ad7240
--- /dev/null
+++ b/docs/sources/installation/images/linux_docker_host.svg
@@ -0,0 +1,1195 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="88.900002mm"
+   height="112.88889mm"
+   viewBox="0 0 315.00001 399.99999"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.91 r13725"
+   sodipodi:docname="linux_docker_host.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker8148"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path8150"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker6352"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path6354"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker6148"
+       style="overflow:visible;"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         id="path6150"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker5920"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5922"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker5886"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5888"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5368"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Lend"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5350"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         transform="scale(0.8) rotate(180) translate(12.5,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="3.6433246"
+     inkscape:cx="149.73978"
+     inkscape:cy="116.33414"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer2"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="1681"
+     inkscape:window-height="1010"
+     inkscape:window-x="4"
+     inkscape:window-y="0"
+     inkscape:window-maximized="0" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:groupmode="layer"
+     id="layer2"
+     inkscape:label="host"
+     style="display:inline">
+    <rect
+       style="opacity:1;fill:#ade5f9;fill-opacity:1;stroke:#005976;stroke-width:2.99999976;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect4394"
+       width="311.17267"
+       height="375.20316"
+       x="2.2081146"
+       y="23.182617"
+       rx="0"
+       ry="0" />
+    <rect
+       style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect4345"
+       width="174.98102"
+       height="39.064903"
+       x="13.741047"
+       y="5.175612"
+       ry="10.101524"
+       rx="10.101525" />
+    <g
+       transform="matrix(0.62086873,0,0,0.60736447,-96.218033,-10.961139)"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot4337">
+      <path
+         d="m 187.88477,72.829407 0,-30.3125 7.1875,0 q 4.96093,0 7.36328,1.738282 2.42187,1.71875 3.63281,5.214843 1.21094,3.496094 1.21094,8.28125 0,4.53125 -1.28907,7.96875 -1.26953,3.417969 -3.61328,5.273438 -2.34375,1.835937 -7.30468,1.835937 l -7.1875,0 z m 3.94531,-3.339843 2.63672,0 q 4.0039,0 5.70312,-1.425782 1.71875,-1.425781 2.44141,-3.945312 0.74219,-2.539063 0.74219,-6.992188 0,-4.355468 -0.85938,-6.699218 -0.85937,-2.34375 -2.46094,-3.59375 -1.60156,-1.269532 -5.2539,-1.269532 l -2.94922,0 0,23.925782 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10650" />
+      <path
+         d="m 220.91211,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10652" />
+      <path
+         d="m 254.23242,72.477845 q -3.39844,1.269531 -6.09375,1.269531 -3.55469,0 -6.54297,-1.894531 -2.98828,-1.914063 -4.64843,-5.664063 -1.66016,-3.769531 -1.66016,-8.515625 0,-4.707031 1.64062,-8.457031 1.64063,-3.75 4.62891,-5.683594 2.98828,-1.933593 6.58203,-1.933593 2.69531,0 6.09375,1.269531 l 0,3.59375 q -3.14453,-1.816406 -6.25,-1.816406 -2.34375,0 -4.29687,1.601562 -1.9336,1.601563 -3.125,4.707031 -1.19141,3.085938 -1.19141,6.71875 0,3.691407 1.23047,6.816407 1.23047,3.125 3.14453,4.6875 1.91406,1.542968 4.23828,1.542968 3.08594,0 6.25,-1.816406 l 0,3.574219 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10654" />
+      <path
+         d="m 260.0332,72.829407 0,-30.3125 3.94532,0 0,14.863282 11.26953,-14.863282 4.17968,0 -10.8789,14.394532 12.40234,15.917968 -4.9414,0 -12.03125,-15.449218 0,15.449218 -3.94532,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10656" />
+      <path
+         d="m 301.2832,69.489564 0,3.339843 -16.36718,0 0,-30.3125 16.05468,0 0,3.046875 -12.10937,0 0,10.292969 10.89844,0 0,3.027344 -10.89844,0 0,10.605469 12.42187,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10658" />
+      <path
+         d="m 311.86914,59.958314 0,12.871093 -3.94531,0 0,-30.3125 8.10547,0 q 3.96484,0 5.83984,0.820313 1.875,0.800781 2.89063,2.441406 1.03515,1.640625 1.03515,3.535156 0,1.757813 -0.68359,3.535157 -0.6836,1.777343 -1.89453,3.183593 -1.19141,1.386719 -3.45703,2.636719 l 8.76953,14.160156 -4.6875,0 -8.02735,-12.871093 -3.94531,0 z m 0,-3.046875 5.52734,0 q 1.85547,-1.015625 2.75391,-2.070313 0.91797,-1.054687 1.38672,-2.246094 0.46875,-1.191406 0.46875,-2.382812 0,-2.128906 -1.5625,-3.378906 -1.54297,-1.269532 -5.625,-1.269532 l -2.94922,0 0,11.347657 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10660" />
+      <path
+         d="m 330.38477,75.856751 0,-3.027344 21.21093,0 0,3.027344 -21.21093,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10662" />
+      <path
+         d="m 355.52148,72.829407 0,-30.3125 3.94532,0 0,13.046875 11.21093,0 0,-13.046875 3.92579,0 0,30.3125 -3.92579,0 0,-14.238281 -11.21093,0 0,14.238281 -3.94532,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10664" />
+      <path
+         d="m 389.03711,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10666" />
+      <path
+         d="m 404.17383,72.184876 0,-3.710937 q 4.6875,2.226562 8.51562,2.226562 1.60157,0 3.06641,-0.605469 1.46484,-0.625 2.20703,-1.777343 0.76172,-1.152344 0.76172,-2.5 0,-1.582032 -1.03516,-2.949219 -1.03515,-1.386719 -3.88672,-3.046875 l -1.99218,-1.152344 -2.01172,-1.152344 q -5.3125,-3.144531 -5.3125,-7.8125 0,-3.417968 2.36328,-5.742187 2.38281,-2.34375 7.42187,-2.34375 3.24219,0 6.26954,0.9375 l 0,3.378906 q -3.33985,-1.289062 -6.50391,-1.289062 -2.51953,0 -4.08203,1.328125 -1.54297,1.328125 -1.54297,3.203125 0,1.855468 1.19141,3.085937 1.1914,1.230469 3.04687,2.265625 l 1.52344,0.917969 1.89453,1.152344 1.60156,0.9375 q 4.98047,3.046875 4.98047,7.65625 0,3.515625 -2.55859,6.035156 -2.5586,2.519531 -8.20313,2.519531 -1.79687,0 -3.4375,-0.3125 -1.62109,-0.292969 -4.27734,-1.25 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10668" />
+      <path
+         d="m 435.11133,72.829407 0,-26.972656 -9.08203,0 0,-3.339844 22.1289,0 0,3.339844 -9.10156,0 0,26.972656 -3.94531,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+         id="path10670" />
+    </g>
+    <g
+       transform="matrix(0.62086873,0,0,0.60736447,121.72855,-246.05559)"
+       id="g10196" />
+    <g
+       id="g10214"
+       transform="translate(6.1871843,-28.279642)">
+      <rect
+         rx="16.812302"
+         ry="16.631735"
+         y="460.20282"
+         x="177.63893"
+         height="64.318726"
+         width="109.2876"
+         id="rect10198"
+         style="display:inline;opacity:1;fill:#394d54;fill-opacity:1;stroke:#005976;stroke-width:4.88536119;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         transform="matrix(0.62086873,0,0,0.60736447,121.72855,-246.05559)" />
+      <g
+         transform="matrix(0.62086873,0,0,0.60736447,119.85325,15.833152)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#dbdde0;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot6134">
+        <path
+           d="m 187.97742,46.673398 0,28.56 18.92,0 0,-3.2 -15.12,0 0,-25.36 -3.8,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path10673" />
+        <path
+           d="m 213.28305,50.833398 0,-4.16 -3.4,0 0,4.16 3.4,0 z m -3.4,3.72 0,20.68 3.4,0 0,-20.68 -3.4,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path10675" />
+        <path
+           d="m 218.5893,54.553398 0,20.68 3.4,0 0,-11.68 q 0,-1.4 0.36,-2.56 0.4,-1.2 1.16,-2.08 0.76,-0.88 1.88,-1.36 1.16,-0.48 2.72,-0.48 1.96,0 3.08,1.12 1.12,1.12 1.12,3.04 l 0,14 3.4,0 0,-13.6 q 0,-1.68 -0.36,-3.04 -0.32,-1.4 -1.16,-2.4 -0.84,-1 -2.2,-1.56 -1.36,-0.56 -3.4,-0.56 -4.6,0 -6.72,3.76 l -0.08,0 0,-3.28 -3.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path10677" />
+        <path
+           d="m 257.97492,75.233398 0,-20.68 -3.4,0 0,11.68 q 0,1.4 -0.4,2.6 -0.36,1.16 -1.12,2.04 -0.76,0.88 -1.92,1.36 -1.12,0.48 -2.68,0.48 -1.96,0 -3.08,-1.12 -1.12,-1.12 -1.12,-3.04 l 0,-14 -3.4,0 0,13.6 q 0,1.68 0.32,3.08 0.36,1.36 1.2,2.36 0.84,1 2.2,1.56 1.36,0.52 3.4,0.52 2.28,0 3.96,-0.88 1.68,-0.92 2.76,-2.84 l 0.08,0 0,3.28 3.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path10679" />
+        <path
+           d="m 268.68055,64.353398 -7.76,10.88 4.12,0 5.76,-8.56 5.76,8.56 4.36,0 -8,-11.16 7.12,-9.52 -4.08,0 -5.16,7.24 -4.96,-7.24 -4.36,0 7.2,9.8 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path10681" />
+      </g>
+    </g>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker8148)"
+       d="m 114,182 c 47.31731,29.66731 16.44337,34.29153 9,39.75"
+       id="path8146"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+  </g>
+  <g
+     inkscape:groupmode="layer"
+     id="layer4"
+     inkscape:label="arrows">
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5920)"
+       d="m 74.649377,83.195617 c -85,41.428573 -36.428572,63.571433 -36.428572,63.571433 l 2.232143,1.69643"
+       id="path5884"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="ccc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker6352)"
+       d="m 197.16754,166.6243 c 86.65404,5.17688 43.37399,40.92551 30.2397,55.44599"
+       id="path6142"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker6148)"
+       d="M 113.32488,182.78675 C -29.730983,213.05834 45.251032,291.85445 69.362222,303.91005"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+  </g>
+  <g
+     inkscape:label="default"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-73.928551,-292.36218)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot4362"
+       style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:sans-serif;font-style:normal;font-weight:normal;font-size:40px;line-height:125%;letter-spacing:0px;word-spacing:0px"><flowRegion
+         id="flowRegion4364"><rect
+           id="rect4366"
+           width="95.964493"
+           height="43.436558"
+           x="171.72594"
+           y="102.005" /></flowRegion><flowPara
+         id="flowPara4368"></flowPara></flowRoot>    <rect
+       style="opacity:1;fill:#f2f2f2;fill-opacity:1;stroke:#005976;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect4461"
+       width="151.48232"
+       height="31.31473"
+       x="114.04051"
+       y="364.15875" />
+    <g
+       transform="matrix(1.1029002,0,0,1.1071429,-73.180491,327.75181)"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot4354">
+      <path
+         d="m 187.98859,51.93976 0,-10.026972 2.89587,0 q 1.19455,0 2.00902,0.343886 0.81446,0.325786 1.32124,0.977358 0.52488,0.633473 0.74207,1.556533 0.23529,0.904962 0.23529,2.063312 0,1.194549 -0.25339,2.045213 -0.23529,0.832565 -0.61537,1.393641 -0.38009,0.561076 -0.86876,0.886862 -0.47058,0.325786 -0.95926,0.506778 -0.48868,0.162893 -0.92306,0.217191 -0.43439,0.0362 -0.72397,0.0362 l -2.85968,0 z m -1.71943,-11.47491 0,12.922849 4.43431,0 q 1.61083,0 2.78728,-0.452481 1.17645,-0.452481 1.93662,-1.303144 0.76017,-0.868763 1.12215,-2.11761 0.36199,-1.266946 0.36199,-2.895877 0,-3.113067 -1.61083,-4.633402 -1.61083,-1.520335 -4.59721,-1.520335 l -4.43431,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10625" />
+      <path
+         d="m 199.86394,48.718098 q 0,-0.850664 0.2172,-1.502236 0.23529,-0.669671 0.63347,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54298,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39819,0.452481 0.61538,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61538,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59727,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39818,-0.452481 -0.63347,-1.104053 -0.2172,-0.669671 -0.2172,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85067,0.361984 1.95472,0.361984 1.12215,0 1.95472,-0.361984 0.85066,-0.380084 1.42984,-1.031656 0.57917,-0.669672 0.86876,-1.556534 0.28959,-0.886862 0.28959,-1.918518 0,-1.031656 -0.28959,-1.918518 -0.28959,-0.904962 -0.86876,-1.556534 -0.57918,-0.669671 -1.42984,-1.049755 -0.83257,-0.380084 -1.95472,-0.380084 -1.10405,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10627" />
+      <path
+         d="m 215.45219,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93662,0.398183 -0.83256,0.380084 -1.39364,1.067855 -0.54297,0.669671 -0.81446,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37554,0.977358 0.83257,0.343885 1.90042,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57464,0 q -0.14479,1.085953 -0.79636,1.683228 -0.63347,0.597274 -1.77373,0.597274 -0.72397,0 -1.24884,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48868,-1.104053 -0.14479,-0.615374 -0.14479,-1.266946 0,-0.70587 0.14479,-1.357442 0.1448,-0.669671 0.47058,-1.17645 0.34389,-0.506778 0.90497,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97735,0 1.55653,0.488679 0.57917,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10629" />
+      <path
+         d="m 218.91112,40.46485 0,12.922849 1.53843,0 0,-3.547449 1.44794,-1.339343 3.20357,4.886792 1.95471,0 -3.98183,-5.954646 3.71034,-3.402655 -2.06331,0 -4.27142,4.090425 0,-7.655973 -1.53843,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10631" />
+      <path
+         d="m 234.72136,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08596,-0.217191 0.57917,0 1.04975,0.217191 0.48868,0.199091 0.83257,0.561076 0.36198,0.343885 0.56107,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59273,0.45248 -0.76017,0 -1.32125,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90496,1.538435 -0.32579,0.886862 -0.32579,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64249,-0.796366 1.04975,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10633" />
+      <path
+         d="m 237.8907,44.030398 0,9.357301 1.53844,0 0,-4.162823 q 0,-0.904961 0.18099,-1.592732 0.18099,-0.70587 0.57918,-1.194549 0.39818,-0.488679 1.04975,-0.742068 0.65157,-0.253389 1.57463,-0.253389 l 0,-1.628931 q -1.24884,-0.0362 -2.06331,0.506779 -0.81446,0.542976 -1.37554,1.683228 l -0.0362,0 0,-1.972816 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10635" />
+      <path
+         d="m 255.3624,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93661,0.398183 -0.83257,0.380084 -1.39364,1.067855 -0.54298,0.669671 -0.81447,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37555,0.977358 0.83256,0.343885 1.90041,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57463,0 q -0.1448,1.085953 -0.79637,1.683228 -0.63347,0.597274 -1.77372,0.597274 -0.72397,0 -1.24885,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48867,-1.104053 -0.1448,-0.615374 -0.1448,-1.266946 0,-0.70587 0.1448,-1.357442 0.14479,-0.669671 0.47058,-1.17645 0.34388,-0.506778 0.90496,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97736,0 1.55653,0.488679 0.57918,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10637" />
+      <path
+         d="m 258.82133,40.46485 0,12.922849 1.53844,0 0,-12.922849 -1.53844,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10639" />
+      <path
+         d="m 264.38967,42.34717 0,-1.88232 -1.53844,0 0,1.88232 1.53844,0 z m -1.53844,1.683228 0,9.357301 1.53844,0 0,-9.357301 -1.53844,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10641" />
+      <path
+         d="m 273.28827,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54297,-0.850664 0.34389,-0.361985 0.81447,-0.561076 0.48868,-0.217191 1.08595,-0.217191 0.57918,0 1.04976,0.217191 0.48868,0.199091 0.83256,0.561076 0.36199,0.343885 0.56108,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48413,2.606288 -1.52033,0 q -0.19909,0.923061 -0.83257,1.375542 -0.61537,0.45248 -1.59273,0.45248 -0.76017,0 -1.32124,-0.253389 -0.56108,-0.253389 -0.92306,-0.669671 -0.36199,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.1629,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32124,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77373,0.361985 -0.79636,0.361984 -1.39364,1.013556 -0.57917,0.651573 -0.90496,1.538435 -0.32578,0.886862 -0.32578,1.954716 0.0362,1.067855 0.30768,1.972816 0.28959,0.904962 0.85067,1.556534 0.56107,0.651572 1.37554,1.013557 0.83256,0.361984 1.95471,0.361984 1.59274,0 2.64249,-0.796366 1.04976,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10643" />
+      <path
+         d="m 276.51191,44.030398 0,9.357301 1.53844,0 0,-5.284975 q 0,-0.633473 0.16289,-1.15835 0.18099,-0.542977 0.52488,-0.94116 0.34388,-0.398183 0.85066,-0.615374 0.52488,-0.217191 1.23075,-0.217191 0.88686,0 1.39364,0.506779 0.50678,0.506778 0.50678,1.375541 l 0,6.33473 1.53843,0 0,-6.153738 q 0,-0.760167 -0.16289,-1.375541 -0.1448,-0.633473 -0.52488,-1.085954 -0.38008,-0.45248 -0.99546,-0.70587 -0.61537,-0.253389 -1.53843,-0.253389 -2.08141,0 -3.04067,1.701328 l -0.0362,0 0,-1.484137 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10645" />
+      <path
+         d="m 288.72239,44.030398 0,-2.80538 -1.53844,0 0,2.80538 -1.59273,0 0,1.357442 1.59273,0 0,5.954646 q 0,0.651572 0.1267,1.049755 0.12669,0.398183 0.38008,0.615374 0.27149,0.217191 0.68777,0.307687 0.43439,0.0724 1.03166,0.0724 l 1.17645,0 0,-1.357442 -0.70587,0 q -0.36199,0 -0.59727,-0.0181 -0.2172,-0.0362 -0.34389,-0.126694 -0.12669,-0.0905 -0.18099,-0.253389 -0.0362,-0.162893 -0.0362,-0.434382 l 0,-5.809852 1.86422,0 0,-1.357442 -1.86422,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10647" />
+    </g>
+    <rect
+       y="442.16461"
+       x="116.18336"
+       height="31.31473"
+       width="151.48232"
+       id="rect4484"
+       style="opacity:1;fill:#f2f2f2;fill-opacity:1;stroke:#005976;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+    <g
+       transform="matrix(1.1029002,0,0,1.1071429,-83.240434,405.75765)"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot4486">
+      <path
+         d="m 187.98859,51.93976 0,-10.026972 2.89587,0 q 1.19455,0 2.00902,0.343886 0.81446,0.325786 1.32124,0.977358 0.52488,0.633473 0.74207,1.556533 0.23529,0.904962 0.23529,2.063312 0,1.194549 -0.25339,2.045213 -0.23529,0.832565 -0.61537,1.393641 -0.38009,0.561076 -0.86876,0.886862 -0.47058,0.325786 -0.95926,0.506778 -0.48868,0.162893 -0.92306,0.217191 -0.43439,0.0362 -0.72397,0.0362 l -2.85968,0 z m -1.71943,-11.47491 0,12.922849 4.43431,0 q 1.61083,0 2.78728,-0.452481 1.17645,-0.452481 1.93662,-1.303144 0.76017,-0.868763 1.12215,-2.11761 0.36199,-1.266946 0.36199,-2.895877 0,-3.113067 -1.61083,-4.633402 -1.61083,-1.520335 -4.59721,-1.520335 l -4.43431,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10600" />
+      <path
+         d="m 199.86394,48.718098 q 0,-0.850664 0.2172,-1.502236 0.23529,-0.669671 0.63347,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54298,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39819,0.452481 0.61538,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61538,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59727,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39818,-0.452481 -0.63347,-1.104053 -0.2172,-0.669671 -0.2172,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85067,0.361984 1.95472,0.361984 1.12215,0 1.95472,-0.361984 0.85066,-0.380084 1.42984,-1.031656 0.57917,-0.669672 0.86876,-1.556534 0.28959,-0.886862 0.28959,-1.918518 0,-1.031656 -0.28959,-1.918518 -0.28959,-0.904962 -0.86876,-1.556534 -0.57918,-0.669671 -1.42984,-1.049755 -0.83257,-0.380084 -1.95472,-0.380084 -1.10405,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10602" />
+      <path
+         d="m 215.45219,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93662,0.398183 -0.83256,0.380084 -1.39364,1.067855 -0.54297,0.669671 -0.81446,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37554,0.977358 0.83257,0.343885 1.90042,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57464,0 q -0.14479,1.085953 -0.79636,1.683228 -0.63347,0.597274 -1.77373,0.597274 -0.72397,0 -1.24884,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48868,-1.104053 -0.14479,-0.615374 -0.14479,-1.266946 0,-0.70587 0.14479,-1.357442 0.1448,-0.669671 0.47058,-1.17645 0.34389,-0.506778 0.90497,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97735,0 1.55653,0.488679 0.57917,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10604" />
+      <path
+         d="m 218.91112,40.46485 0,12.922849 1.53843,0 0,-3.547449 1.44794,-1.339343 3.20357,4.886792 1.95471,0 -3.98183,-5.954646 3.71034,-3.402655 -2.06331,0 -4.27142,4.090425 0,-7.655973 -1.53843,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10606" />
+      <path
+         d="m 234.72136,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08596,-0.217191 0.57917,0 1.04975,0.217191 0.48868,0.199091 0.83257,0.561076 0.36198,0.343885 0.56107,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59273,0.45248 -0.76017,0 -1.32125,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90496,1.538435 -0.32579,0.886862 -0.32579,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64249,-0.796366 1.04975,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10608" />
+      <path
+         d="m 237.8907,44.030398 0,9.357301 1.53844,0 0,-4.162823 q 0,-0.904961 0.18099,-1.592732 0.18099,-0.70587 0.57918,-1.194549 0.39818,-0.488679 1.04975,-0.742068 0.65157,-0.253389 1.57463,-0.253389 l 0,-1.628931 q -1.24884,-0.0362 -2.06331,0.506779 -0.81446,0.542976 -1.37554,1.683228 l -0.0362,0 0,-1.972816 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10610" />
+      <path
+         d="m 250.13172,48.790495 q 0,-0.687771 0.1448,-1.339343 0.14479,-0.651572 0.47058,-1.158351 0.32578,-0.506778 0.86876,-0.814465 0.54298,-0.307687 1.32124,-0.307687 0.79637,0 1.35744,0.307687 0.56108,0.289588 0.90497,0.778267 0.36198,0.488679 0.52487,1.140251 0.1629,0.633473 0.1629,1.321244 0,0.651572 -0.1629,1.285045 -0.14479,0.633473 -0.48868,1.140251 -0.34388,0.48868 -0.88686,0.796366 -0.54297,0.307687 -1.32124,0.307687 -0.74207,0 -1.30315,-0.289587 -0.54297,-0.289588 -0.90496,-0.778267 -0.34388,-0.488679 -0.52488,-1.104053 -0.16289,-0.633473 -0.16289,-1.285045 z m 7.23969,4.597204 0,-12.922849 -1.53843,0 0,4.814395 -0.0362,0 q -0.25339,-0.416283 -0.63347,-0.687771 -0.36199,-0.289588 -0.77827,-0.452481 -0.41628,-0.180992 -0.83256,-0.253389 -0.41629,-0.0724 -0.77827,-0.0724 -1.06786,0 -1.88232,0.398183 -0.79637,0.380084 -1.33934,1.049755 -0.52488,0.651573 -0.79637,1.538435 -0.25339,0.886862 -0.25339,1.882319 0,0.995458 0.27149,1.88232 0.27149,0.886862 0.79637,1.556534 0.54297,0.669671 1.33934,1.067854 0.81446,0.398183 1.90042,0.398183 0.97736,0 1.79182,-0.343885 0.81447,-0.343885 1.19455,-1.122152 l 0.0362,0 0,1.266946 1.53843,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10612" />
+      <path
+         d="m 268.01008,53.3515 q -0.39818,0.23529 -1.10405,0.23529 -0.59727,0 -0.95926,-0.325786 -0.34388,-0.343885 -0.34388,-1.104053 -0.63348,0.760168 -1.48414,1.104053 -0.83257,0.325786 -1.80992,0.325786 -0.63348,0 -1.21265,-0.144794 -0.56108,-0.144793 -0.97736,-0.45248 -0.41628,-0.307687 -0.66967,-0.796366 -0.23529,-0.506779 -0.23529,-1.212649 0,-0.796366 0.27149,-1.303144 0.27149,-0.506778 0.70587,-0.814465 0.45248,-0.325786 1.01355,-0.488679 0.57918,-0.162893 1.17645,-0.271489 0.63348,-0.126694 1.19455,-0.180992 0.57918,-0.0724 1.01356,-0.180992 0.43438,-0.126695 0.68777,-0.343886 0.25339,-0.23529 0.25339,-0.669671 0,-0.506779 -0.19909,-0.814465 -0.18099,-0.307687 -0.48868,-0.47058 -0.28959,-0.162893 -0.66967,-0.217191 -0.36199,-0.0543 -0.72397,-0.0543 -0.97736,0 -1.62893,0.380084 -0.65157,0.361985 -0.70587,1.393641 l -1.53844,0 q 0.0362,-0.868763 0.36199,-1.466038 0.32578,-0.597274 0.86876,-0.959259 0.54298,-0.380084 1.23075,-0.542977 0.70587,-0.162893 1.50223,-0.162893 0.63348,0 1.24885,0.0905 0.63347,0.0905 1.14025,0.380084 0.50678,0.271488 0.81447,0.778267 0.30768,0.506778 0.30768,1.321244 l 0,4.814394 q 0,0.542977 0.0543,0.796366 0.0724,0.253389 0.43438,0.253389 0.19909,0 0.47058,-0.0905 l 0,1.194549 z m -2.49769,-4.796295 q -0.28959,0.217191 -0.76017,0.325786 -0.47058,0.0905 -0.99546,0.162893 -0.50677,0.0543 -1.03165,0.144794 -0.52488,0.0724 -0.94116,0.253389 -0.41628,0.180992 -0.68777,0.524878 -0.25339,0.325786 -0.25339,0.904961 0,0.380084 0.14479,0.651572 0.1629,0.253389 0.39819,0.416282 0.25339,0.162894 0.57917,0.23529 0.32579,0.0724 0.68777,0.0724 0.76017,0 1.30315,-0.199091 0.54297,-0.217191 0.88686,-0.524878 0.34388,-0.325786 0.50678,-0.68777 0.16289,-0.380084 0.16289,-0.70587 l 0,-1.574633 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10614" />
+      <path
+         d="m 275.93952,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08595,-0.217191 0.57918,0 1.04976,0.217191 0.48868,0.199091 0.83256,0.561076 0.36199,0.343885 0.56108,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59274,0.45248 -0.76016,0 -1.32124,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90497,1.538435 -0.32578,0.886862 -0.32578,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64248,-0.796366 1.04976,-0.796366 1.35745,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10616" />
+      <path
+         d="m 279.16316,44.030398 0,9.357301 1.53844,0 0,-5.827952 q 0,-0.271488 0.12669,-0.669671 0.1448,-0.416282 0.43439,-0.796366 0.30768,-0.380084 0.77826,-0.651572 0.48868,-0.271489 1.15835,-0.271489 0.52488,0 0.85067,0.162893 0.34388,0.144794 0.54297,0.434382 0.19909,0.271488 0.27149,0.651572 0.0905,0.380084 0.0905,0.832564 l 0,6.135639 1.53843,0 0,-5.827952 q 0,-1.085953 0.65157,-1.737526 0.65158,-0.651572 1.79183,-0.651572 0.56107,0 0.90496,0.162893 0.36198,0.162893 0.56108,0.452481 0.19909,0.271488 0.27148,0.651572 0.0724,0.380084 0.0724,0.814465 l 0,6.135639 1.53843,0 0,-6.859608 q 0,-0.723969 -0.23528,-1.230747 -0.2172,-0.524878 -0.63348,-0.850664 -0.39818,-0.325786 -0.97736,-0.47058 -0.56107,-0.162893 -1.26694,-0.162893 -0.92306,0 -1.70133,0.416282 -0.76017,0.416283 -1.23075,1.17645 -0.28958,-0.868763 -0.99545,-1.230747 -0.70587,-0.361985 -1.57464,-0.361985 -1.97281,0 -3.02257,1.592732 l -0.0362,0 0,-1.375541 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10618" />
+      <path
+         d="m 295.73329,48.718098 q 0,-0.850664 0.21719,-1.502236 0.23529,-0.669671 0.63348,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54297,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39818,0.452481 0.61537,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61537,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59728,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39819,-0.452481 -0.63348,-1.104053 -0.21719,-0.669671 -0.21719,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85066,0.361984 1.95472,0.361984 1.12215,0 1.95471,-0.361984 0.85067,-0.380084 1.42984,-1.031656 0.57918,-0.669672 0.86877,-1.556534 0.28958,-0.886862 0.28958,-1.918518 0,-1.031656 -0.28958,-1.918518 -0.28959,-0.904962 -0.86877,-1.556534 -0.57917,-0.669671 -1.42984,-1.049755 -0.83256,-0.380084 -1.95471,-0.380084 -1.10406,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10620" />
+      <path
+         d="m 304.96871,44.030398 0,9.357301 1.53843,0 0,-5.284975 q 0,-0.633473 0.16289,-1.15835 0.181,-0.542977 0.52488,-0.94116 0.34389,-0.398183 0.85067,-0.615374 0.52487,-0.217191 1.23074,-0.217191 0.88687,0 1.39364,0.506779 0.50678,0.506778 0.50678,1.375541 l 0,6.33473 1.53844,0 0,-6.153738 q 0,-0.760167 -0.1629,-1.375541 -0.14479,-0.633473 -0.52487,-1.085954 -0.38009,-0.45248 -0.99546,-0.70587 -0.61538,-0.253389 -1.53844,-0.253389 -2.08141,0 -3.04067,1.701328 l -0.0362,0 0,-1.484137 -1.44793,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path10622" />
+    </g>
+    <g
+       id="g4736"
+       transform="translate(862.40058,602.59637)">
+      <g
+         display="none"
+         id="Layer_2"
+         style="display:none">
+        <line
+           id="line4716"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-49.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10" />
+        <line
+           id="line4718"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-46.612"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10" />
+        <line
+           id="line4720"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-4.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10" />
+        <line
+           id="line4722"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-67.064003"
+           x1="-52.994999"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10" />
+        <line
+           id="line4724"
+           y2="-49.472"
+           x2="-76.915001"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10" />
+        <line
+           id="line4726"
+           y2="-71.179001"
+           x2="-165"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10" />
+        <line
+           id="line4728"
+           y2="-0.271"
+           x2="-77.349998"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10" />
+      </g>
+    </g>
+  </g>
+  <g
+     inkscape:groupmode="layer"
+     id="layer3"
+     inkscape:label="containers">
+    <g
+       id="g5083"
+       transform="matrix(0.76325157,0,0,0.8038217,61.077665,213.23614)">
+      <g
+         id="Isolation_Mode-6">
+        <polygon
+           id="polygon4984"
+           points="122.005,77.57 122.005,22.429 32.038,0.183 0,19.314 0,72.66 92.65,101.222 "
+           style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd" />
+        <polygon
+           id="polygon4986"
+           points="92.65,94.222 5,67.66 5,23.314 92.65,45.131 "
+           style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd" />
+        <g
+           id="g4988">
+          <g
+             id="g4990">
+            <polygon
+               id="polygon4992"
+               points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon4994"
+               points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon4996"
+               points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon4998"
+               points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5000"
+               points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5002"
+               points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5004"
+               points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5006"
+               points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5008"
+               points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5010"
+               points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5012"
+               points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5014"
+               points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5016"
+               points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+          </g>
+          <g
+             id="g5018">
+            <polygon
+               id="polygon5020"
+               points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5022"
+               points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5024"
+               points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5026"
+               points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5028"
+               points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5030"
+               points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5032"
+               points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5034"
+               points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5036"
+               points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5038"
+               points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5040"
+               points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5042"
+               points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5044"
+               points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+               style="fill:#394d54" />
+          </g>
+        </g>
+        <polygon
+           id="polygon5046"
+           points="117.005,75.212 92.65,94.398 92.65,46.469 117.005,29.184 "
+           style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+        <polygon
+           id="polygon5048"
+           points="117.005,27.429 92.65,45.131 5,23.314 32.538,6.683 "
+           style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd" />
+        <rect
+           id="rect5050"
+           height="92.299004"
+           width="3.5250001"
+           transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+           y="-12.046"
+           x="46.075001"
+           style="fill:#394d54" />
+        <rect
+           id="rect5052"
+           height="34.868999"
+           width="3.5250001"
+           transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+           y="18.122"
+           x="103.715"
+           style="fill:#394d54" />
+        <rect
+           id="rect5054"
+           height="51.174"
+           width="3.523"
+           transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+           y="44.498001"
+           x="90.234001"
+           style="fill:#394d54" />
+      </g>
+      <g
+         display="none"
+         id="Layer_2-0"
+         style="display:none">
+        <line
+           id="line5063"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-49.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5065"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-46.612"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5067"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-4.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5069"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-67.064003"
+           x1="-52.994999"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5071"
+           y2="-49.472"
+           x2="-76.915001"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5073"
+           y2="-71.179001"
+           x2="-165"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5075"
+           y2="-0.271"
+           x2="-77.349998"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+      </g>
+    </g>
+    <g
+       transform="matrix(0.76325157,0,0,0.8038217,161.39072,213.23614)"
+       id="g5133">
+      <g
+         id="g5135">
+        <polygon
+           style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+           points="32.038,0.183 0,19.314 0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 "
+           id="polygon5137" />
+        <polygon
+           style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd"
+           points="5,23.314 92.65,45.131 92.65,94.222 5,67.66 "
+           id="polygon5139" />
+        <g
+           id="g5141">
+          <g
+             id="g5143">
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+               id="polygon5145" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+               id="polygon5147" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+               id="polygon5149" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+               id="polygon5151" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+               id="polygon5153" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+               id="polygon5155" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+               id="polygon5157" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+               id="polygon5159" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+               id="polygon5161" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+               id="polygon5163" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+               id="polygon5165" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+               id="polygon5167" />
+            <polygon
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+               points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+               id="polygon5169" />
+          </g>
+          <g
+             id="g5171">
+            <polygon
+               style="fill:#394d54"
+               points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+               id="polygon5173" />
+            <polygon
+               style="fill:#394d54"
+               points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+               id="polygon5175" />
+            <polygon
+               style="fill:#394d54"
+               points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+               id="polygon5177" />
+            <polygon
+               style="fill:#394d54"
+               points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+               id="polygon5179" />
+            <polygon
+               style="fill:#394d54"
+               points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+               id="polygon5181" />
+            <polygon
+               style="fill:#394d54"
+               points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+               id="polygon5183" />
+            <polygon
+               style="fill:#394d54"
+               points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+               id="polygon5185" />
+            <polygon
+               style="fill:#394d54"
+               points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+               id="polygon5187" />
+            <polygon
+               style="fill:#394d54"
+               points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+               id="polygon5189" />
+            <polygon
+               style="fill:#394d54"
+               points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+               id="polygon5191" />
+            <polygon
+               style="fill:#394d54"
+               points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+               id="polygon5193" />
+            <polygon
+               style="fill:#394d54"
+               points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+               id="polygon5195" />
+            <polygon
+               style="fill:#394d54"
+               points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+               id="polygon5197" />
+          </g>
+        </g>
+        <polygon
+           style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+           points="92.65,46.469 117.005,29.184 117.005,75.212 92.65,94.398 "
+           id="polygon5199" />
+        <polygon
+           style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd"
+           points="5,23.314 32.538,6.683 117.005,27.429 92.65,45.131 "
+           id="polygon5201" />
+        <rect
+           style="fill:#394d54"
+           x="46.075001"
+           y="-12.046"
+           transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+           width="3.5250001"
+           height="92.299004"
+           id="rect5203" />
+        <rect
+           style="fill:#394d54"
+           x="103.715"
+           y="18.122"
+           transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+           width="3.5250001"
+           height="34.868999"
+           id="rect5205" />
+        <rect
+           style="fill:#394d54"
+           x="90.234001"
+           y="44.498001"
+           transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+           width="3.523"
+           height="51.174"
+           id="rect5207" />
+      </g>
+      <g
+         style="display:none"
+         id="g5209"
+         display="none">
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-49.362"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line5211" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-46.612"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line5213" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-77.349998"
+           y1="-4.362"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line5215" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="-52.994999"
+           y1="-67.064003"
+           x2="-1830.454"
+           y2="-492.659"
+           id="line5217" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-76.915001"
+           y2="-49.472"
+           id="line5219" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-165"
+           y2="-71.179001"
+           id="line5221" />
+        <line
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+           display="inline"
+           stroke-miterlimit="10"
+           x1="532.88"
+           y1="-492.659"
+           x2="-77.349998"
+           y2="-0.271"
+           id="line5223" />
+      </g>
+    </g>
+    <g
+       id="g5225"
+       transform="matrix(0.76325157,0,0,0.8038217,61.077665,295.91495)">
+      <g
+         id="g5227">
+        <polygon
+           id="polygon5229"
+           points="0,19.314 0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 32.038,0.183 "
+           style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd" />
+        <polygon
+           id="polygon5231"
+           points="92.65,45.131 92.65,94.222 5,67.66 5,23.314 "
+           style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd" />
+        <g
+           id="g5233">
+          <g
+             id="g5235">
+            <polygon
+               id="polygon5237"
+               points="7.917,66.177 11.364,66.906 11.364,30.067 7.917,29.184 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5239"
+               points="14.134,68.013 17.598,68.765 17.598,31.68 14.134,30.801 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5241"
+               points="20.353,69.848 23.831,70.624 23.831,33.292 20.353,32.418 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5243"
+               points="26.569,71.684 30.063,72.483 30.063,34.905 26.569,34.035 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5245"
+               points="32.789,73.519 36.298,74.341 36.298,36.518 32.789,35.652 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5247"
+               points="39.007,75.354 42.532,76.201 42.532,38.13 39.007,37.27 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5249"
+               points="45.224,77.19 48.765,78.06 48.765,39.743 45.224,38.887 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5251"
+               points="51.442,79.025 54.999,79.919 54.999,41.356 51.442,40.504 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5253"
+               points="57.661,80.86 61.231,81.777 61.231,42.968 57.661,42.122 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5255"
+               points="63.878,82.697 67.466,83.637 67.466,44.581 63.878,43.737 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5257"
+               points="70.097,84.532 73.699,85.496 73.699,46.193 70.097,45.355 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5259"
+               points="76.313,86.367 79.933,87.354 79.933,47.806 76.313,46.973 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            <polygon
+               id="polygon5261"
+               points="82.532,48.589 82.532,88.203 86.165,89.213 86.165,49.418 "
+               style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+          </g>
+          <g
+             id="g5263">
+            <polygon
+               id="polygon5265"
+               points="7.917,66.177 11.364,66.906 11.364,30.067 7.917,29.184 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5267"
+               points="14.134,68.013 17.598,68.765 17.598,31.68 14.134,30.801 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5269"
+               points="20.353,69.848 23.831,70.624 23.831,33.292 20.353,32.418 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5271"
+               points="26.569,71.684 30.063,72.483 30.063,34.905 26.569,34.035 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5273"
+               points="32.789,73.519 36.298,74.341 36.298,36.518 32.789,35.652 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5275"
+               points="39.007,75.354 42.532,76.201 42.532,38.13 39.007,37.27 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5277"
+               points="45.224,77.19 48.765,78.06 48.765,39.743 45.224,38.887 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5279"
+               points="51.442,79.025 54.999,79.919 54.999,41.356 51.442,40.504 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5281"
+               points="57.661,80.86 61.231,81.777 61.231,42.968 57.661,42.122 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5283"
+               points="63.878,82.697 67.466,83.637 67.466,44.581 63.878,43.737 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5285"
+               points="70.097,84.532 73.699,85.496 73.699,46.193 70.097,45.355 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5287"
+               points="76.313,86.367 79.933,87.354 79.933,47.806 76.313,46.973 "
+               style="fill:#394d54" />
+            <polygon
+               id="polygon5289"
+               points="82.532,48.589 82.532,88.203 86.165,89.213 86.165,49.418 "
+               style="fill:#394d54" />
+          </g>
+        </g>
+        <polygon
+           id="polygon5291"
+           points="117.005,29.184 117.005,75.212 92.65,94.398 92.65,46.469 "
+           style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+        <polygon
+           id="polygon5293"
+           points="32.538,6.683 117.005,27.429 92.65,45.131 5,23.314 "
+           style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd" />
+        <rect
+           id="rect5295"
+           height="92.299004"
+           width="3.5250001"
+           transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+           y="-12.046"
+           x="46.075001"
+           style="fill:#394d54" />
+        <rect
+           id="rect5297"
+           height="34.868999"
+           width="3.5250001"
+           transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+           y="18.122"
+           x="103.715"
+           style="fill:#394d54" />
+        <rect
+           id="rect5299"
+           height="51.174"
+           width="3.523"
+           transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+           y="44.498001"
+           x="90.234001"
+           style="fill:#394d54" />
+      </g>
+      <g
+         display="none"
+         id="g5301"
+         style="display:none">
+        <line
+           id="line5303"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-49.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5305"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-46.612"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5307"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-4.362"
+           x1="-77.349998"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5309"
+           y2="-492.659"
+           x2="-1830.454"
+           y1="-67.064003"
+           x1="-52.994999"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5311"
+           y2="-49.472"
+           x2="-76.915001"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5313"
+           y2="-71.179001"
+           x2="-165"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        <line
+           id="line5315"
+           y2="-0.271"
+           x2="-77.349998"
+           y1="-492.659"
+           x1="532.88"
+           stroke-miterlimit="10"
+           display="inline"
+           style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+      </g>
+    </g>
+  </g>
+</svg>
diff --git a/docs/sources/installation/images/mac_docker_host.png b/docs/sources/installation/images/mac_docker_host.png
deleted file mode 100644
index 9aa71a4..0000000
--- a/docs/sources/installation/images/mac_docker_host.png
+++ /dev/null
Binary files differ
diff --git a/docs/sources/installation/images/mac_docker_host.svg b/docs/sources/installation/images/mac_docker_host.svg
new file mode 100644
index 0000000..a885a32
--- /dev/null
+++ b/docs/sources/installation/images/mac_docker_host.svg
@@ -0,0 +1,1243 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="88.900002mm"
+   height="112.88889mm"
+   viewBox="0 0 315.00001 399.99999"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.91 r13725"
+   sodipodi:docname="mac_docker_host.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker8148"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path8150"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker6352"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path6354"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker6148"
+       style="overflow:visible;"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         id="path6150"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker5920"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5922"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker5886"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5888"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5368"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Lend"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5350"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         transform="scale(0.8) rotate(180) translate(12.5,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="2.9893238"
+     inkscape:cx="83.5"
+     inkscape:cy="75.5"
+     inkscape:document-units="px"
+     inkscape:current-layer="g4485"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="1680"
+     inkscape:window-height="1005"
+     inkscape:window-x="4"
+     inkscape:window-y="0"
+     inkscape:window-maximized="0"
+     inkscape:showpageshadow="false" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:groupmode="layer"
+     id="layer2"
+     inkscape:label="host"
+     style="display:inline" />
+  <g
+     inkscape:groupmode="layer"
+     id="layer4"
+     inkscape:label="arrows" />
+  <g
+     inkscape:groupmode="layer"
+     id="layer3"
+     inkscape:label="containers">
+    <g
+       id="g4485"
+       transform="translate(-0.29443947,0)">
+      <rect
+         style="display:inline;opacity:1;fill:#949da7;fill-opacity:1;stroke:#005976;stroke-width:3.64635539;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         id="rect5262"
+         width="310.02628"
+         height="374.55676"
+         x="2.8077147"
+         y="23.505817"
+         rx="0"
+         ry="0" />
+      <rect
+         rx="6.8984389"
+         ry="10.232184"
+         y="1.5027902"
+         x="240.35112"
+         height="39.570198"
+         width="73.907631"
+         id="rect5280"
+         style="opacity:1;fill:#394d54;fill-opacity:1;stroke:#005976;stroke-width:2.49513435;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <g
+         transform="matrix(0.62086873,0,0,0.60736447,133.55565,-15.720889)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#dbdde0;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot5284">
+        <path
+           d="m 190.17742,60.953398 q 0,-2.16 0.56,-4.24 0.56,-2.12 1.76,-3.8 1.2,-1.68 3.08,-2.68 1.88,-1.04 4.48,-1.04 2.6,0 4.48,1.04 1.88,1 3.08,2.68 1.2,1.68 1.76,3.8 0.56,2.08 0.56,4.24 0,2.16 -0.56,4.28 -0.56,2.08 -1.76,3.76 -1.2,1.68 -3.08,2.72 -1.88,1 -4.48,1 -2.6,0 -4.48,-1 -1.88,-1.04 -3.08,-2.72 -1.2,-1.68 -1.76,-3.76 -0.56,-2.12 -0.56,-4.28 z m -3.8,0 q 0,2.92 0.84,5.64 0.88,2.68 2.6,4.76 1.72,2.08 4.28,3.32 2.56,1.2 5.96,1.2 3.4,0 5.96,-1.2 2.56,-1.24 4.28,-3.32 1.72,-2.08 2.56,-4.76 0.88,-2.72 0.88,-5.64 0,-2.92 -0.88,-5.6 -0.84,-2.72 -2.56,-4.8 -1.72,-2.08 -4.28,-3.32 -2.56,-1.24 -5.96,-1.24 -3.4,0 -5.96,1.24 -2.56,1.24 -4.28,3.32 -1.72,2.08 -2.6,4.8 -0.84,2.68 -0.84,5.6 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7763" />
+        <path
+           d="m 234.80992,55.033398 3.6,0 q -0.08,-2.36 -0.92,-4.04 -0.8,-1.72 -2.24,-2.84 -1.4,-1.12 -3.28,-1.64 -1.88,-0.52 -4.08,-0.52 -1.96,0 -3.84,0.52 -1.84,0.48 -3.32,1.52 -1.44,1 -2.32,2.6 -0.88,1.56 -0.88,3.72 0,1.96 0.76,3.28 0.8,1.28 2.08,2.12 1.32,0.8 2.96,1.32 1.64,0.48 3.32,0.88 1.72,0.36 3.36,0.72 1.64,0.36 2.92,0.96 1.32,0.56 2.08,1.48 0.8,0.92 0.8,2.4 0,1.56 -0.64,2.56 -0.64,1 -1.68,1.6 -1.04,0.56 -2.36,0.8 -1.28,0.24 -2.56,0.24 -1.6,0 -3.12,-0.4 -1.52,-0.4 -2.68,-1.24 -1.12,-0.84 -1.84,-2.12 -0.68,-1.32 -0.68,-3.12 l -3.6,0 q 0,2.6 0.92,4.52 0.96,1.88 2.56,3.12 1.64,1.2 3.76,1.8 2.16,0.6 4.56,0.6 1.96,0 3.92,-0.48 2,-0.44 3.6,-1.44 1.6,-1.04 2.6,-2.64 1.04,-1.64 1.04,-3.92 0,-2.12 -0.8,-3.52 -0.76,-1.4 -2.08,-2.32 -1.28,-0.92 -2.92,-1.44 -1.64,-0.56 -3.36,-0.96 -1.68,-0.4 -3.32,-0.72 -1.64,-0.36 -2.96,-0.88 -1.28,-0.52 -2.08,-1.32 -0.76,-0.84 -0.76,-2.16 0,-1.4 0.52,-2.32 0.56,-0.96 1.44,-1.52 0.92,-0.56 2.08,-0.8 1.16,-0.24 2.36,-0.24 2.96,0 4.84,1.4 1.92,1.36 2.24,4.44 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7765" />
+        <path
+           d="m 262.12117,60.553398 -9.92,14.68 4.24,0 7.92,-11.8 7.68,11.8 4.64,0 -10.04,-14.68 9.44,-13.88 -4.24,0 -7.44,11.16 -7.12,-11.16 -4.56,0 9.4,13.88 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7767" />
+      </g>
+      <rect
+         ry="0"
+         rx="0"
+         y="168.52252"
+         x="70.247932"
+         height="230.32341"
+         width="243.59302"
+         id="rect4394"
+         style="display:inline;opacity:1;fill:#ade5f9;fill-opacity:1;stroke:#005976;stroke-width:2.07964277;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <rect
+         rx="7.8990598"
+         ry="7.8990588"
+         y="152.22887"
+         x="70.381065"
+         height="30.547466"
+         width="136.82939"
+         id="rect4345"
+         style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:2.34590101;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <g
+         transform="matrix(0.48549887,0,0,0.47493898,-15.603306,139.61046)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot4337">
+        <path
+           d="m 187.88477,72.829407 0,-30.3125 7.1875,0 q 4.96093,0 7.36328,1.738282 2.42187,1.71875 3.63281,5.214843 1.21094,3.496094 1.21094,8.28125 0,4.53125 -1.28907,7.96875 -1.26953,3.417969 -3.61328,5.273438 -2.34375,1.835937 -7.30468,1.835937 l -7.1875,0 z m 3.94531,-3.339843 2.63672,0 q 4.0039,0 5.70312,-1.425782 1.71875,-1.425781 2.44141,-3.945312 0.74219,-2.539063 0.74219,-6.992188 0,-4.355468 -0.85938,-6.699218 -0.85937,-2.34375 -2.46094,-3.59375 -1.60156,-1.269532 -5.2539,-1.269532 l -2.94922,0 0,23.925782 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7740" />
+        <path
+           d="m 220.91211,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7742" />
+        <path
+           d="m 254.23242,72.477845 q -3.39844,1.269531 -6.09375,1.269531 -3.55469,0 -6.54297,-1.894531 -2.98828,-1.914063 -4.64843,-5.664063 -1.66016,-3.769531 -1.66016,-8.515625 0,-4.707031 1.64062,-8.457031 1.64063,-3.75 4.62891,-5.683594 2.98828,-1.933593 6.58203,-1.933593 2.69531,0 6.09375,1.269531 l 0,3.59375 q -3.14453,-1.816406 -6.25,-1.816406 -2.34375,0 -4.29687,1.601562 -1.9336,1.601563 -3.125,4.707031 -1.19141,3.085938 -1.19141,6.71875 0,3.691407 1.23047,6.816407 1.23047,3.125 3.14453,4.6875 1.91406,1.542968 4.23828,1.542968 3.08594,0 6.25,-1.816406 l 0,3.574219 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7744" />
+        <path
+           d="m 260.0332,72.829407 0,-30.3125 3.94532,0 0,14.863282 11.26953,-14.863282 4.17968,0 -10.8789,14.394532 12.40234,15.917968 -4.9414,0 -12.03125,-15.449218 0,15.449218 -3.94532,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7746" />
+        <path
+           d="m 301.2832,69.489564 0,3.339843 -16.36718,0 0,-30.3125 16.05468,0 0,3.046875 -12.10937,0 0,10.292969 10.89844,0 0,3.027344 -10.89844,0 0,10.605469 12.42187,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7748" />
+        <path
+           d="m 311.86914,59.958314 0,12.871093 -3.94531,0 0,-30.3125 8.10547,0 q 3.96484,0 5.83984,0.820313 1.875,0.800781 2.89063,2.441406 1.03515,1.640625 1.03515,3.535156 0,1.757813 -0.68359,3.535157 -0.6836,1.777343 -1.89453,3.183593 -1.19141,1.386719 -3.45703,2.636719 l 8.76953,14.160156 -4.6875,0 -8.02735,-12.871093 -3.94531,0 z m 0,-3.046875 5.52734,0 q 1.85547,-1.015625 2.75391,-2.070313 0.91797,-1.054687 1.38672,-2.246094 0.46875,-1.191406 0.46875,-2.382812 0,-2.128906 -1.5625,-3.378906 -1.54297,-1.269532 -5.625,-1.269532 l -2.94922,0 0,11.347657 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7750" />
+        <path
+           d="m 330.38477,75.856751 0,-3.027344 21.21093,0 0,3.027344 -21.21093,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7752" />
+        <path
+           d="m 355.52148,72.829407 0,-30.3125 3.94532,0 0,13.046875 11.21093,0 0,-13.046875 3.92579,0 0,30.3125 -3.92579,0 0,-14.238281 -11.21093,0 0,14.238281 -3.94532,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7754" />
+        <path
+           d="m 389.03711,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7756" />
+        <path
+           d="m 404.17383,72.184876 0,-3.710937 q 4.6875,2.226562 8.51562,2.226562 1.60157,0 3.06641,-0.605469 1.46484,-0.625 2.20703,-1.777343 0.76172,-1.152344 0.76172,-2.5 0,-1.582032 -1.03516,-2.949219 -1.03515,-1.386719 -3.88672,-3.046875 l -1.99218,-1.152344 -2.01172,-1.152344 q -5.3125,-3.144531 -5.3125,-7.8125 0,-3.417968 2.36328,-5.742187 2.38281,-2.34375 7.42187,-2.34375 3.24219,0 6.26954,0.9375 l 0,3.378906 q -3.33985,-1.289062 -6.50391,-1.289062 -2.51953,0 -4.08203,1.328125 -1.54297,1.328125 -1.54297,3.203125 0,1.855468 1.19141,3.085937 1.1914,1.230469 3.04687,2.265625 l 1.52344,0.917969 1.89453,1.152344 1.60156,0.9375 q 4.98047,3.046875 4.98047,7.65625 0,3.515625 -2.55859,6.035156 -2.5586,2.519531 -8.20313,2.519531 -1.79687,0 -3.4375,-0.3125 -1.62109,-0.292969 -4.27734,-1.25 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7758" />
+        <path
+           d="m 435.11133,72.829407 0,-26.972656 -9.08203,0 0,-3.339844 22.1289,0 0,3.339844 -9.10156,0 0,26.972656 -3.94531,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7760" />
+      </g>
+      <path
+         sodipodi:nodetypes="cc"
+         inkscape:connector-curvature="0"
+         id="path8146"
+         d="m 157.79863,229.50628 c 37.00058,23.19886 12.85818,26.81485 7.03771,31.08319"
+         style="display:inline;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.78196704px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker8148)" />
+      <path
+         sodipodi:nodetypes="cc"
+         inkscape:connector-curvature="0"
+         id="path6142"
+         d="m 222.83291,217.48299 c 67.7606,4.04815 33.91703,32.0024 23.64644,43.35694"
+         style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.78196704px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker6352)" />
+      <path
+         sodipodi:nodetypes="cc"
+         inkscape:connector-curvature="0"
+         id="path6144"
+         d="M 157.27071,230.12149 C 45.405744,253.79288 92.109064,307.60128 124.18114,326.55296"
+         style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.78196704px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker6148)" />
+      <rect
+         style="opacity:1;fill:#f2f2f2;fill-opacity:1;stroke:#005976;stroke-width:2.34590101;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         id="rect4484"
+         width="118.45418"
+         height="24.487085"
+         x="101.69627"
+         y="204.32883" />
+      <g
+         transform="matrix(0.86243158,0,0,0.86574923,-54.246575,175.8598)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot4486">
+        <path
+           d="m 187.98859,51.93976 0,-10.026972 2.89587,0 q 1.19455,0 2.00902,0.343886 0.81446,0.325786 1.32124,0.977358 0.52488,0.633473 0.74207,1.556533 0.23529,0.904962 0.23529,2.063312 0,1.194549 -0.25339,2.045213 -0.23529,0.832565 -0.61537,1.393641 -0.38009,0.561076 -0.86876,0.886862 -0.47058,0.325786 -0.95926,0.506778 -0.48868,0.162893 -0.92306,0.217191 -0.43439,0.0362 -0.72397,0.0362 l -2.85968,0 z m -1.71943,-11.47491 0,12.922849 4.43431,0 q 1.61083,0 2.78728,-0.452481 1.17645,-0.452481 1.93662,-1.303144 0.76017,-0.868763 1.12215,-2.11761 0.36199,-1.266946 0.36199,-2.895877 0,-3.113067 -1.61083,-4.633402 -1.61083,-1.520335 -4.59721,-1.520335 l -4.43431,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7700" />
+        <path
+           d="m 199.86394,48.718098 q 0,-0.850664 0.2172,-1.502236 0.23529,-0.669671 0.63347,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54298,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39819,0.452481 0.61538,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61538,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59727,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39818,-0.452481 -0.63347,-1.104053 -0.2172,-0.669671 -0.2172,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85067,0.361984 1.95472,0.361984 1.12215,0 1.95472,-0.361984 0.85066,-0.380084 1.42984,-1.031656 0.57917,-0.669672 0.86876,-1.556534 0.28959,-0.886862 0.28959,-1.918518 0,-1.031656 -0.28959,-1.918518 -0.28959,-0.904962 -0.86876,-1.556534 -0.57918,-0.669671 -1.42984,-1.049755 -0.83257,-0.380084 -1.95472,-0.380084 -1.10405,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7702" />
+        <path
+           d="m 215.45219,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93662,0.398183 -0.83256,0.380084 -1.39364,1.067855 -0.54297,0.669671 -0.81446,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37554,0.977358 0.83257,0.343885 1.90042,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57464,0 q -0.14479,1.085953 -0.79636,1.683228 -0.63347,0.597274 -1.77373,0.597274 -0.72397,0 -1.24884,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48868,-1.104053 -0.14479,-0.615374 -0.14479,-1.266946 0,-0.70587 0.14479,-1.357442 0.1448,-0.669671 0.47058,-1.17645 0.34389,-0.506778 0.90497,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97735,0 1.55653,0.488679 0.57917,0.48868 0.76017,1.375542 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7704" />
+        <path
+           d="m 218.91112,40.46485 0,12.922849 1.53843,0 0,-3.547449 1.44794,-1.339343 3.20357,4.886792 1.95471,0 -3.98183,-5.954646 3.71034,-3.402655 -2.06331,0 -4.27142,4.090425 0,-7.655973 -1.53843,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7706" />
+        <path
+           d="m 234.72136,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08596,-0.217191 0.57917,0 1.04975,0.217191 0.48868,0.199091 0.83257,0.561076 0.36198,0.343885 0.56107,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59273,0.45248 -0.76017,0 -1.32125,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90496,1.538435 -0.32579,0.886862 -0.32579,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64249,-0.796366 1.04975,-0.796366 1.35744,-2.370999 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7708" />
+        <path
+           d="m 237.8907,44.030398 0,9.357301 1.53844,0 0,-4.162823 q 0,-0.904961 0.18099,-1.592732 0.18099,-0.70587 0.57918,-1.194549 0.39818,-0.488679 1.04975,-0.742068 0.65157,-0.253389 1.57463,-0.253389 l 0,-1.628931 q -1.24884,-0.0362 -2.06331,0.506779 -0.81446,0.542976 -1.37554,1.683228 l -0.0362,0 0,-1.972816 -1.44794,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7710" />
+        <path
+           d="m 250.13172,48.790495 q 0,-0.687771 0.1448,-1.339343 0.14479,-0.651572 0.47058,-1.158351 0.32578,-0.506778 0.86876,-0.814465 0.54298,-0.307687 1.32124,-0.307687 0.79637,0 1.35744,0.307687 0.56108,0.289588 0.90497,0.778267 0.36198,0.488679 0.52487,1.140251 0.1629,0.633473 0.1629,1.321244 0,0.651572 -0.1629,1.285045 -0.14479,0.633473 -0.48868,1.140251 -0.34388,0.48868 -0.88686,0.796366 -0.54297,0.307687 -1.32124,0.307687 -0.74207,0 -1.30315,-0.289587 -0.54297,-0.289588 -0.90496,-0.778267 -0.34388,-0.488679 -0.52488,-1.104053 -0.16289,-0.633473 -0.16289,-1.285045 z m 7.23969,4.597204 0,-12.922849 -1.53843,0 0,4.814395 -0.0362,0 q -0.25339,-0.416283 -0.63347,-0.687771 -0.36199,-0.289588 -0.77827,-0.452481 -0.41628,-0.180992 -0.83256,-0.253389 -0.41629,-0.0724 -0.77827,-0.0724 -1.06786,0 -1.88232,0.398183 -0.79637,0.380084 -1.33934,1.049755 -0.52488,0.651573 -0.79637,1.538435 -0.25339,0.886862 -0.25339,1.882319 0,0.995458 0.27149,1.88232 0.27149,0.886862 0.79637,1.556534 0.54297,0.669671 1.33934,1.067854 0.81446,0.398183 1.90042,0.398183 0.97736,0 1.79182,-0.343885 0.81447,-0.343885 1.19455,-1.122152 l 0.0362,0 0,1.266946 1.53843,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7712" />
+        <path
+           d="m 268.01008,53.3515 q -0.39818,0.23529 -1.10405,0.23529 -0.59727,0 -0.95926,-0.325786 -0.34388,-0.343885 -0.34388,-1.104053 -0.63348,0.760168 -1.48414,1.104053 -0.83257,0.325786 -1.80992,0.325786 -0.63348,0 -1.21265,-0.144794 -0.56108,-0.144793 -0.97736,-0.45248 -0.41628,-0.307687 -0.66967,-0.796366 -0.23529,-0.506779 -0.23529,-1.212649 0,-0.796366 0.27149,-1.303144 0.27149,-0.506778 0.70587,-0.814465 0.45248,-0.325786 1.01355,-0.488679 0.57918,-0.162893 1.17645,-0.271489 0.63348,-0.126694 1.19455,-0.180992 0.57918,-0.0724 1.01356,-0.180992 0.43438,-0.126695 0.68777,-0.343886 0.25339,-0.23529 0.25339,-0.669671 0,-0.506779 -0.19909,-0.814465 -0.18099,-0.307687 -0.48868,-0.47058 -0.28959,-0.162893 -0.66967,-0.217191 -0.36199,-0.0543 -0.72397,-0.0543 -0.97736,0 -1.62893,0.380084 -0.65157,0.361985 -0.70587,1.393641 l -1.53844,0 q 0.0362,-0.868763 0.36199,-1.466038 0.32578,-0.597274 0.86876,-0.959259 0.54298,-0.380084 1.23075,-0.542977 0.70587,-0.162893 1.50223,-0.162893 0.63348,0 1.24885,0.0905 0.63347,0.0905 1.14025,0.380084 0.50678,0.271488 0.81447,0.778267 0.30768,0.506778 0.30768,1.321244 l 0,4.814394 q 0,0.542977 0.0543,0.796366 0.0724,0.253389 0.43438,0.253389 0.19909,0 0.47058,-0.0905 l 0,1.194549 z m -2.49769,-4.796295 q -0.28959,0.217191 -0.76017,0.325786 -0.47058,0.0905 -0.99546,0.162893 -0.50677,0.0543 -1.03165,0.144794 -0.52488,0.0724 -0.94116,0.253389 -0.41628,0.180992 -0.68777,0.524878 -0.25339,0.325786 -0.25339,0.904961 0,0.380084 0.14479,0.651572 0.1629,0.253389 0.39819,0.416282 0.25339,0.162894 0.57917,0.23529 0.32579,0.0724 0.68777,0.0724 0.76017,0 1.30315,-0.199091 0.54297,-0.217191 0.88686,-0.524878 0.34388,-0.325786 0.50678,-0.68777 0.16289,-0.380084 0.16289,-0.70587 l 0,-1.574633 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7714" />
+        <path
+           d="m 275.93952,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08595,-0.217191 0.57918,0 1.04976,0.217191 0.48868,0.199091 0.83256,0.561076 0.36199,0.343885 0.56108,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59274,0.45248 -0.76016,0 -1.32124,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90497,1.538435 -0.32578,0.886862 -0.32578,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64248,-0.796366 1.04976,-0.796366 1.35745,-2.370999 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7716" />
+        <path
+           d="m 279.16316,44.030398 0,9.357301 1.53844,0 0,-5.827952 q 0,-0.271488 0.12669,-0.669671 0.1448,-0.416282 0.43439,-0.796366 0.30768,-0.380084 0.77826,-0.651572 0.48868,-0.271489 1.15835,-0.271489 0.52488,0 0.85067,0.162893 0.34388,0.144794 0.54297,0.434382 0.19909,0.271488 0.27149,0.651572 0.0905,0.380084 0.0905,0.832564 l 0,6.135639 1.53843,0 0,-5.827952 q 0,-1.085953 0.65157,-1.737526 0.65158,-0.651572 1.79183,-0.651572 0.56107,0 0.90496,0.162893 0.36198,0.162893 0.56108,0.452481 0.19909,0.271488 0.27148,0.651572 0.0724,0.380084 0.0724,0.814465 l 0,6.135639 1.53843,0 0,-6.859608 q 0,-0.723969 -0.23528,-1.230747 -0.2172,-0.524878 -0.63348,-0.850664 -0.39818,-0.325786 -0.97736,-0.47058 -0.56107,-0.162893 -1.26694,-0.162893 -0.92306,0 -1.70133,0.416282 -0.76017,0.416283 -1.23075,1.17645 -0.28958,-0.868763 -0.99545,-1.230747 -0.70587,-0.361985 -1.57464,-0.361985 -1.97281,0 -3.02257,1.592732 l -0.0362,0 0,-1.375541 -1.44794,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7718" />
+        <path
+           d="m 295.73329,48.718098 q 0,-0.850664 0.21719,-1.502236 0.23529,-0.669671 0.63348,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54297,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39818,0.452481 0.61537,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61537,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59728,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39819,-0.452481 -0.63348,-1.104053 -0.21719,-0.669671 -0.21719,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85066,0.361984 1.95472,0.361984 1.12215,0 1.95471,-0.361984 0.85067,-0.380084 1.42984,-1.031656 0.57918,-0.669672 0.86877,-1.556534 0.28958,-0.886862 0.28958,-1.918518 0,-1.031656 -0.28958,-1.918518 -0.28959,-0.904962 -0.86877,-1.556534 -0.57917,-0.669671 -1.42984,-1.049755 -0.83256,-0.380084 -1.95471,-0.380084 -1.10406,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7720" />
+        <path
+           d="m 304.96871,44.030398 0,9.357301 1.53843,0 0,-5.284975 q 0,-0.633473 0.16289,-1.15835 0.181,-0.542977 0.52488,-0.94116 0.34389,-0.398183 0.85067,-0.615374 0.52487,-0.217191 1.23074,-0.217191 0.88687,0 1.39364,0.506779 0.50678,0.506778 0.50678,1.375541 l 0,6.33473 1.53844,0 0,-6.153738 q 0,-0.760167 -0.1629,-1.375541 -0.14479,-0.633473 -0.52487,-1.085954 -0.38009,-0.45248 -0.99546,-0.70587 -0.61538,-0.253389 -1.53844,-0.253389 -2.08141,0 -3.04067,1.701328 l -0.0362,0 0,-1.484137 -1.44793,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7722" />
+      </g>
+      <g
+         transform="matrix(0.59683756,0,0,0.62856206,116.41511,253.93191)"
+         id="g5083">
+        <g
+           id="Isolation_Mode-6">
+          <polygon
+             style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+             points="122.005,77.57 122.005,22.429 32.038,0.183 0,19.314 0,72.66 92.65,101.222 "
+             id="polygon4984" />
+          <polygon
+             style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd"
+             points="5,23.314 92.65,45.131 92.65,94.222 5,67.66 "
+             id="polygon4986" />
+          <g
+             id="g4988">
+            <g
+               id="g4990">
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+                 id="polygon4992" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+                 id="polygon4994" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+                 id="polygon4996" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+                 id="polygon4998" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+                 id="polygon5000" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+                 id="polygon5002" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+                 id="polygon5004" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+                 id="polygon5006" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+                 id="polygon5008" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+                 id="polygon5010" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+                 id="polygon5012" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+                 id="polygon5014" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+                 id="polygon5016" />
+            </g>
+            <g
+               id="g5018">
+              <polygon
+                 style="fill:#394d54"
+                 points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+                 id="polygon5020" />
+              <polygon
+                 style="fill:#394d54"
+                 points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+                 id="polygon5022" />
+              <polygon
+                 style="fill:#394d54"
+                 points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+                 id="polygon5024" />
+              <polygon
+                 style="fill:#394d54"
+                 points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+                 id="polygon5026" />
+              <polygon
+                 style="fill:#394d54"
+                 points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+                 id="polygon5028" />
+              <polygon
+                 style="fill:#394d54"
+                 points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+                 id="polygon5030" />
+              <polygon
+                 style="fill:#394d54"
+                 points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+                 id="polygon5032" />
+              <polygon
+                 style="fill:#394d54"
+                 points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+                 id="polygon5034" />
+              <polygon
+                 style="fill:#394d54"
+                 points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+                 id="polygon5036" />
+              <polygon
+                 style="fill:#394d54"
+                 points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+                 id="polygon5038" />
+              <polygon
+                 style="fill:#394d54"
+                 points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+                 id="polygon5040" />
+              <polygon
+                 style="fill:#394d54"
+                 points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+                 id="polygon5042" />
+              <polygon
+                 style="fill:#394d54"
+                 points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+                 id="polygon5044" />
+            </g>
+          </g>
+          <polygon
+             style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+             points="92.65,46.469 117.005,29.184 117.005,75.212 92.65,94.398 "
+             id="polygon5046" />
+          <polygon
+             style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd"
+             points="5,23.314 32.538,6.683 117.005,27.429 92.65,45.131 "
+             id="polygon5048" />
+          <rect
+             style="fill:#394d54"
+             x="46.075001"
+             y="-12.046"
+             transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+             width="3.5250001"
+             height="92.299004"
+             id="rect5050" />
+          <rect
+             style="fill:#394d54"
+             x="103.715"
+             y="18.122"
+             transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+             width="3.5250001"
+             height="34.868999"
+             id="rect5052" />
+          <rect
+             style="fill:#394d54"
+             x="90.234001"
+             y="44.498001"
+             transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+             width="3.523"
+             height="51.174"
+             id="rect5054" />
+        </g>
+        <g
+           style="display:none"
+           id="Layer_2-0"
+           display="none">
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-49.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5063" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-46.612"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5065" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-4.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5067" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-52.994999"
+             y1="-67.064003"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5069" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-76.915001"
+             y2="-49.472"
+             id="line5071" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-165"
+             y2="-71.179001"
+             id="line5073" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-77.349998"
+             y2="-0.271"
+             id="line5075" />
+        </g>
+      </g>
+      <g
+         id="g5133"
+         transform="matrix(0.59683756,0,0,0.62856206,194.85661,253.93191)">
+        <g
+           id="g5135">
+          <polygon
+             id="polygon5137"
+             points="32.038,0.183 0,19.314 0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 "
+             style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd" />
+          <polygon
+             id="polygon5139"
+             points="92.65,94.222 5,67.66 5,23.314 92.65,45.131 "
+             style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd" />
+          <g
+             id="g5141">
+            <g
+               id="g5143">
+              <polygon
+                 id="polygon5145"
+                 points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5147"
+                 points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5149"
+                 points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5151"
+                 points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5153"
+                 points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5155"
+                 points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5157"
+                 points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5159"
+                 points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5161"
+                 points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5163"
+                 points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5165"
+                 points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5167"
+                 points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5169"
+                 points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            </g>
+            <g
+               id="g5171">
+              <polygon
+                 id="polygon5173"
+                 points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5175"
+                 points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5177"
+                 points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5179"
+                 points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5181"
+                 points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5183"
+                 points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5185"
+                 points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5187"
+                 points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5189"
+                 points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5191"
+                 points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5193"
+                 points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5195"
+                 points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5197"
+                 points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+                 style="fill:#394d54" />
+            </g>
+          </g>
+          <polygon
+             id="polygon5199"
+             points="117.005,75.212 92.65,94.398 92.65,46.469 117.005,29.184 "
+             style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+          <polygon
+             id="polygon5201"
+             points="117.005,27.429 92.65,45.131 5,23.314 32.538,6.683 "
+             style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd" />
+          <rect
+             id="rect5203"
+             height="92.299004"
+             width="3.5250001"
+             transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+             y="-12.046"
+             x="46.075001"
+             style="fill:#394d54" />
+          <rect
+             id="rect5205"
+             height="34.868999"
+             width="3.5250001"
+             transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+             y="18.122"
+             x="103.715"
+             style="fill:#394d54" />
+          <rect
+             id="rect5207"
+             height="51.174"
+             width="3.523"
+             transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+             y="44.498001"
+             x="90.234001"
+             style="fill:#394d54" />
+        </g>
+        <g
+           display="none"
+           id="g5209"
+           style="display:none">
+          <line
+             id="line5211"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-49.362"
+             x1="-77.349998"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5213"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-46.612"
+             x1="-77.349998"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5215"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-4.362"
+             x1="-77.349998"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5217"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-67.064003"
+             x1="-52.994999"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5219"
+             y2="-49.472"
+             x2="-76.915001"
+             y1="-492.659"
+             x1="532.88"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5221"
+             y2="-71.179001"
+             x2="-165"
+             y1="-492.659"
+             x1="532.88"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5223"
+             y2="-0.271"
+             x2="-77.349998"
+             y1="-492.659"
+             x1="532.88"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        </g>
+      </g>
+      <g
+         transform="matrix(0.59683756,0,0,0.62856206,116.41511,318.58401)"
+         id="g5225">
+        <g
+           id="g5227">
+          <polygon
+             style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+             points="0,19.314 0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 32.038,0.183 "
+             id="polygon5229" />
+          <polygon
+             style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd"
+             points="5,67.66 5,23.314 92.65,45.131 92.65,94.222 "
+             id="polygon5231" />
+          <g
+             id="g5233">
+            <g
+               id="g5235">
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="11.364,30.067 7.917,29.184 7.917,66.177 11.364,66.906 "
+                 id="polygon5237" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="17.598,31.68 14.134,30.801 14.134,68.013 17.598,68.765 "
+                 id="polygon5239" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="23.831,33.292 20.353,32.418 20.353,69.848 23.831,70.624 "
+                 id="polygon5241" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="30.063,34.905 26.569,34.035 26.569,71.684 30.063,72.483 "
+                 id="polygon5243" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="36.298,36.518 32.789,35.652 32.789,73.519 36.298,74.341 "
+                 id="polygon5245" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="42.532,38.13 39.007,37.27 39.007,75.354 42.532,76.201 "
+                 id="polygon5247" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="48.765,39.743 45.224,38.887 45.224,77.19 48.765,78.06 "
+                 id="polygon5249" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="54.999,41.356 51.442,40.504 51.442,79.025 54.999,79.919 "
+                 id="polygon5251" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="61.231,42.968 57.661,42.122 57.661,80.86 61.231,81.777 "
+                 id="polygon5253" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="67.466,44.581 63.878,43.737 63.878,82.697 67.466,83.637 "
+                 id="polygon5255" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="73.699,46.193 70.097,45.355 70.097,84.532 73.699,85.496 "
+                 id="polygon5257" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="79.933,47.806 76.313,46.973 76.313,86.367 79.933,87.354 "
+                 id="polygon5259" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="86.165,89.213 86.165,49.418 82.532,48.589 82.532,88.203 "
+                 id="polygon5261" />
+            </g>
+            <g
+               id="g5263">
+              <polygon
+                 style="fill:#394d54"
+                 points="11.364,30.067 7.917,29.184 7.917,66.177 11.364,66.906 "
+                 id="polygon5265" />
+              <polygon
+                 style="fill:#394d54"
+                 points="17.598,31.68 14.134,30.801 14.134,68.013 17.598,68.765 "
+                 id="polygon5267" />
+              <polygon
+                 style="fill:#394d54"
+                 points="23.831,33.292 20.353,32.418 20.353,69.848 23.831,70.624 "
+                 id="polygon5269" />
+              <polygon
+                 style="fill:#394d54"
+                 points="30.063,34.905 26.569,34.035 26.569,71.684 30.063,72.483 "
+                 id="polygon5271" />
+              <polygon
+                 style="fill:#394d54"
+                 points="36.298,36.518 32.789,35.652 32.789,73.519 36.298,74.341 "
+                 id="polygon5273" />
+              <polygon
+                 style="fill:#394d54"
+                 points="42.532,38.13 39.007,37.27 39.007,75.354 42.532,76.201 "
+                 id="polygon5275" />
+              <polygon
+                 style="fill:#394d54"
+                 points="48.765,39.743 45.224,38.887 45.224,77.19 48.765,78.06 "
+                 id="polygon5277" />
+              <polygon
+                 style="fill:#394d54"
+                 points="54.999,41.356 51.442,40.504 51.442,79.025 54.999,79.919 "
+                 id="polygon5279" />
+              <polygon
+                 style="fill:#394d54"
+                 points="61.231,42.968 57.661,42.122 57.661,80.86 61.231,81.777 "
+                 id="polygon5281" />
+              <polygon
+                 style="fill:#394d54"
+                 points="67.466,44.581 63.878,43.737 63.878,82.697 67.466,83.637 "
+                 id="polygon5283" />
+              <polygon
+                 style="fill:#394d54"
+                 points="73.699,46.193 70.097,45.355 70.097,84.532 73.699,85.496 "
+                 id="polygon5285" />
+              <polygon
+                 style="fill:#394d54"
+                 points="79.933,47.806 76.313,46.973 76.313,86.367 79.933,87.354 "
+                 id="polygon5287" />
+              <polygon
+                 style="fill:#394d54"
+                 points="86.165,89.213 86.165,49.418 82.532,48.589 82.532,88.203 "
+                 id="polygon5289" />
+            </g>
+          </g>
+          <polygon
+             style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+             points="92.65,94.398 92.65,46.469 117.005,29.184 117.005,75.212 "
+             id="polygon5291" />
+          <polygon
+             style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd"
+             points="92.65,45.131 5,23.314 32.538,6.683 117.005,27.429 "
+             id="polygon5293" />
+          <rect
+             style="fill:#394d54"
+             x="46.075001"
+             y="-12.046"
+             transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+             width="3.5250001"
+             height="92.299004"
+             id="rect5295" />
+          <rect
+             style="fill:#394d54"
+             x="103.715"
+             y="18.122"
+             transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+             width="3.5250001"
+             height="34.868999"
+             id="rect5297" />
+          <rect
+             style="fill:#394d54"
+             x="90.234001"
+             y="44.498001"
+             transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+             width="3.523"
+             height="51.174"
+             id="rect5299" />
+        </g>
+        <g
+           style="display:none"
+           id="g5301"
+           display="none">
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-49.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5303" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-46.612"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5305" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-4.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5307" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-52.994999"
+             y1="-67.064003"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5309" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-76.915001"
+             y2="-49.472"
+             id="line5311" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-165"
+             y2="-71.179001"
+             id="line5313" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-77.349998"
+             y2="-0.271"
+             id="line5315" />
+        </g>
+      </g>
+      <rect
+         style="opacity:1;fill:#394d54;fill-opacity:1;stroke:#005976;stroke-width:1.95111275;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         id="rect5078"
+         width="93.442245"
+         height="30.942587"
+         x="220.62865"
+         y="152.03131"
+         ry="8.0012312"
+         rx="5.394352" />
+      <g
+         transform="matrix(0.48549887,0,0,0.47493898,137.13218,138.44897)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#dbdde0;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot6134">
+        <path
+           d="m 187.97742,46.673398 0,28.56 18.92,0 0,-3.2 -15.12,0 0,-25.36 -3.8,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7725" />
+        <path
+           d="m 213.28305,50.833398 0,-4.16 -3.4,0 0,4.16 3.4,0 z m -3.4,3.72 0,20.68 3.4,0 0,-20.68 -3.4,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7727" />
+        <path
+           d="m 218.5893,54.553398 0,20.68 3.4,0 0,-11.68 q 0,-1.4 0.36,-2.56 0.4,-1.2 1.16,-2.08 0.76,-0.88 1.88,-1.36 1.16,-0.48 2.72,-0.48 1.96,0 3.08,1.12 1.12,1.12 1.12,3.04 l 0,14 3.4,0 0,-13.6 q 0,-1.68 -0.36,-3.04 -0.32,-1.4 -1.16,-2.4 -0.84,-1 -2.2,-1.56 -1.36,-0.56 -3.4,-0.56 -4.6,0 -6.72,3.76 l -0.08,0 0,-3.28 -3.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7729" />
+        <path
+           d="m 257.97492,75.233398 0,-20.68 -3.4,0 0,11.68 q 0,1.4 -0.4,2.6 -0.36,1.16 -1.12,2.04 -0.76,0.88 -1.92,1.36 -1.12,0.48 -2.68,0.48 -1.96,0 -3.08,-1.12 -1.12,-1.12 -1.12,-3.04 l 0,-14 -3.4,0 0,13.6 q 0,1.68 0.32,3.08 0.36,1.36 1.2,2.36 0.84,1 2.2,1.56 1.36,0.52 3.4,0.52 2.28,0 3.96,-0.88 1.68,-0.92 2.76,-2.84 l 0.08,0 0,3.28 3.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7731" />
+        <path
+           d="m 268.68055,64.353398 -7.76,10.88 4.12,0 5.76,-8.56 5.76,8.56 4.36,0 -8,-11.16 7.12,-9.52 -4.08,0 -5.16,7.24 -4.96,-7.24 -4.36,0 7.2,9.8 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7733" />
+        <path
+           d="m 306.71742,75.233398 10.16,-28.56 -3.96,0 -8.24,24.76 -0.08,0 -8.16,-24.76 -4.08,0 10.04,28.56 4.32,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7735" />
+        <path
+           d="m 320.01055,46.673398 0,28.56 3.6,0 0,-23.76 0.08,0 8.92,23.76 3.24,0 8.92,-23.76 0.08,0 0,23.76 3.6,0 0,-28.56 -5.2,0 -9.04,24 -9,-24 -5.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7737" />
+      </g>
+    </g>
+  </g>
+  <g
+     inkscape:label="default"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-73.928551,-292.36218)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot4362"
+       style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:sans-serif;font-style:normal;font-weight:normal;font-size:40px;line-height:125%;letter-spacing:0px;word-spacing:0px"><flowRegion
+         id="flowRegion4364"><rect
+           id="rect4366"
+           width="95.964493"
+           height="43.436558"
+           x="171.72594"
+           y="102.005" /></flowRegion><flowPara
+         id="flowPara4368" /></flowRoot>    <g
+       style="display:none"
+       id="Layer_2"
+       display="none"
+       transform="translate(862.40058,602.59637)">
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-77.349998"
+         y1="-49.362"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4716" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-77.349998"
+         y1="-46.612"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4718" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-77.349998"
+         y1="-4.362"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4720" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-52.994999"
+         y1="-67.064003"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4722" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="532.88"
+         y1="-492.659"
+         x2="-76.915001"
+         y2="-49.472"
+         id="line4724" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="532.88"
+         y1="-492.659"
+         x2="-165"
+         y2="-71.179001"
+         id="line4726" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="532.88"
+         y1="-492.659"
+         x2="-77.349998"
+         y2="-0.271"
+         id="line4728" />
+    </g>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5920)"
+       d="M 187.77436,389.79113 C 131.26725,402.59092 53.961508,490.55977 172.27674,510.09709"
+       id="path5884"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <rect
+       style="opacity:1;fill:#f2f2f2;fill-opacity:1;stroke:#005976;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect4461"
+       width="151.48232"
+       height="31.31473"
+       x="107.04051"
+       y="357.65875" />
+    <g
+       transform="matrix(1.1029002,0,0,1.1071429,-80.18049,321.25181)"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot4354">
+      <path
+         d="m 187.98859,51.93976 0,-10.026972 2.89587,0 q 1.19455,0 2.00902,0.343886 0.81446,0.325786 1.32124,0.977358 0.52488,0.633473 0.74207,1.556533 0.23529,0.904962 0.23529,2.063312 0,1.194549 -0.25339,2.045213 -0.23529,0.832565 -0.61537,1.393641 -0.38009,0.561076 -0.86876,0.886862 -0.47058,0.325786 -0.95926,0.506778 -0.48868,0.162893 -0.92306,0.217191 -0.43439,0.0362 -0.72397,0.0362 l -2.85968,0 z m -1.71943,-11.47491 0,12.922849 4.43431,0 q 1.61083,0 2.78728,-0.452481 1.17645,-0.452481 1.93662,-1.303144 0.76017,-0.868763 1.12215,-2.11761 0.36199,-1.266946 0.36199,-2.895877 0,-3.113067 -1.61083,-4.633402 -1.61083,-1.520335 -4.59721,-1.520335 l -4.43431,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6983"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 199.86394,48.718098 q 0,-0.850664 0.2172,-1.502236 0.23529,-0.669671 0.63347,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54298,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39819,0.452481 0.61538,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61538,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59727,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39818,-0.452481 -0.63347,-1.104053 -0.2172,-0.669671 -0.2172,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85067,0.361984 1.95472,0.361984 1.12215,0 1.95472,-0.361984 0.85066,-0.380084 1.42984,-1.031656 0.57917,-0.669672 0.86876,-1.556534 0.28959,-0.886862 0.28959,-1.918518 0,-1.031656 -0.28959,-1.918518 -0.28959,-0.904962 -0.86876,-1.556534 -0.57918,-0.669671 -1.42984,-1.049755 -0.83257,-0.380084 -1.95472,-0.380084 -1.10405,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6985"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 215.45219,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93662,0.398183 -0.83256,0.380084 -1.39364,1.067855 -0.54297,0.669671 -0.81446,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37554,0.977358 0.83257,0.343885 1.90042,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57464,0 q -0.14479,1.085953 -0.79636,1.683228 -0.63347,0.597274 -1.77373,0.597274 -0.72397,0 -1.24884,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48868,-1.104053 -0.14479,-0.615374 -0.14479,-1.266946 0,-0.70587 0.14479,-1.357442 0.1448,-0.669671 0.47058,-1.17645 0.34389,-0.506778 0.90497,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97735,0 1.55653,0.488679 0.57917,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6987"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 218.91112,40.46485 0,12.922849 1.53843,0 0,-3.547449 1.44794,-1.339343 3.20357,4.886792 1.95471,0 -3.98183,-5.954646 3.71034,-3.402655 -2.06331,0 -4.27142,4.090425 0,-7.655973 -1.53843,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6989"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 234.72136,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08596,-0.217191 0.57917,0 1.04975,0.217191 0.48868,0.199091 0.83257,0.561076 0.36198,0.343885 0.56107,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59273,0.45248 -0.76017,0 -1.32125,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90496,1.538435 -0.32579,0.886862 -0.32579,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64249,-0.796366 1.04975,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6991"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 237.8907,44.030398 0,9.357301 1.53844,0 0,-4.162823 q 0,-0.904961 0.18099,-1.592732 0.18099,-0.70587 0.57918,-1.194549 0.39818,-0.488679 1.04975,-0.742068 0.65157,-0.253389 1.57463,-0.253389 l 0,-1.628931 q -1.24884,-0.0362 -2.06331,0.506779 -0.81446,0.542976 -1.37554,1.683228 l -0.0362,0 0,-1.972816 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6993"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 255.3624,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93661,0.398183 -0.83257,0.380084 -1.39364,1.067855 -0.54298,0.669671 -0.81447,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37555,0.977358 0.83256,0.343885 1.90041,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57463,0 q -0.1448,1.085953 -0.79637,1.683228 -0.63347,0.597274 -1.77372,0.597274 -0.72397,0 -1.24885,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48867,-1.104053 -0.1448,-0.615374 -0.1448,-1.266946 0,-0.70587 0.1448,-1.357442 0.14479,-0.669671 0.47058,-1.17645 0.34388,-0.506778 0.90496,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97736,0 1.55653,0.488679 0.57918,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6995"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 258.82133,40.46485 0,12.922849 1.53844,0 0,-12.922849 -1.53844,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6997"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 264.38967,42.34717 0,-1.88232 -1.53844,0 0,1.88232 1.53844,0 z m -1.53844,1.683228 0,9.357301 1.53844,0 0,-9.357301 -1.53844,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6999"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 273.28827,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54297,-0.850664 0.34389,-0.361985 0.81447,-0.561076 0.48868,-0.217191 1.08595,-0.217191 0.57918,0 1.04976,0.217191 0.48868,0.199091 0.83256,0.561076 0.36199,0.343885 0.56108,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48413,2.606288 -1.52033,0 q -0.19909,0.923061 -0.83257,1.375542 -0.61537,0.45248 -1.59273,0.45248 -0.76017,0 -1.32124,-0.253389 -0.56108,-0.253389 -0.92306,-0.669671 -0.36199,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.1629,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32124,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77373,0.361985 -0.79636,0.361984 -1.39364,1.013556 -0.57917,0.651573 -0.90496,1.538435 -0.32578,0.886862 -0.32578,1.954716 0.0362,1.067855 0.30768,1.972816 0.28959,0.904962 0.85067,1.556534 0.56107,0.651572 1.37554,1.013557 0.83256,0.361984 1.95471,0.361984 1.59274,0 2.64249,-0.796366 1.04976,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7001"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 276.51191,44.030398 0,9.357301 1.53844,0 0,-5.284975 q 0,-0.633473 0.16289,-1.15835 0.18099,-0.542977 0.52488,-0.94116 0.34388,-0.398183 0.85066,-0.615374 0.52488,-0.217191 1.23075,-0.217191 0.88686,0 1.39364,0.506779 0.50678,0.506778 0.50678,1.375541 l 0,6.33473 1.53843,0 0,-6.153738 q 0,-0.760167 -0.16289,-1.375541 -0.1448,-0.633473 -0.52488,-1.085954 -0.38008,-0.45248 -0.99546,-0.70587 -0.61537,-0.253389 -1.53843,-0.253389 -2.08141,0 -3.04067,1.701328 l -0.0362,0 0,-1.484137 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7003"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 288.72239,44.030398 0,-2.80538 -1.53844,0 0,2.80538 -1.59273,0 0,1.357442 1.59273,0 0,5.954646 q 0,0.651572 0.1267,1.049755 0.12669,0.398183 0.38008,0.615374 0.27149,0.217191 0.68777,0.307687 0.43439,0.0724 1.03166,0.0724 l 1.17645,0 0,-1.357442 -0.70587,0 q -0.36199,0 -0.59727,-0.0181 -0.2172,-0.0362 -0.34389,-0.126694 -0.12669,-0.0905 -0.18099,-0.253389 -0.0362,-0.162893 -0.0362,-0.434382 l 0,-5.809852 1.86422,0 0,-1.357442 -1.86422,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7005"
+         inkscape:connector-curvature="0" />
+    </g>
+  </g>
+</svg>
diff --git a/docs/sources/installation/images/win_docker_host.svg b/docs/sources/installation/images/win_docker_host.svg
new file mode 100644
index 0000000..eef284e
--- /dev/null
+++ b/docs/sources/installation/images/win_docker_host.svg
@@ -0,0 +1,1259 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="88.900002mm"
+   height="112.88889mm"
+   viewBox="0 0 315.00001 399.99999"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.91 r13725"
+   sodipodi:docname="win_docker_host.svg">
+  <defs
+     id="defs4">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker8148"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path8150"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker6352"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path6354"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker6148"
+       style="overflow:visible;"
+       inkscape:isstock="true"
+       inkscape:collect="always">
+      <path
+         id="path6150"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker5920"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5922"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="marker5886"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5888"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5368"
+         style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round;stroke:#000000;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Lend"
+       style="overflow:visible;"
+       inkscape:isstock="true">
+      <path
+         id="path5350"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1;fill:#000000;fill-opacity:1"
+         transform="scale(0.8) rotate(180) translate(12.5,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="2.9893238"
+     inkscape:cx="190.2131"
+     inkscape:cy="316.35715"
+     inkscape:document-units="px"
+     inkscape:current-layer="g4485"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="1680"
+     inkscape:window-height="1005"
+     inkscape:window-x="4"
+     inkscape:window-y="0"
+     inkscape:window-maximized="0"
+     inkscape:showpageshadow="false" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:groupmode="layer"
+     id="layer2"
+     inkscape:label="host"
+     style="display:inline" />
+  <g
+     inkscape:groupmode="layer"
+     id="layer4"
+     inkscape:label="arrows" />
+  <g
+     inkscape:groupmode="layer"
+     id="layer3"
+     inkscape:label="containers">
+    <g
+       id="g4485"
+       transform="translate(-0.29443947,0)">
+      <rect
+         style="display:inline;opacity:1;fill:#949da7;fill-opacity:1;stroke:#005976;stroke-width:3.64635539;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         id="rect5262"
+         width="310.02628"
+         height="374.55676"
+         x="2.8077147"
+         y="23.505817"
+         rx="0"
+         ry="0" />
+      <rect
+         rx="6.8984389"
+         ry="10.232184"
+         y="1.5027902"
+         x="196.5285"
+         height="39.570198"
+         width="117.73026"
+         id="rect5280"
+         style="opacity:1;fill:#394d54;fill-opacity:1;stroke:#005976;stroke-width:2.49513435;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <rect
+         ry="0"
+         rx="0"
+         y="168.52252"
+         x="70.247932"
+         height="230.32341"
+         width="243.59302"
+         id="rect4394"
+         style="display:inline;opacity:1;fill:#ade5f9;fill-opacity:1;stroke:#005976;stroke-width:2.07964277;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <rect
+         rx="7.8990598"
+         ry="7.8990588"
+         y="152.22887"
+         x="70.381065"
+         height="30.547466"
+         width="136.82939"
+         id="rect4345"
+         style="opacity:1;fill:#fcfcfc;fill-opacity:1;stroke:#005976;stroke-width:2.34590101;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <g
+         transform="matrix(0.48549887,0,0,0.47493898,-15.603306,139.61046)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot4337">
+        <path
+           d="m 187.88477,72.829407 0,-30.3125 7.1875,0 q 4.96093,0 7.36328,1.738282 2.42187,1.71875 3.63281,5.214843 1.21094,3.496094 1.21094,8.28125 0,4.53125 -1.28907,7.96875 -1.26953,3.417969 -3.61328,5.273438 -2.34375,1.835937 -7.30468,1.835937 l -7.1875,0 z m 3.94531,-3.339843 2.63672,0 q 4.0039,0 5.70312,-1.425782 1.71875,-1.425781 2.44141,-3.945312 0.74219,-2.539063 0.74219,-6.992188 0,-4.355468 -0.85938,-6.699218 -0.85937,-2.34375 -2.46094,-3.59375 -1.60156,-1.269532 -5.2539,-1.269532 l -2.94922,0 0,23.925782 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7740" />
+        <path
+           d="m 220.91211,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7742" />
+        <path
+           d="m 254.23242,72.477845 q -3.39844,1.269531 -6.09375,1.269531 -3.55469,0 -6.54297,-1.894531 -2.98828,-1.914063 -4.64843,-5.664063 -1.66016,-3.769531 -1.66016,-8.515625 0,-4.707031 1.64062,-8.457031 1.64063,-3.75 4.62891,-5.683594 2.98828,-1.933593 6.58203,-1.933593 2.69531,0 6.09375,1.269531 l 0,3.59375 q -3.14453,-1.816406 -6.25,-1.816406 -2.34375,0 -4.29687,1.601562 -1.9336,1.601563 -3.125,4.707031 -1.19141,3.085938 -1.19141,6.71875 0,3.691407 1.23047,6.816407 1.23047,3.125 3.14453,4.6875 1.91406,1.542968 4.23828,1.542968 3.08594,0 6.25,-1.816406 l 0,3.574219 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7744" />
+        <path
+           d="m 260.0332,72.829407 0,-30.3125 3.94532,0 0,14.863282 11.26953,-14.863282 4.17968,0 -10.8789,14.394532 12.40234,15.917968 -4.9414,0 -12.03125,-15.449218 0,15.449218 -3.94532,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7746" />
+        <path
+           d="m 301.2832,69.489564 0,3.339843 -16.36718,0 0,-30.3125 16.05468,0 0,3.046875 -12.10937,0 0,10.292969 10.89844,0 0,3.027344 -10.89844,0 0,10.605469 12.42187,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7748" />
+        <path
+           d="m 311.86914,59.958314 0,12.871093 -3.94531,0 0,-30.3125 8.10547,0 q 3.96484,0 5.83984,0.820313 1.875,0.800781 2.89063,2.441406 1.03515,1.640625 1.03515,3.535156 0,1.757813 -0.68359,3.535157 -0.6836,1.777343 -1.89453,3.183593 -1.19141,1.386719 -3.45703,2.636719 l 8.76953,14.160156 -4.6875,0 -8.02735,-12.871093 -3.94531,0 z m 0,-3.046875 5.52734,0 q 1.85547,-1.015625 2.75391,-2.070313 0.91797,-1.054687 1.38672,-2.246094 0.46875,-1.191406 0.46875,-2.382812 0,-2.128906 -1.5625,-3.378906 -1.54297,-1.269532 -5.625,-1.269532 l -2.94922,0 0,11.347657 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7750" />
+        <path
+           d="m 330.38477,75.856751 0,-3.027344 21.21093,0 0,3.027344 -21.21093,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7752" />
+        <path
+           d="m 355.52148,72.829407 0,-30.3125 3.94532,0 0,13.046875 11.21093,0 0,-13.046875 3.92579,0 0,30.3125 -3.92579,0 0,-14.238281 -11.21093,0 0,14.238281 -3.94532,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7754" />
+        <path
+           d="m 389.03711,73.747376 q -5.15625,0 -7.89063,-4.335937 -2.71484,-4.355469 -2.71484,-11.738282 0,-7.363281 2.71484,-11.699218 2.73438,-4.355469 7.89063,-4.355469 5.17578,0 7.89062,4.355469 2.71485,4.335937 2.71485,11.699218 0,7.382813 -2.71485,11.738282 -2.71484,4.335937 -7.89062,4.335937 z m 0,-3.046875 q 2.85156,0 4.74609,-3.222656 1.91407,-3.242188 1.91407,-9.804688 0,-6.5625 -1.91407,-9.785156 -1.89453,-3.242187 -4.74609,-3.242187 -2.85156,0 -4.76563,3.242187 -1.89453,3.222656 -1.89453,9.785156 0,6.5625 1.89453,9.804688 1.91407,3.222656 4.76563,3.222656 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7756" />
+        <path
+           d="m 404.17383,72.184876 0,-3.710937 q 4.6875,2.226562 8.51562,2.226562 1.60157,0 3.06641,-0.605469 1.46484,-0.625 2.20703,-1.777343 0.76172,-1.152344 0.76172,-2.5 0,-1.582032 -1.03516,-2.949219 -1.03515,-1.386719 -3.88672,-3.046875 l -1.99218,-1.152344 -2.01172,-1.152344 q -5.3125,-3.144531 -5.3125,-7.8125 0,-3.417968 2.36328,-5.742187 2.38281,-2.34375 7.42187,-2.34375 3.24219,0 6.26954,0.9375 l 0,3.378906 q -3.33985,-1.289062 -6.50391,-1.289062 -2.51953,0 -4.08203,1.328125 -1.54297,1.328125 -1.54297,3.203125 0,1.855468 1.19141,3.085937 1.1914,1.230469 3.04687,2.265625 l 1.52344,0.917969 1.89453,1.152344 1.60156,0.9375 q 4.98047,3.046875 4.98047,7.65625 0,3.515625 -2.55859,6.035156 -2.5586,2.519531 -8.20313,2.519531 -1.79687,0 -3.4375,-0.3125 -1.62109,-0.292969 -4.27734,-1.25 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7758" />
+        <path
+           d="m 435.11133,72.829407 0,-26.972656 -9.08203,0 0,-3.339844 22.1289,0 0,3.339844 -9.10156,0 0,26.972656 -3.94531,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monaco;-inkscape-font-specification:Monaco"
+           id="path7760" />
+      </g>
+      <path
+         sodipodi:nodetypes="cc"
+         inkscape:connector-curvature="0"
+         id="path8146"
+         d="m 157.79863,229.50628 c 37.00058,23.19886 12.85818,26.81485 7.03771,31.08319"
+         style="display:inline;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.78196704px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker8148)" />
+      <path
+         sodipodi:nodetypes="cc"
+         inkscape:connector-curvature="0"
+         id="path6142"
+         d="m 222.83291,217.48299 c 67.7606,4.04815 33.91703,32.0024 23.64644,43.35694"
+         style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.78196704px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker6352)" />
+      <path
+         sodipodi:nodetypes="cc"
+         inkscape:connector-curvature="0"
+         id="path6144"
+         d="M 157.27071,230.12149 C 45.405744,253.79288 92.109064,307.60128 124.18114,326.55296"
+         style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.78196704px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker6148)" />
+      <rect
+         style="opacity:1;fill:#f2f2f2;fill-opacity:1;stroke:#005976;stroke-width:2.34590101;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         id="rect4484"
+         width="118.45418"
+         height="24.487085"
+         x="101.69627"
+         y="204.32883" />
+      <g
+         transform="matrix(0.86243158,0,0,0.86574923,-54.246575,175.8598)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot4486">
+        <path
+           d="m 187.98859,51.93976 0,-10.026972 2.89587,0 q 1.19455,0 2.00902,0.343886 0.81446,0.325786 1.32124,0.977358 0.52488,0.633473 0.74207,1.556533 0.23529,0.904962 0.23529,2.063312 0,1.194549 -0.25339,2.045213 -0.23529,0.832565 -0.61537,1.393641 -0.38009,0.561076 -0.86876,0.886862 -0.47058,0.325786 -0.95926,0.506778 -0.48868,0.162893 -0.92306,0.217191 -0.43439,0.0362 -0.72397,0.0362 l -2.85968,0 z m -1.71943,-11.47491 0,12.922849 4.43431,0 q 1.61083,0 2.78728,-0.452481 1.17645,-0.452481 1.93662,-1.303144 0.76017,-0.868763 1.12215,-2.11761 0.36199,-1.266946 0.36199,-2.895877 0,-3.113067 -1.61083,-4.633402 -1.61083,-1.520335 -4.59721,-1.520335 l -4.43431,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7700" />
+        <path
+           d="m 199.86394,48.718098 q 0,-0.850664 0.2172,-1.502236 0.23529,-0.669671 0.63347,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54298,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39819,0.452481 0.61538,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61538,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59727,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39818,-0.452481 -0.63347,-1.104053 -0.2172,-0.669671 -0.2172,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85067,0.361984 1.95472,0.361984 1.12215,0 1.95472,-0.361984 0.85066,-0.380084 1.42984,-1.031656 0.57917,-0.669672 0.86876,-1.556534 0.28959,-0.886862 0.28959,-1.918518 0,-1.031656 -0.28959,-1.918518 -0.28959,-0.904962 -0.86876,-1.556534 -0.57918,-0.669671 -1.42984,-1.049755 -0.83257,-0.380084 -1.95472,-0.380084 -1.10405,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7702" />
+        <path
+           d="m 215.45219,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93662,0.398183 -0.83256,0.380084 -1.39364,1.067855 -0.54297,0.669671 -0.81446,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37554,0.977358 0.83257,0.343885 1.90042,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57464,0 q -0.14479,1.085953 -0.79636,1.683228 -0.63347,0.597274 -1.77373,0.597274 -0.72397,0 -1.24884,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48868,-1.104053 -0.14479,-0.615374 -0.14479,-1.266946 0,-0.70587 0.14479,-1.357442 0.1448,-0.669671 0.47058,-1.17645 0.34389,-0.506778 0.90497,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97735,0 1.55653,0.488679 0.57917,0.48868 0.76017,1.375542 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7704" />
+        <path
+           d="m 218.91112,40.46485 0,12.922849 1.53843,0 0,-3.547449 1.44794,-1.339343 3.20357,4.886792 1.95471,0 -3.98183,-5.954646 3.71034,-3.402655 -2.06331,0 -4.27142,4.090425 0,-7.655973 -1.53843,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7706" />
+        <path
+           d="m 234.72136,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08596,-0.217191 0.57917,0 1.04975,0.217191 0.48868,0.199091 0.83257,0.561076 0.36198,0.343885 0.56107,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59273,0.45248 -0.76017,0 -1.32125,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90496,1.538435 -0.32579,0.886862 -0.32579,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64249,-0.796366 1.04975,-0.796366 1.35744,-2.370999 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7708" />
+        <path
+           d="m 237.8907,44.030398 0,9.357301 1.53844,0 0,-4.162823 q 0,-0.904961 0.18099,-1.592732 0.18099,-0.70587 0.57918,-1.194549 0.39818,-0.488679 1.04975,-0.742068 0.65157,-0.253389 1.57463,-0.253389 l 0,-1.628931 q -1.24884,-0.0362 -2.06331,0.506779 -0.81446,0.542976 -1.37554,1.683228 l -0.0362,0 0,-1.972816 -1.44794,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7710" />
+        <path
+           d="m 250.13172,48.790495 q 0,-0.687771 0.1448,-1.339343 0.14479,-0.651572 0.47058,-1.158351 0.32578,-0.506778 0.86876,-0.814465 0.54298,-0.307687 1.32124,-0.307687 0.79637,0 1.35744,0.307687 0.56108,0.289588 0.90497,0.778267 0.36198,0.488679 0.52487,1.140251 0.1629,0.633473 0.1629,1.321244 0,0.651572 -0.1629,1.285045 -0.14479,0.633473 -0.48868,1.140251 -0.34388,0.48868 -0.88686,0.796366 -0.54297,0.307687 -1.32124,0.307687 -0.74207,0 -1.30315,-0.289587 -0.54297,-0.289588 -0.90496,-0.778267 -0.34388,-0.488679 -0.52488,-1.104053 -0.16289,-0.633473 -0.16289,-1.285045 z m 7.23969,4.597204 0,-12.922849 -1.53843,0 0,4.814395 -0.0362,0 q -0.25339,-0.416283 -0.63347,-0.687771 -0.36199,-0.289588 -0.77827,-0.452481 -0.41628,-0.180992 -0.83256,-0.253389 -0.41629,-0.0724 -0.77827,-0.0724 -1.06786,0 -1.88232,0.398183 -0.79637,0.380084 -1.33934,1.049755 -0.52488,0.651573 -0.79637,1.538435 -0.25339,0.886862 -0.25339,1.882319 0,0.995458 0.27149,1.88232 0.27149,0.886862 0.79637,1.556534 0.54297,0.669671 1.33934,1.067854 0.81446,0.398183 1.90042,0.398183 0.97736,0 1.79182,-0.343885 0.81447,-0.343885 1.19455,-1.122152 l 0.0362,0 0,1.266946 1.53843,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7712" />
+        <path
+           d="m 268.01008,53.3515 q -0.39818,0.23529 -1.10405,0.23529 -0.59727,0 -0.95926,-0.325786 -0.34388,-0.343885 -0.34388,-1.104053 -0.63348,0.760168 -1.48414,1.104053 -0.83257,0.325786 -1.80992,0.325786 -0.63348,0 -1.21265,-0.144794 -0.56108,-0.144793 -0.97736,-0.45248 -0.41628,-0.307687 -0.66967,-0.796366 -0.23529,-0.506779 -0.23529,-1.212649 0,-0.796366 0.27149,-1.303144 0.27149,-0.506778 0.70587,-0.814465 0.45248,-0.325786 1.01355,-0.488679 0.57918,-0.162893 1.17645,-0.271489 0.63348,-0.126694 1.19455,-0.180992 0.57918,-0.0724 1.01356,-0.180992 0.43438,-0.126695 0.68777,-0.343886 0.25339,-0.23529 0.25339,-0.669671 0,-0.506779 -0.19909,-0.814465 -0.18099,-0.307687 -0.48868,-0.47058 -0.28959,-0.162893 -0.66967,-0.217191 -0.36199,-0.0543 -0.72397,-0.0543 -0.97736,0 -1.62893,0.380084 -0.65157,0.361985 -0.70587,1.393641 l -1.53844,0 q 0.0362,-0.868763 0.36199,-1.466038 0.32578,-0.597274 0.86876,-0.959259 0.54298,-0.380084 1.23075,-0.542977 0.70587,-0.162893 1.50223,-0.162893 0.63348,0 1.24885,0.0905 0.63347,0.0905 1.14025,0.380084 0.50678,0.271488 0.81447,0.778267 0.30768,0.506778 0.30768,1.321244 l 0,4.814394 q 0,0.542977 0.0543,0.796366 0.0724,0.253389 0.43438,0.253389 0.19909,0 0.47058,-0.0905 l 0,1.194549 z m -2.49769,-4.796295 q -0.28959,0.217191 -0.76017,0.325786 -0.47058,0.0905 -0.99546,0.162893 -0.50677,0.0543 -1.03165,0.144794 -0.52488,0.0724 -0.94116,0.253389 -0.41628,0.180992 -0.68777,0.524878 -0.25339,0.325786 -0.25339,0.904961 0,0.380084 0.14479,0.651572 0.1629,0.253389 0.39819,0.416282 0.25339,0.162894 0.57917,0.23529 0.32579,0.0724 0.68777,0.0724 0.76017,0 1.30315,-0.199091 0.54297,-0.217191 0.88686,-0.524878 0.34388,-0.325786 0.50678,-0.68777 0.16289,-0.380084 0.16289,-0.70587 l 0,-1.574633 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7714" />
+        <path
+           d="m 275.93952,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08595,-0.217191 0.57918,0 1.04976,0.217191 0.48868,0.199091 0.83256,0.561076 0.36199,0.343885 0.56108,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59274,0.45248 -0.76016,0 -1.32124,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90497,1.538435 -0.32578,0.886862 -0.32578,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64248,-0.796366 1.04976,-0.796366 1.35745,-2.370999 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7716" />
+        <path
+           d="m 279.16316,44.030398 0,9.357301 1.53844,0 0,-5.827952 q 0,-0.271488 0.12669,-0.669671 0.1448,-0.416282 0.43439,-0.796366 0.30768,-0.380084 0.77826,-0.651572 0.48868,-0.271489 1.15835,-0.271489 0.52488,0 0.85067,0.162893 0.34388,0.144794 0.54297,0.434382 0.19909,0.271488 0.27149,0.651572 0.0905,0.380084 0.0905,0.832564 l 0,6.135639 1.53843,0 0,-5.827952 q 0,-1.085953 0.65157,-1.737526 0.65158,-0.651572 1.79183,-0.651572 0.56107,0 0.90496,0.162893 0.36198,0.162893 0.56108,0.452481 0.19909,0.271488 0.27148,0.651572 0.0724,0.380084 0.0724,0.814465 l 0,6.135639 1.53843,0 0,-6.859608 q 0,-0.723969 -0.23528,-1.230747 -0.2172,-0.524878 -0.63348,-0.850664 -0.39818,-0.325786 -0.97736,-0.47058 -0.56107,-0.162893 -1.26694,-0.162893 -0.92306,0 -1.70133,0.416282 -0.76017,0.416283 -1.23075,1.17645 -0.28958,-0.868763 -0.99545,-1.230747 -0.70587,-0.361985 -1.57464,-0.361985 -1.97281,0 -3.02257,1.592732 l -0.0362,0 0,-1.375541 -1.44794,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7718" />
+        <path
+           d="m 295.73329,48.718098 q 0,-0.850664 0.21719,-1.502236 0.23529,-0.669671 0.63348,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54297,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39818,0.452481 0.61537,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61537,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59728,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39819,-0.452481 -0.63348,-1.104053 -0.21719,-0.669671 -0.21719,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85066,0.361984 1.95472,0.361984 1.12215,0 1.95471,-0.361984 0.85067,-0.380084 1.42984,-1.031656 0.57918,-0.669672 0.86877,-1.556534 0.28958,-0.886862 0.28958,-1.918518 0,-1.031656 -0.28958,-1.918518 -0.28959,-0.904962 -0.86877,-1.556534 -0.57917,-0.669671 -1.42984,-1.049755 -0.83256,-0.380084 -1.95471,-0.380084 -1.10406,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7720" />
+        <path
+           d="m 304.96871,44.030398 0,9.357301 1.53843,0 0,-5.284975 q 0,-0.633473 0.16289,-1.15835 0.181,-0.542977 0.52488,-0.94116 0.34389,-0.398183 0.85067,-0.615374 0.52487,-0.217191 1.23074,-0.217191 0.88687,0 1.39364,0.506779 0.50678,0.506778 0.50678,1.375541 l 0,6.33473 1.53844,0 0,-6.153738 q 0,-0.760167 -0.1629,-1.375541 -0.14479,-0.633473 -0.52487,-1.085954 -0.38009,-0.45248 -0.99546,-0.70587 -0.61538,-0.253389 -1.53844,-0.253389 -2.08141,0 -3.04067,1.701328 l -0.0362,0 0,-1.484137 -1.44793,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+           id="path7722" />
+      </g>
+      <g
+         transform="matrix(0.59683756,0,0,0.62856206,116.41511,253.93191)"
+         id="g5083">
+        <g
+           id="Isolation_Mode-6">
+          <polygon
+             style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+             points="122.005,77.57 122.005,22.429 32.038,0.183 0,19.314 0,72.66 92.65,101.222 "
+             id="polygon4984" />
+          <polygon
+             style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd"
+             points="5,23.314 92.65,45.131 92.65,94.222 5,67.66 "
+             id="polygon4986" />
+          <g
+             id="g4988">
+            <g
+               id="g4990">
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+                 id="polygon4992" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+                 id="polygon4994" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+                 id="polygon4996" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+                 id="polygon4998" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+                 id="polygon5000" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+                 id="polygon5002" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+                 id="polygon5004" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+                 id="polygon5006" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+                 id="polygon5008" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+                 id="polygon5010" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+                 id="polygon5012" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+                 id="polygon5014" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+                 id="polygon5016" />
+            </g>
+            <g
+               id="g5018">
+              <polygon
+                 style="fill:#394d54"
+                 points="7.917,29.184 7.917,66.177 11.364,66.906 11.364,30.067 "
+                 id="polygon5020" />
+              <polygon
+                 style="fill:#394d54"
+                 points="14.134,30.801 14.134,68.013 17.598,68.765 17.598,31.68 "
+                 id="polygon5022" />
+              <polygon
+                 style="fill:#394d54"
+                 points="20.353,32.418 20.353,69.848 23.831,70.624 23.831,33.292 "
+                 id="polygon5024" />
+              <polygon
+                 style="fill:#394d54"
+                 points="26.569,34.035 26.569,71.684 30.063,72.483 30.063,34.905 "
+                 id="polygon5026" />
+              <polygon
+                 style="fill:#394d54"
+                 points="32.789,35.652 32.789,73.519 36.298,74.341 36.298,36.518 "
+                 id="polygon5028" />
+              <polygon
+                 style="fill:#394d54"
+                 points="39.007,37.27 39.007,75.354 42.532,76.201 42.532,38.13 "
+                 id="polygon5030" />
+              <polygon
+                 style="fill:#394d54"
+                 points="45.224,38.887 45.224,77.19 48.765,78.06 48.765,39.743 "
+                 id="polygon5032" />
+              <polygon
+                 style="fill:#394d54"
+                 points="51.442,40.504 51.442,79.025 54.999,79.919 54.999,41.356 "
+                 id="polygon5034" />
+              <polygon
+                 style="fill:#394d54"
+                 points="57.661,42.122 57.661,80.86 61.231,81.777 61.231,42.968 "
+                 id="polygon5036" />
+              <polygon
+                 style="fill:#394d54"
+                 points="63.878,43.737 63.878,82.697 67.466,83.637 67.466,44.581 "
+                 id="polygon5038" />
+              <polygon
+                 style="fill:#394d54"
+                 points="70.097,45.355 70.097,84.532 73.699,85.496 73.699,46.193 "
+                 id="polygon5040" />
+              <polygon
+                 style="fill:#394d54"
+                 points="76.313,46.973 76.313,86.367 79.933,87.354 79.933,47.806 "
+                 id="polygon5042" />
+              <polygon
+                 style="fill:#394d54"
+                 points="86.165,49.418 82.532,48.589 82.532,88.203 86.165,89.213 "
+                 id="polygon5044" />
+            </g>
+          </g>
+          <polygon
+             style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+             points="92.65,46.469 117.005,29.184 117.005,75.212 92.65,94.398 "
+             id="polygon5046" />
+          <polygon
+             style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd"
+             points="5,23.314 32.538,6.683 117.005,27.429 92.65,45.131 "
+             id="polygon5048" />
+          <rect
+             style="fill:#394d54"
+             x="46.075001"
+             y="-12.046"
+             transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+             width="3.5250001"
+             height="92.299004"
+             id="rect5050" />
+          <rect
+             style="fill:#394d54"
+             x="103.715"
+             y="18.122"
+             transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+             width="3.5250001"
+             height="34.868999"
+             id="rect5052" />
+          <rect
+             style="fill:#394d54"
+             x="90.234001"
+             y="44.498001"
+             transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+             width="3.523"
+             height="51.174"
+             id="rect5054" />
+        </g>
+        <g
+           style="display:none"
+           id="Layer_2-0"
+           display="none">
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-49.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5063" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-46.612"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5065" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-4.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5067" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-52.994999"
+             y1="-67.064003"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5069" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-76.915001"
+             y2="-49.472"
+             id="line5071" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-165"
+             y2="-71.179001"
+             id="line5073" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-77.349998"
+             y2="-0.271"
+             id="line5075" />
+        </g>
+      </g>
+      <g
+         id="g5133"
+         transform="matrix(0.59683756,0,0,0.62856206,194.85661,253.93191)">
+        <g
+           id="g5135">
+          <polygon
+             id="polygon5137"
+             points="32.038,0.183 0,19.314 0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 "
+             style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd" />
+          <polygon
+             id="polygon5139"
+             points="92.65,94.222 5,67.66 5,23.314 92.65,45.131 "
+             style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd" />
+          <g
+             id="g5141">
+            <g
+               id="g5143">
+              <polygon
+                 id="polygon5145"
+                 points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5147"
+                 points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5149"
+                 points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5151"
+                 points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5153"
+                 points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5155"
+                 points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5157"
+                 points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5159"
+                 points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5161"
+                 points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5163"
+                 points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5165"
+                 points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5167"
+                 points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+              <polygon
+                 id="polygon5169"
+                 points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+            </g>
+            <g
+               id="g5171">
+              <polygon
+                 id="polygon5173"
+                 points="11.364,66.906 11.364,30.067 7.917,29.184 7.917,66.177 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5175"
+                 points="17.598,68.765 17.598,31.68 14.134,30.801 14.134,68.013 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5177"
+                 points="23.831,70.624 23.831,33.292 20.353,32.418 20.353,69.848 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5179"
+                 points="30.063,72.483 30.063,34.905 26.569,34.035 26.569,71.684 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5181"
+                 points="36.298,74.341 36.298,36.518 32.789,35.652 32.789,73.519 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5183"
+                 points="42.532,76.201 42.532,38.13 39.007,37.27 39.007,75.354 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5185"
+                 points="48.765,78.06 48.765,39.743 45.224,38.887 45.224,77.19 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5187"
+                 points="54.999,79.919 54.999,41.356 51.442,40.504 51.442,79.025 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5189"
+                 points="61.231,81.777 61.231,42.968 57.661,42.122 57.661,80.86 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5191"
+                 points="67.466,83.637 67.466,44.581 63.878,43.737 63.878,82.697 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5193"
+                 points="73.699,85.496 73.699,46.193 70.097,45.355 70.097,84.532 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5195"
+                 points="79.933,87.354 79.933,47.806 76.313,46.973 76.313,86.367 "
+                 style="fill:#394d54" />
+              <polygon
+                 id="polygon5197"
+                 points="82.532,88.203 86.165,89.213 86.165,49.418 82.532,48.589 "
+                 style="fill:#394d54" />
+            </g>
+          </g>
+          <polygon
+             id="polygon5199"
+             points="117.005,75.212 92.65,94.398 92.65,46.469 117.005,29.184 "
+             style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd" />
+          <polygon
+             id="polygon5201"
+             points="117.005,27.429 92.65,45.131 5,23.314 32.538,6.683 "
+             style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd" />
+          <rect
+             id="rect5203"
+             height="92.299004"
+             width="3.5250001"
+             transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+             y="-12.046"
+             x="46.075001"
+             style="fill:#394d54" />
+          <rect
+             id="rect5205"
+             height="34.868999"
+             width="3.5250001"
+             transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+             y="18.122"
+             x="103.715"
+             style="fill:#394d54" />
+          <rect
+             id="rect5207"
+             height="51.174"
+             width="3.523"
+             transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+             y="44.498001"
+             x="90.234001"
+             style="fill:#394d54" />
+        </g>
+        <g
+           display="none"
+           id="g5209"
+           style="display:none">
+          <line
+             id="line5211"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-49.362"
+             x1="-77.349998"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5213"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-46.612"
+             x1="-77.349998"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5215"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-4.362"
+             x1="-77.349998"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5217"
+             y2="-492.659"
+             x2="-1830.454"
+             y1="-67.064003"
+             x1="-52.994999"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5219"
+             y2="-49.472"
+             x2="-76.915001"
+             y1="-492.659"
+             x1="532.88"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5221"
+             y2="-71.179001"
+             x2="-165"
+             y1="-492.659"
+             x1="532.88"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+          <line
+             id="line5223"
+             y2="-0.271"
+             x2="-77.349998"
+             y1="-492.659"
+             x1="532.88"
+             stroke-miterlimit="10"
+             display="inline"
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10" />
+        </g>
+      </g>
+      <g
+         transform="matrix(0.59683756,0,0,0.62856206,116.41511,318.58401)"
+         id="g5225">
+        <g
+           id="g5227">
+          <polygon
+             style="clip-rule:evenodd;fill:#394d54;fill-rule:evenodd"
+             points="0,19.314 0,72.66 92.65,101.222 122.005,77.57 122.005,22.429 32.038,0.183 "
+             id="polygon5229" />
+          <polygon
+             style="clip-rule:evenodd;fill:#a5eaf2;fill-rule:evenodd"
+             points="5,67.66 5,23.314 92.65,45.131 92.65,94.222 "
+             id="polygon5231" />
+          <g
+             id="g5233">
+            <g
+               id="g5235">
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="11.364,30.067 7.917,29.184 7.917,66.177 11.364,66.906 "
+                 id="polygon5237" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="17.598,31.68 14.134,30.801 14.134,68.013 17.598,68.765 "
+                 id="polygon5239" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="23.831,33.292 20.353,32.418 20.353,69.848 23.831,70.624 "
+                 id="polygon5241" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="30.063,34.905 26.569,34.035 26.569,71.684 30.063,72.483 "
+                 id="polygon5243" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="36.298,36.518 32.789,35.652 32.789,73.519 36.298,74.341 "
+                 id="polygon5245" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="42.532,38.13 39.007,37.27 39.007,75.354 42.532,76.201 "
+                 id="polygon5247" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="48.765,39.743 45.224,38.887 45.224,77.19 48.765,78.06 "
+                 id="polygon5249" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="54.999,41.356 51.442,40.504 51.442,79.025 54.999,79.919 "
+                 id="polygon5251" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="61.231,42.968 57.661,42.122 57.661,80.86 61.231,81.777 "
+                 id="polygon5253" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="67.466,44.581 63.878,43.737 63.878,82.697 67.466,83.637 "
+                 id="polygon5255" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="73.699,46.193 70.097,45.355 70.097,84.532 73.699,85.496 "
+                 id="polygon5257" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="79.933,47.806 76.313,46.973 76.313,86.367 79.933,87.354 "
+                 id="polygon5259" />
+              <polygon
+                 style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+                 points="86.165,89.213 86.165,49.418 82.532,48.589 82.532,88.203 "
+                 id="polygon5261" />
+            </g>
+            <g
+               id="g5263">
+              <polygon
+                 style="fill:#394d54"
+                 points="11.364,30.067 7.917,29.184 7.917,66.177 11.364,66.906 "
+                 id="polygon5265" />
+              <polygon
+                 style="fill:#394d54"
+                 points="17.598,31.68 14.134,30.801 14.134,68.013 17.598,68.765 "
+                 id="polygon5267" />
+              <polygon
+                 style="fill:#394d54"
+                 points="23.831,33.292 20.353,32.418 20.353,69.848 23.831,70.624 "
+                 id="polygon5269" />
+              <polygon
+                 style="fill:#394d54"
+                 points="30.063,34.905 26.569,34.035 26.569,71.684 30.063,72.483 "
+                 id="polygon5271" />
+              <polygon
+                 style="fill:#394d54"
+                 points="36.298,36.518 32.789,35.652 32.789,73.519 36.298,74.341 "
+                 id="polygon5273" />
+              <polygon
+                 style="fill:#394d54"
+                 points="42.532,38.13 39.007,37.27 39.007,75.354 42.532,76.201 "
+                 id="polygon5275" />
+              <polygon
+                 style="fill:#394d54"
+                 points="48.765,39.743 45.224,38.887 45.224,77.19 48.765,78.06 "
+                 id="polygon5277" />
+              <polygon
+                 style="fill:#394d54"
+                 points="54.999,41.356 51.442,40.504 51.442,79.025 54.999,79.919 "
+                 id="polygon5279" />
+              <polygon
+                 style="fill:#394d54"
+                 points="61.231,42.968 57.661,42.122 57.661,80.86 61.231,81.777 "
+                 id="polygon5281" />
+              <polygon
+                 style="fill:#394d54"
+                 points="67.466,44.581 63.878,43.737 63.878,82.697 67.466,83.637 "
+                 id="polygon5283" />
+              <polygon
+                 style="fill:#394d54"
+                 points="73.699,46.193 70.097,45.355 70.097,84.532 73.699,85.496 "
+                 id="polygon5285" />
+              <polygon
+                 style="fill:#394d54"
+                 points="79.933,47.806 76.313,46.973 76.313,86.367 79.933,87.354 "
+                 id="polygon5287" />
+              <polygon
+                 style="fill:#394d54"
+                 points="86.165,89.213 86.165,49.418 82.532,48.589 82.532,88.203 "
+                 id="polygon5289" />
+            </g>
+          </g>
+          <polygon
+             style="clip-rule:evenodd;fill:#31b4d3;fill-rule:evenodd"
+             points="92.65,94.398 92.65,46.469 117.005,29.184 117.005,75.212 "
+             id="polygon5291" />
+          <polygon
+             style="clip-rule:evenodd;fill:#ffffff;fill-rule:evenodd"
+             points="92.65,45.131 5,23.314 32.538,6.683 117.005,27.429 "
+             id="polygon5293" />
+          <rect
+             style="fill:#394d54"
+             x="46.075001"
+             y="-12.046"
+             transform="matrix(0.241,-0.9705,0.9705,0.241,3.2084,72.3107)"
+             width="3.5250001"
+             height="92.299004"
+             id="rect5295" />
+          <rect
+             style="fill:#394d54"
+             x="103.715"
+             y="18.122"
+             transform="matrix(0.5701,0.8215,-0.8215,0.5701,74.5518,-71.3707)"
+             width="3.5250001"
+             height="34.868999"
+             id="rect5297" />
+          <rect
+             style="fill:#394d54"
+             x="90.234001"
+             y="44.498001"
+             transform="matrix(1,-0.0022,0.0022,1,-0.1552,0.2041)"
+             width="3.523"
+             height="51.174"
+             id="rect5299" />
+        </g>
+        <g
+           style="display:none"
+           id="g5301"
+           display="none">
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-49.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5303" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-46.612"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5305" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-77.349998"
+             y1="-4.362"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5307" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="-52.994999"
+             y1="-67.064003"
+             x2="-1830.454"
+             y2="-492.659"
+             id="line5309" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-76.915001"
+             y2="-49.472"
+             id="line5311" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-165"
+             y2="-71.179001"
+             id="line5313" />
+          <line
+             style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.3191731;stroke-miterlimit:10"
+             display="inline"
+             stroke-miterlimit="10"
+             x1="532.88"
+             y1="-492.659"
+             x2="-77.349998"
+             y2="-0.271"
+             id="line5315" />
+        </g>
+      </g>
+      <rect
+         style="opacity:1;fill:#394d54;fill-opacity:1;stroke:#005976;stroke-width:1.95111275;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+         id="rect5078"
+         width="93.442245"
+         height="30.942587"
+         x="220.62865"
+         y="152.03131"
+         ry="8.0012312"
+         rx="5.394352" />
+      <g
+         transform="matrix(0.48549887,0,0,0.47493898,137.13218,138.44897)"
+         style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#dbdde0;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot6134">
+        <path
+           d="m 187.97742,46.673398 0,28.56 18.92,0 0,-3.2 -15.12,0 0,-25.36 -3.8,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7725" />
+        <path
+           d="m 213.28305,50.833398 0,-4.16 -3.4,0 0,4.16 3.4,0 z m -3.4,3.72 0,20.68 3.4,0 0,-20.68 -3.4,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7727" />
+        <path
+           d="m 218.5893,54.553398 0,20.68 3.4,0 0,-11.68 q 0,-1.4 0.36,-2.56 0.4,-1.2 1.16,-2.08 0.76,-0.88 1.88,-1.36 1.16,-0.48 2.72,-0.48 1.96,0 3.08,1.12 1.12,1.12 1.12,3.04 l 0,14 3.4,0 0,-13.6 q 0,-1.68 -0.36,-3.04 -0.32,-1.4 -1.16,-2.4 -0.84,-1 -2.2,-1.56 -1.36,-0.56 -3.4,-0.56 -4.6,0 -6.72,3.76 l -0.08,0 0,-3.28 -3.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7729" />
+        <path
+           d="m 257.97492,75.233398 0,-20.68 -3.4,0 0,11.68 q 0,1.4 -0.4,2.6 -0.36,1.16 -1.12,2.04 -0.76,0.88 -1.92,1.36 -1.12,0.48 -2.68,0.48 -1.96,0 -3.08,-1.12 -1.12,-1.12 -1.12,-3.04 l 0,-14 -3.4,0 0,13.6 q 0,1.68 0.32,3.08 0.36,1.36 1.2,2.36 0.84,1 2.2,1.56 1.36,0.52 3.4,0.52 2.28,0 3.96,-0.88 1.68,-0.92 2.76,-2.84 l 0.08,0 0,3.28 3.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7731" />
+        <path
+           d="m 268.68055,64.353398 -7.76,10.88 4.12,0 5.76,-8.56 5.76,8.56 4.36,0 -8,-11.16 7.12,-9.52 -4.08,0 -5.16,7.24 -4.96,-7.24 -4.36,0 7.2,9.8 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7733" />
+        <path
+           d="m 306.71742,75.233398 10.16,-28.56 -3.96,0 -8.24,24.76 -0.08,0 -8.16,-24.76 -4.08,0 10.04,28.56 4.32,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7735" />
+        <path
+           d="m 320.01055,46.673398 0,28.56 3.6,0 0,-23.76 0.08,0 8.92,23.76 3.24,0 8.92,-23.76 0.08,0 0,23.76 3.6,0 0,-28.56 -5.2,0 -9.04,24 -9,-24 -5.2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path7737" />
+      </g>
+      <g
+         transform="translate(-31.485323,51.85119)"
+         style="font-style:normal;font-weight:normal;font-size:25px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#dbdde0;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+         id="flowRoot4370">
+        <path
+           d="m 253.57891,-22.320679 4.775,-17.85 -2.375,0 -3.625,14.85 -0.05,0 -3.95,-14.85 -2.575,0 -4,14.85 -0.05,0 -3.5,-14.85 -2.425,0 4.575,17.85 2.475,0 4.125,-15 0.05,0 4.075,15 2.475,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:25px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path4398" />
+        <path
+           d="m 262.49844,-37.570679 0,-2.6 -2.125,0 0,2.6 2.125,0 z m -2.125,2.325 0,12.925 2.125,0 0,-12.925 -2.125,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:25px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path4400" />
+        <path
+           d="m 265.81484,-35.245679 0,12.925 2.125,0 0,-7.3 q 0,-0.875 0.225,-1.6 0.25,-0.75 0.725,-1.3 0.475,-0.55 1.175,-0.85 0.725,-0.3 1.7,-0.3 1.225,0 1.925,0.7 0.7,0.7 0.7,1.9 l 0,8.75 2.125,0 0,-8.5 q 0,-1.05 -0.225,-1.9 -0.2,-0.875 -0.725,-1.5 -0.525,-0.625 -1.375,-0.975 -0.85,-0.35 -2.125,-0.35 -2.875,0 -4.2,2.35 l -0.05,0 0,-2.05 -2,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:25px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path4402" />
+        <path
+           d="m 281.28086,-28.670679 q 0,-0.95 0.2,-1.85 0.2,-0.9 0.65,-1.6 0.45,-0.7 1.2,-1.125 0.75,-0.425 1.825,-0.425 1.1,0 1.875,0.425 0.775,0.4 1.25,1.075 0.5,0.675 0.725,1.575 0.225,0.875 0.225,1.825 0,0.9 -0.225,1.775 -0.2,0.875 -0.675,1.575 -0.475,0.675 -1.225,1.1 -0.75,0.425 -1.825,0.425 -1.025,0 -1.8,-0.4 -0.75,-0.4 -1.25,-1.075 -0.475,-0.675 -0.725,-1.525 -0.225,-0.875 -0.225,-1.775 z m 10,6.35 0,-17.85 -2.125,0 0,6.65 -0.05,0 q -0.35,-0.575 -0.875,-0.95 -0.5,-0.4 -1.075,-0.625 -0.575,-0.25 -1.15,-0.35 -0.575,-0.1 -1.075,-0.1 -1.475,0 -2.6,0.55 -1.1,0.525 -1.85,1.45 -0.725,0.9 -1.1,2.125 -0.35,1.225 -0.35,2.6 0,1.375 0.375,2.6 0.375,1.225 1.1,2.15 0.75,0.925 1.85,1.475 1.125,0.55 2.625,0.55 1.35,0 2.475,-0.475 1.125,-0.475 1.65,-1.55 l 0.05,0 0,1.75 2.125,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:25px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path4404" />
+        <path
+           d="m 296.07578,-28.770679 q 0,-1.175 0.3,-2.075 0.325,-0.925 0.875,-1.55 0.55,-0.625 1.275,-0.95 0.75,-0.325 1.575,-0.325 0.825,0 1.55,0.325 0.75,0.325 1.3,0.95 0.55,0.625 0.85,1.55 0.325,0.9 0.325,2.075 0,1.175 -0.325,2.1 -0.3,0.9 -0.85,1.525 -0.55,0.6 -1.3,0.925 -0.725,0.325 -1.55,0.325 -0.825,0 -1.575,-0.325 -0.725,-0.325 -1.275,-0.925 -0.55,-0.625 -0.875,-1.525 -0.3,-0.925 -0.3,-2.1 z m -2.25,0 q 0,1.425 0.4,2.65 0.4,1.225 1.2,2.15 0.8,0.9 1.975,1.425 1.175,0.5 2.7,0.5 1.55,0 2.7,-0.5 1.175,-0.525 1.975,-1.425 0.8,-0.925 1.2,-2.15 0.4,-1.225 0.4,-2.65 0,-1.425 -0.4,-2.65 -0.4,-1.25 -1.2,-2.15 -0.8,-0.925 -1.975,-1.45 -1.15,-0.525 -2.7,-0.525 -1.525,0 -2.7,0.525 -1.175,0.525 -1.975,1.45 -0.8,0.9 -1.2,2.15 -0.4,1.225 -0.4,2.65 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:25px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path4406" />
+        <path
+           d="m 321.63242,-22.320679 4.125,-12.925 -2.2,0 -2.9,10.575 -0.05,0 -2.7,-10.575 -2.325,0 -2.6,10.575 -0.05,0 -2.925,-10.575 -2.35,0 4.15,12.925 2.3,0 2.6,-10.275 0.05,0 2.625,10.275 2.25,0 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:25px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path4408" />
+        <path
+           d="m 329.07773,-26.395679 -2.125,0 q 0.05,1.2 0.5,2.05 0.45,0.825 1.2,1.35 0.75,0.5 1.725,0.725 0.975,0.225 2.05,0.225 0.975,0 1.95,-0.2 1,-0.175 1.775,-0.65 0.8,-0.475 1.275,-1.25 0.5,-0.775 0.5,-1.95 0,-0.925 -0.375,-1.55 -0.35,-0.625 -0.95,-1.025 -0.575,-0.425 -1.35,-0.675 -0.75,-0.25 -1.55,-0.425 -0.75,-0.175 -1.5,-0.325 -0.75,-0.175 -1.35,-0.4 -0.6,-0.25 -1,-0.6 -0.375,-0.375 -0.375,-0.925 0,-0.5 0.25,-0.8 0.25,-0.325 0.65,-0.5 0.4,-0.2 0.875,-0.275 0.5,-0.075 0.975,-0.075 0.525,0 1.025,0.125 0.525,0.1 0.95,0.35 0.425,0.25 0.7,0.675 0.275,0.4 0.325,1.025 l 2.125,0 q -0.075,-1.175 -0.5,-1.95 -0.425,-0.8 -1.15,-1.25 -0.7,-0.475 -1.625,-0.65 -0.925,-0.2 -2.025,-0.2 -0.85,0 -1.725,0.225 -0.85,0.2 -1.55,0.65 -0.675,0.425 -1.125,1.125 -0.425,0.7 -0.425,1.675 0,1.25 0.625,1.95 0.625,0.7 1.55,1.1 0.95,0.375 2.05,0.6 1.1,0.2 2.025,0.475 0.95,0.25 1.575,0.675 0.625,0.425 0.625,1.25 0,0.6 -0.3,1 -0.3,0.375 -0.775,0.575 -0.45,0.2 -1,0.275 -0.55,0.075 -1.05,0.075 -0.65,0 -1.275,-0.125 -0.6,-0.125 -1.1,-0.4 -0.475,-0.3 -0.775,-0.775 -0.3,-0.5 -0.325,-1.2 z"
+           style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:25px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue';fill:#dbdde0;fill-opacity:1"
+           id="path4410" />
+      </g>
+    </g>
+  </g>
+  <g
+     inkscape:label="default"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-73.928551,-292.36218)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot4362"
+       style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:sans-serif;font-style:normal;font-weight:normal;font-size:40px;line-height:125%;letter-spacing:0px;word-spacing:0px"><flowRegion
+         id="flowRegion4364"><rect
+           id="rect4366"
+           width="95.964493"
+           height="43.436558"
+           x="171.72594"
+           y="102.005" /></flowRegion><flowPara
+         id="flowPara4368" /></flowRoot>    <g
+       style="display:none"
+       id="Layer_2"
+       display="none"
+       transform="translate(862.40058,602.59637)">
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-77.349998"
+         y1="-49.362"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4716" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-77.349998"
+         y1="-46.612"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4718" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-77.349998"
+         y1="-4.362"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4720" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="-52.994999"
+         y1="-67.064003"
+         x2="-1830.454"
+         y2="-492.659"
+         id="line4722" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="532.88"
+         y1="-492.659"
+         x2="-76.915001"
+         y2="-49.472"
+         id="line4724" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="532.88"
+         y1="-492.659"
+         x2="-165"
+         y2="-71.179001"
+         id="line4726" />
+      <line
+         style="display:inline;fill:none;stroke:#24b8eb;stroke-width:0.25;stroke-miterlimit:10"
+         display="inline"
+         stroke-miterlimit="10"
+         x1="532.88"
+         y1="-492.659"
+         x2="-77.349998"
+         y2="-0.271"
+         id="line4728" />
+    </g>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5920)"
+       d="M 187.77436,389.79113 C 131.26725,402.59092 53.961508,490.55977 172.27674,510.09709"
+       id="path5884"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="cc" />
+    <rect
+       style="opacity:1;fill:#f2f2f2;fill-opacity:1;stroke:#005976;stroke-width:3;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+       id="rect4461"
+       width="151.48232"
+       height="31.31473"
+       x="107.04051"
+       y="357.65875" />
+    <g
+       transform="matrix(1.1029002,0,0,1.1071429,-80.18049,321.25181)"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       id="flowRoot4354">
+      <path
+         d="m 187.98859,51.93976 0,-10.026972 2.89587,0 q 1.19455,0 2.00902,0.343886 0.81446,0.325786 1.32124,0.977358 0.52488,0.633473 0.74207,1.556533 0.23529,0.904962 0.23529,2.063312 0,1.194549 -0.25339,2.045213 -0.23529,0.832565 -0.61537,1.393641 -0.38009,0.561076 -0.86876,0.886862 -0.47058,0.325786 -0.95926,0.506778 -0.48868,0.162893 -0.92306,0.217191 -0.43439,0.0362 -0.72397,0.0362 l -2.85968,0 z m -1.71943,-11.47491 0,12.922849 4.43431,0 q 1.61083,0 2.78728,-0.452481 1.17645,-0.452481 1.93662,-1.303144 0.76017,-0.868763 1.12215,-2.11761 0.36199,-1.266946 0.36199,-2.895877 0,-3.113067 -1.61083,-4.633402 -1.61083,-1.520335 -4.59721,-1.520335 l -4.43431,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6983"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 199.86394,48.718098 q 0,-0.850664 0.2172,-1.502236 0.23529,-0.669671 0.63347,-1.122152 0.39818,-0.452481 0.92306,-0.687771 0.54298,-0.23529 1.14025,-0.23529 0.59727,0 1.12215,0.23529 0.54298,0.23529 0.94116,0.687771 0.39819,0.452481 0.61538,1.122152 0.23529,0.651572 0.23529,1.502236 0,0.850664 -0.23529,1.520335 -0.21719,0.651572 -0.61538,1.104053 -0.39818,0.434381 -0.94116,0.669671 -0.52488,0.23529 -1.12215,0.23529 -0.59727,0 -1.14025,-0.23529 -0.52488,-0.23529 -0.92306,-0.669671 -0.39818,-0.452481 -0.63347,-1.104053 -0.2172,-0.669671 -0.2172,-1.520335 z m -1.62893,0 q 0,1.031656 0.28959,1.918518 0.28959,0.886862 0.86876,1.556534 0.57918,0.651572 1.42984,1.031656 0.85067,0.361984 1.95472,0.361984 1.12215,0 1.95472,-0.361984 0.85066,-0.380084 1.42984,-1.031656 0.57917,-0.669672 0.86876,-1.556534 0.28959,-0.886862 0.28959,-1.918518 0,-1.031656 -0.28959,-1.918518 -0.28959,-0.904962 -0.86876,-1.556534 -0.57918,-0.669671 -1.42984,-1.049755 -0.83257,-0.380084 -1.95472,-0.380084 -1.10405,0 -1.95472,0.380084 -0.85066,0.380084 -1.42984,1.049755 -0.57917,0.651572 -0.86876,1.556534 -0.28959,0.886862 -0.28959,1.918518 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6985"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 215.45219,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93662,0.398183 -0.83256,0.380084 -1.39364,1.067855 -0.54297,0.669671 -0.81446,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37554,0.977358 0.83257,0.343885 1.90042,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57464,0 q -0.14479,1.085953 -0.79636,1.683228 -0.63347,0.597274 -1.77373,0.597274 -0.72397,0 -1.24884,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48868,-1.104053 -0.14479,-0.615374 -0.14479,-1.266946 0,-0.70587 0.14479,-1.357442 0.1448,-0.669671 0.47058,-1.17645 0.34389,-0.506778 0.90497,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97735,0 1.55653,0.488679 0.57917,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6987"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 218.91112,40.46485 0,12.922849 1.53843,0 0,-3.547449 1.44794,-1.339343 3.20357,4.886792 1.95471,0 -3.98183,-5.954646 3.71034,-3.402655 -2.06331,0 -4.27142,4.090425 0,-7.655973 -1.53843,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6989"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 234.72136,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54298,-0.850664 0.34388,-0.361985 0.81446,-0.561076 0.48868,-0.217191 1.08596,-0.217191 0.57917,0 1.04975,0.217191 0.48868,0.199091 0.83257,0.561076 0.36198,0.343885 0.56107,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48414,2.606288 -1.52034,0 q -0.19909,0.923061 -0.83256,1.375542 -0.61538,0.45248 -1.59273,0.45248 -0.76017,0 -1.32125,-0.253389 -0.56107,-0.253389 -0.92306,-0.669671 -0.36198,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.16289,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32125,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77372,0.361985 -0.79637,0.361984 -1.39364,1.013556 -0.57918,0.651573 -0.90496,1.538435 -0.32579,0.886862 -0.32579,1.954716 0.0362,1.067855 0.30769,1.972816 0.28958,0.904962 0.85066,1.556534 0.56108,0.651572 1.37554,1.013557 0.83257,0.361984 1.95472,0.361984 1.59273,0 2.64249,-0.796366 1.04975,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6991"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 237.8907,44.030398 0,9.357301 1.53844,0 0,-4.162823 q 0,-0.904961 0.18099,-1.592732 0.18099,-0.70587 0.57918,-1.194549 0.39818,-0.488679 1.04975,-0.742068 0.65157,-0.253389 1.57463,-0.253389 l 0,-1.628931 q -1.24884,-0.0362 -2.06331,0.506779 -0.81446,0.542976 -1.37554,1.683228 l -0.0362,0 0,-1.972816 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6993"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 255.3624,47.03487 1.59273,0 q -0.0905,-0.832565 -0.43438,-1.429839 -0.34389,-0.615374 -0.88686,-1.013557 -0.52488,-0.398183 -1.23075,-0.579175 -0.68777,-0.199092 -1.48414,-0.199092 -1.10405,0 -1.93661,0.398183 -0.83257,0.380084 -1.39364,1.067855 -0.54298,0.669671 -0.81447,1.592732 -0.27149,0.904961 -0.27149,1.954716 0,1.049756 0.27149,1.936618 0.28959,0.868763 0.83256,1.502236 0.56108,0.633473 1.37555,0.977358 0.83256,0.343885 1.90041,0.343885 1.79183,0 2.82348,-0.94116 1.04976,-0.941159 1.30315,-2.678685 l -1.57463,0 q -0.1448,1.085953 -0.79637,1.683228 -0.63347,0.597274 -1.77372,0.597274 -0.72397,0 -1.24885,-0.289587 -0.52488,-0.289588 -0.85067,-0.760168 -0.32578,-0.488679 -0.48867,-1.104053 -0.1448,-0.615374 -0.1448,-1.266946 0,-0.70587 0.1448,-1.357442 0.14479,-0.669671 0.47058,-1.17645 0.34388,-0.506778 0.90496,-0.814465 0.56107,-0.307687 1.39364,-0.307687 0.97736,0 1.55653,0.488679 0.57918,0.48868 0.76017,1.375542 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6995"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 258.82133,40.46485 0,12.922849 1.53844,0 0,-12.922849 -1.53844,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6997"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 264.38967,42.34717 0,-1.88232 -1.53844,0 0,1.88232 1.53844,0 z m -1.53844,1.683228 0,9.357301 1.53844,0 0,-9.357301 -1.53844,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path6999"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 273.28827,47.813137 -5.37547,0 q 0.0362,-0.542977 0.23529,-1.013557 0.19909,-0.488679 0.54297,-0.850664 0.34389,-0.361985 0.81447,-0.561076 0.48868,-0.217191 1.08595,-0.217191 0.57918,0 1.04976,0.217191 0.48868,0.199091 0.83256,0.561076 0.36199,0.343885 0.56108,0.832565 0.21719,0.488679 0.25339,1.031656 z m 1.48413,2.606288 -1.52033,0 q -0.19909,0.923061 -0.83257,1.375542 -0.61537,0.45248 -1.59273,0.45248 -0.76017,0 -1.32124,-0.253389 -0.56108,-0.253389 -0.92306,-0.669671 -0.36199,-0.434382 -0.52488,-0.977359 -0.16289,-0.561076 -0.14479,-1.176449 l 7.0044,0 q 0.0362,-0.850664 -0.1629,-1.791824 -0.18099,-0.94116 -0.68777,-1.737526 -0.48868,-0.796366 -1.32124,-1.303144 -0.81446,-0.524878 -2.06331,-0.524878 -0.95926,0 -1.77373,0.361985 -0.79636,0.361984 -1.39364,1.013556 -0.57917,0.651573 -0.90496,1.538435 -0.32578,0.886862 -0.32578,1.954716 0.0362,1.067855 0.30768,1.972816 0.28959,0.904962 0.85067,1.556534 0.56107,0.651572 1.37554,1.013557 0.83256,0.361984 1.95471,0.361984 1.59274,0 2.64249,-0.796366 1.04976,-0.796366 1.35744,-2.370999 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7001"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 276.51191,44.030398 0,9.357301 1.53844,0 0,-5.284975 q 0,-0.633473 0.16289,-1.15835 0.18099,-0.542977 0.52488,-0.94116 0.34388,-0.398183 0.85066,-0.615374 0.52488,-0.217191 1.23075,-0.217191 0.88686,0 1.39364,0.506779 0.50678,0.506778 0.50678,1.375541 l 0,6.33473 1.53843,0 0,-6.153738 q 0,-0.760167 -0.16289,-1.375541 -0.1448,-0.633473 -0.52488,-1.085954 -0.38008,-0.45248 -0.99546,-0.70587 -0.61537,-0.253389 -1.53843,-0.253389 -2.08141,0 -3.04067,1.701328 l -0.0362,0 0,-1.484137 -1.44794,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7003"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 288.72239,44.030398 0,-2.80538 -1.53844,0 0,2.80538 -1.59273,0 0,1.357442 1.59273,0 0,5.954646 q 0,0.651572 0.1267,1.049755 0.12669,0.398183 0.38008,0.615374 0.27149,0.217191 0.68777,0.307687 0.43439,0.0724 1.03166,0.0724 l 1.17645,0 0,-1.357442 -0.70587,0 q -0.36199,0 -0.59727,-0.0181 -0.2172,-0.0362 -0.34389,-0.126694 -0.12669,-0.0905 -0.18099,-0.253389 -0.0362,-0.162893 -0.0362,-0.434382 l 0,-5.809852 1.86422,0 0,-1.357442 -1.86422,0 z"
+         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.09922791px;font-family:'Helvetica Neue';-inkscape-font-specification:'Helvetica Neue'"
+         id="path7005"
+         inkscape:connector-curvature="0" />
+    </g>
+  </g>
+</svg>
diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md
index 9bf7632..89f75d2 100644
--- a/docs/sources/installation/mac.md
+++ b/docs/sources/installation/mac.md
@@ -2,20 +2,19 @@
 page_description: Instructions for installing Docker on OS X using boot2docker.
 page_keywords: Docker, Docker documentation, requirements, boot2docker, VirtualBox, SSH, Linux, OSX, OS X, Mac
 
-# Install Docker on Mac OS X
+# Mac OS X
 
 You can install Docker using Boot2Docker to run `docker` commands at your command-line.
 Choose this installation if you are familiar with the command-line or plan to
 contribute to the Docker project on GitHub.
 
+[<img src="/installation/images/kitematic.png" alt="Download Kitematic"
+style="float:right;">](/kitematic/)
+
 Alternatively, you may want to try <a id="inlinelink" href="https://kitematic.com/"
 target="_blank">Kitematic</a>, an application that lets you set up Docker and
 run containers using a graphical user interface (GUI).
 
-<a id="graphic" href="https://kitematic.com/" target="_blank"><img
-src="/installation/images/kitematic.png" alt="Download Kitematic"></a>
-
-
 ## Command-line Docker with Boot2Docker
 
 Because the Docker daemon uses Linux-specific kernel features, you can't run
@@ -42,12 +41,12 @@
 Docker container using standard localhost addressing such as `localhost:8000` or
 `0.0.0.0:8376`.
 
-![Linux Architecture Diagram](/installation/images/linux_docker_host.png)
+![Linux Architecture Diagram](/installation/images/linux_docker_host.svg)
 
 In an OS X installation, the `docker` daemon is running inside a Linux virtual
 machine provided by Boot2Docker.
 
-![OSX Architecture Diagram](/installation/images/mac_docker_host.png)
+![OSX Architecture Diagram](/installation/images/mac_docker_host.svg)
 
 In OS X, the Docker host address is the address of the Linux VM.
 When you start the `boot2docker` process, the VM is assigned an IP address. Under
@@ -55,17 +54,17 @@
 practice, work through the exercises on this page.
 
 
-### Install Boot2Docker
+### Installation
 
 1. Go to the [boot2docker/osx-installer ](
-https://github.com/boot2docker/osx-installer/releases/latest) release page.
+   https://github.com/boot2docker/osx-installer/releases/latest) release page.
 
 4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
-section.
+   section.
 
 3. Install Boot2Docker by double-clicking the package.
 
-	The installer places Boot2Docker in your "Applications" folder.
+    The installer places Boot2Docker in your "Applications" folder.
 
 The installation places the `docker` and `boot2docker` binaries in your
 `/usr/local/bin` directory.
@@ -96,30 +95,32 @@
 Once the launch completes, you can run `docker` commands. A good way to verify
 your setup succeeded is to run the `hello-world` container.
 
-		$ docker run hello-world
-		Unable to find image 'hello-world:latest' locally
-		511136ea3c5a: Pull complete
-		31cbccb51277: Pull complete
-		e45a5af57b00: Pull complete
-		hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
-		Status: Downloaded newer image for hello-world:latest
-		Hello from Docker.
-		This message shows that your installation appears to be working correctly.
+    $ docker run hello-world
+    Unable to find image 'hello-world:latest' locally
+    511136ea3c5a: Pull complete
+    31cbccb51277: Pull complete
+    e45a5af57b00: Pull complete
+    hello-world:latest: The image you are pulling has been verified.
+    Important: image verification is a tech preview feature and should not be
+    relied on to provide security.
+    Status: Downloaded newer image for hello-world:latest
+    Hello from Docker.
+    This message shows that your installation appears to be working correctly.
 
-		To generate this message, Docker took the following steps:
-		 1. The Docker client contacted the Docker daemon.
-		 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
-			(Assuming it was not already locally available.)
-		 3. The Docker daemon created a new container from that image which runs the
-			executable that produces the output you are currently reading.
-		 4. The Docker daemon streamed that output to the Docker client, which sent it
-			to your terminal.
+    To generate this message, Docker took the following steps:
+    1. The Docker client contacted the Docker daemon.
+    2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
+       (Assuming it was not already locally available.)
+    3. The Docker daemon created a new container from that image which runs the
+       executable that produces the output you are currently reading.
+    4. The Docker daemon streamed that output to the Docker client, which sent it
+       to your terminal.
 
-		To try something more ambitious, you can run an Ubuntu container with:
-		 $ docker run -it ubuntu bash
+    To try something more ambitious, you can run an Ubuntu container with:
+    $ docker run -it ubuntu bash
 
-		For more examples and ideas, visit:
-		 http://docs.docker.com/userguide/
+    For more examples and ideas, visit:
+    http://docs.docker.com/userguide/
 
 
 A more typical way to start and stop `boot2docker` is using the command line.
@@ -130,45 +131,45 @@
 
 1. Create a new Boot2Docker VM.
 
-		$ boot2docker init
+        $ boot2docker init
 
-	This creates a new virtual machine. You only need to run this command once.
+    This creates a new virtual machine. You only need to run this command once.
 
 2. Start the `boot2docker` VM.
 
-		$ boot2docker start
+        $ boot2docker start
 
 3. Display the environment variables for the Docker client.
 
-		$ boot2docker shellinit
-		Writing /Users/mary/.boot2docker/certs/boot2docker-vm/ca.pem
-		Writing /Users/mary/.boot2docker/certs/boot2docker-vm/cert.pem
-		Writing /Users/mary/.boot2docker/certs/boot2docker-vm/key.pem
-			export DOCKER_HOST=tcp://192.168.59.103:2376
-			export DOCKER_CERT_PATH=/Users/mary/.boot2docker/certs/boot2docker-vm
-			export DOCKER_TLS_VERIFY=1
+        $ boot2docker shellinit
+        Writing /Users/mary/.boot2docker/certs/boot2docker-vm/ca.pem
+        Writing /Users/mary/.boot2docker/certs/boot2docker-vm/cert.pem
+        Writing /Users/mary/.boot2docker/certs/boot2docker-vm/key.pem
+            export DOCKER_HOST=tcp://192.168.59.103:2376
+            export DOCKER_CERT_PATH=/Users/mary/.boot2docker/certs/boot2docker-vm
+            export DOCKER_TLS_VERIFY=1
 
-	The specific paths and address on your machine will be different.
+    The specific paths and address on your machine will be different.
 
 4. To set the environment variables in your shell do the following:
 
-		$ eval "$(boot2docker shellinit)"
+        $ eval "$(boot2docker shellinit)"
 
-	You can also set them manually by using the `export` commands `boot2docker`
-	returns.
+    You can also set them manually by using the `export` commands `boot2docker`
+    returns.
 
 5. Run the `hello-world` container to verify your setup.
 
-		$ docker run hello-world
+        $ docker run hello-world
 
 
-## Basic Boot2Docker Exercises
+## Basic Boot2Docker exercises
 
 At this point, you should have `boot2docker` running and the `docker` client
 environment initialized. To verify this, run the following commands:
 
-	$ boot2docker status
-	$ docker version
+    $ boot2docker status
+    $ docker version
 
 Work through this section to try some practical container tasks using `boot2docker` VM.
 
@@ -176,52 +177,52 @@
 
 1. Start an NGINX container on the DOCKER_HOST.
 
-		$ docker run -d -P --name web nginx
+        $ docker run -d -P --name web nginx
 
-	Normally, the `docker run` commands starts a container, runs it, and then
-	exits. The `-d` flag keeps the container running in the background
-	after the `docker run` command completes. The `-P` flag publishes exposed ports from the
-	container to your local host; this lets you access them from your Mac.
+    Normally, the `docker run` commands starts a container, runs it, and then
+    exits. The `-d` flag keeps the container running in the background
+    after the `docker run` command completes. The `-P` flag publishes exposed ports from the
+    container to your local host; this lets you access them from your Mac.
 
 2. Display your running container with `docker ps` command
 
-		CONTAINER ID        IMAGE               COMMAND                CREATED             STATUS              PORTS                                           NAMES
-		5fb65ff765e9        nginx:latest        "nginx -g 'daemon of   3 minutes ago       Up 3 minutes        0.0.0.0:49156->443/tcp, 0.0.0.0:49157->80/tcp   web  
+        CONTAINER ID        IMAGE               COMMAND                CREATED             STATUS              PORTS                                           NAMES
+        5fb65ff765e9        nginx:latest        "nginx -g 'daemon of   3 minutes ago       Up 3 minutes        0.0.0.0:49156->443/tcp, 0.0.0.0:49157->80/tcp   web  
 
-	At this point, you can see `nginx` is running as a daemon.
+    At this point, you can see `nginx` is running as a daemon.
 
 3. View just the container's ports.
 
-		$ docker port web
-		443/tcp -> 0.0.0.0:49156
-		80/tcp -> 0.0.0.0:49157
+        $ docker port web
+        443/tcp -> 0.0.0.0:49156
+        80/tcp -> 0.0.0.0:49157
 
-	This tells you that the `web` container's port `80` is mapped to port
-	`49157` on your Docker host.
+    This tells you that the `web` container's port `80` is mapped to port
+    `49157` on your Docker host.
 
 4. Enter the `http://localhost:49157` address (`localhost` is `0.0.0.0`) in your browser:
 
-	   ![Bad Address](/installation/images/bad_host.png)
+    ![Bad Address](/installation/images/bad_host.png)
 
-	This didn't work. The reason it doesn't work is your `DOCKER_HOST` address is
-	not the localhost address (0.0.0.0) but is instead the address of the
-	`boot2docker` VM.
+    This didn't work. The reason it doesn't work is your `DOCKER_HOST` address is
+    not the localhost address (0.0.0.0) but is instead the address of the
+    `boot2docker` VM.
 
 5. Get the address of the `boot2docker` VM.
 
-		$ boot2docker ip
-		192.168.59.103
+        $ boot2docker ip
+        192.168.59.103
 
 6. Enter the `http://192.168.59.103:49157` address in your browser:
 
-	![Correct Addressing](/installation/images/good_host.png)
+    ![Correct Addressing](/installation/images/good_host.png)
 
-	Success!
+    Success!
 
 7. To stop and then remove your running `nginx` container, do the following:
 
-		$ docker stop web
-		$ docker rm web
+        $ docker stop web
+        $ docker rm web
 
 ### Mount a volume on the container
 
@@ -231,46 +232,46 @@
 
 1. Change to your user `$HOME` directory.
 
-		$ cd $HOME
+        $ cd $HOME
 
 2. Make a new `site` directory.
 
-		$ mkdir site
+        $ mkdir site
 
 3. Change into the `site` directory.
 
-		$ cd site
+        $ cd site
 
 4. Create a new `index.html` file.
 
-		$ echo "my new site" > index.html
+        $ echo "my new site" > index.html
 
 5. Start a new `nginx` container and replace the `html` folder with your `site` directory.
 
-		$ docker run -d -P -v $HOME/site:/usr/share/nginx/html --name mysite nginx
+        $ docker run -d -P -v $HOME/site:/usr/share/nginx/html --name mysite nginx
 
 6. Get the `mysite` container's port.
 
-		$ docker port mysite
-		80/tcp -> 0.0.0.0:49166
-		443/tcp -> 0.0.0.0:49165
+        $ docker port mysite
+        80/tcp -> 0.0.0.0:49166
+        443/tcp -> 0.0.0.0:49165
 
 7. Open the site in a browser:
 
-	![My site page](/installation/images/newsite_view.png)
+    ![My site page](/installation/images/newsite_view.png)
 
 8. Try adding a page to your `$HOME/site` in real time.
 
-		$ echo "This is cool" > cool.html
+        $ echo "This is cool" > cool.html
 
 9. Open the new page in the browser.
 
-	![Cool page](/installation/images/cool_view.png)
+    ![Cool page](/installation/images/cool_view.png)
 
 9. Stop and then remove your running `mysite` container.
 
-		$ docker stop mysite
-		$ docker rm mysite
+        $ docker stop mysite
+        $ docker rm mysite
 
 ## Upgrade Boot2Docker
 
@@ -286,11 +287,11 @@
 
 2. Stop the `boot2docker` application.
 
-		$ boot2docker stop
+        $ boot2docker stop
 
 3. Run the upgrade command.
 
-		$ boot2docker upgrade
+        $ boot2docker upgrade
 
 
 ### Use the installer
@@ -301,21 +302,45 @@
 
 2. Stop the `boot2docker` application.
 
-		$ boot2docker stop
+        $ boot2docker stop
 
 3. Go to the [boot2docker/osx-installer ](
    https://github.com/boot2docker/osx-installer/releases/latest) release page.
 
 4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
-section.
+   section.
 
 2. Install Boot2Docker by double-clicking the package.
 
-	The installer places Boot2Docker in your "Applications" folder.
+    The installer places Boot2Docker in your "Applications" folder.
 
 
-## Learning more and Acknowledgement
+## Uninstallation 
 
+1. Go to the [boot2docker/osx-installer ](
+   https://github.com/boot2docker/osx-installer/releases/latest) release page. 
+
+2. Download the source code by clicking `Source code (zip)` or
+   `Source code (tar.gz)` in the "Downloads" section.
+
+3. Extract the source code.
+
+4. Open a terminal on your local machine.
+
+5. Change to the directory where you extracted the source code:
+
+        $ cd <path to extracted source code>
+
+6. Make sure the uninstall.sh script is executable:
+
+        $ chmod +x uninstall.sh
+
+7. Run the uninstall.sh script:
+
+        $ ./uninstall.sh
+
+
+## Learning more and acknowledgement
 
 Use `boot2docker help` to list the full command line reference. For more
 information about using SSH or SCP to access the Boot2Docker VM, see the README
@@ -324,4 +349,4 @@
 Thanks to Chris Jones whose [blog](http://goo.gl/Be6cCk)  inspired me to redo
 this page.
 
-Continue with the [Docker User Guide](/userguide/).
\ No newline at end of file
+Continue with the [Docker User Guide](/userguide/).
diff --git a/docs/sources/installation/oracle.md b/docs/sources/installation/oracle.md
index 6d2f782..e74decd 100644
--- a/docs/sources/installation/oracle.md
+++ b/docs/sources/installation/oracle.md
@@ -43,35 +43,35 @@
 `/etc/yum.repos.d/public-yum-ol7.repo`
 and set `enabled=1` in the `[ol6_addons]` or the `[ol7_addons]` stanza.
 
-## To install Docker:
+## Installation 
 
 1. Ensure the appropriate *addons* channel or repository has been enabled.
 
 2. Use yum to install the Docker package:
 
-		$ sudo yum install docker
+        $ sudo yum install docker
 
-## To start Docker:
+## Starting Docker 
 
 1. Now that it's installed, start the Docker daemon:
 
-	1. On Oracle Linux 6:
+    1. On Oracle Linux 6:
 
-	    	$ sudo service docker start
+            $ sudo service docker start
 
-	2. On Oracle Linux 7:
+    2. On Oracle Linux 7:
 
-			$ sudo systemctl start docker.service
+            $ sudo systemctl start docker.service
 
 2. If you want the Docker daemon to start automatically at boot:
 
-	1. On Oracle Linux 6:
+    1. On Oracle Linux 6:
 
-	    	$ sudo chkconfig docker on
+            $ sudo chkconfig docker on
 
-	2. On Oracle Linux 7:
+    2. On Oracle Linux 7:
 
-			$ sudo systemctl enable docker.service
+            $ sudo systemctl enable docker.service
 
 **Done!**
 
@@ -99,6 +99,20 @@
 
 You can now continue with the [Docker User Guide](/userguide/).
 
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## Known issues
 
 ### Docker unmounts btrfs filesystem on shutdown
@@ -110,7 +124,7 @@
 On Oracle Linux 7, you can use a `systemd.mount` definition and modify the
 Docker `systemd.service` to depend on the btrfs mount defined in systemd.
 
-### SElinux Support on Oracle Linux 7
+### SElinux support on Oracle Linux 7
 SElinux must be set to `Permissive` or `Disabled` in `/etc/sysconfig/selinux` to
 use the btrfs storage engine on Oracle Linux 7.
 
diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md
index 58b2316..9b17346 100644
--- a/docs/sources/installation/rhel.md
+++ b/docs/sources/installation/rhel.md
@@ -7,7 +7,7 @@
 Docker is supported on the following versions of RHEL:
 
 - [*Red Hat Enterprise Linux 7 (64-bit)*](#red-hat-enterprise-linux-7-installation)
-- [*Red Hat Enterprise Linux 6.5 (64-bit)*](#red-hat-enterprise-linux-6.5-installation) or later
+- [*Red Hat Enterprise Linux 6.6 (64-bit)*](#red-hat-enterprise-linux-66-installation) or later
 
 ## Kernel support
 
@@ -16,7 +16,9 @@
 will cause issues if one decides to step outside that box and run
 non-distribution kernel packages.
 
-## Red Hat Enterprise Linux 7 Installation
+## Red Hat Enterprise Linux 7
+
+### Installation
 
 **Red Hat Enterprise Linux 7 (64 bit)** has [shipped with
 Docker](https://access.redhat.com/site/products/red-hat-enterprise-linux/docker-and-containers).
@@ -41,14 +43,28 @@
 
 Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
 
-## Red Hat Enterprise Linux 6.5 Installation
+### Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo yum -y remove docker
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
+## Red Hat Enterprise Linux 6.6
 
 You will need **64 bit** [RHEL
-6.5](https://access.redhat.com/site/articles/3078#RHEL6) or later, with
-a RHEL 6 kernel version 2.6.32-431 or higher as this has specific kernel
-fixes to allow Docker to work.
+6.6](https://access.redhat.com/site/articles/3078#RHEL6) or later, with
+a RHEL 6 kernel version 2.6.32-504.16.2 or higher as this has specific kernel
+fixes to allow Docker to work. Related issues: [#9856](https://github.com/docker/docker/issues/9856).
 
-Docker is available for **RHEL6.5** on EPEL. Please note that
+Docker is available for **RHEL6.6** on EPEL. Please note that
 this package is part of [Extra Packages for Enterprise Linux
 (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort to
 create and maintain additional packages for the RHEL distribution.
@@ -66,7 +82,7 @@
 >  vulnerabilities and severe bugs (such as those found in kernel 2.6.32)
 > are fixed.
 
-## Installation
+###  Installation
 
 Firstly, you need to install the EPEL repository. Please follow the
 [EPEL installation
@@ -90,6 +106,20 @@
 
 Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon).
 
+### Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo yum -y remove docker-io
+
+The above command will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
+
 ## Starting the Docker daemon
 
 Now that it's installed, let's start the Docker daemon.
@@ -118,7 +148,6 @@
 Docker runtime files, or make other customizations, read our Systemd article to
 learn how to [customize your Systemd Docker daemon options](/articles/systemd/).
 
-
 ## Issues?
 
 If you have any issues - please report them directly in the
diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md
index 85a37d7..652edc9 100644
--- a/docs/sources/installation/ubuntulinux.md
+++ b/docs/sources/installation/ubuntulinux.md
@@ -28,8 +28,8 @@
 To check your current kernel version, open a terminal and use `uname -r` to display
 your kernel version:
 
-	$ uname -r 
-	3.11.0-15-generic
+    $ uname -r 
+    3.11.0-15-generic
 
 >**Caution** Some Ubuntu OS versions **require a version higher than 3.10** to
 >run Docker, see the prerequisites on this page that apply to your Ubuntu
@@ -72,17 +72,17 @@
 
 2. Update your package manager.
 
-		$ sudo apt-get update
+        $ sudo apt-get update
 
 3. Install both the required and optional packages.
 
-		$ sudo apt-get install linux-image-generic-lts-trusty
+        $ sudo apt-get install linux-image-generic-lts-trusty
 
-	Depending on your environment, you may install more as described in the preceding table.
+    Depending on your environment, you may install more as described in the preceding table.
 
 4. Reboot your host.
 
-		$ sudo reboot
+        $ sudo reboot
 
 5. After your system reboots, go ahead and [install Docker](#installing-docker-on-ubuntu).
 
@@ -92,35 +92,42 @@
 Docker uses AUFS as the default storage backend. If you don't have this
 prerequisite installed, Docker's installation process adds it.
 
-##Installing Docker on Ubuntu
+##Installation
 
-Make sure you have intalled the prerequisites for your Ubuntu version. Then,
+Make sure you have installed the prerequisites for your Ubuntu version. Then,
 install Docker using the following:
 
 1. Log into your Ubuntu installation as a user with `sudo` privileges.
 
 2. Verify that you have `wget` installed.
 
-		$ which wget
+        $ which wget
 
-	 If `wget` isn't installed, install it after updating your manager:
+    If `wget` isn't installed, install it after updating your manager:
 
-		$ sudo apt-get update $ sudo apt-get install wget
+        $ sudo apt-get update
+        $ sudo apt-get install wget
 
 3. Get the latest Docker package.
 
-		$ wget -qO- https://get.docker.com/ | sh
+        $ wget -qO- https://get.docker.com/ | sh
 
-	 The system prompts you for your `sudo` password. Then, it downloads and
-	 installs Docker and its dependencies.
+    The system prompts you for your `sudo` password. Then, it downloads and
+    installs Docker and its dependencies.
+>**Note**: If your company is behind a filtering proxy, you may find that the
+>`apt-key`
+>command fails for the Docker repo during installation. To work around this,
+>add the key directly using the following:
+>
+>       $ wget -qO- https://get.docker.com/gpg | sudo apt-key add -
 
 4. Verify `docker` is installed correctly.
 
-		$ sudo docker run hello-world
+        $ sudo docker run hello-world
 
-	This command downloads a test image and runs it in a container.
+    This command downloads a test image and runs it in a container.
 
-## Optional Configurations for Docker on Ubuntu 
+## Optional configurations for Docker on Ubuntu 
 
 This section contains optional procedures for configuring your Ubuntu to work
 better with Docker.
@@ -130,7 +137,7 @@
 * [Enable UFW forwarding](#enable-ufw-forwarding) 
 * [Configure a DNS server for use by Docker](#configure-a-dns-server-for-docker)
 
-### Create a docker group		
+### Create a Docker group		
 
 The `docker` daemon binds to a Unix socket instead of a TCP port. By default
 that Unix socket is owned by the user `root` and other users can access it with
@@ -148,19 +155,19 @@
 
 1. Log into Ubuntu as a user with `sudo` privileges.
 
-	 This procedure assumes you log in as the `ubuntu` user.
+    This procedure assumes you log in as the `ubuntu` user.
 
 3. Create the `docker` group and add your user.
 
-		$ sudo usermod -aG docker ubuntu
+        $ sudo usermod -aG docker ubuntu
 
 3. Log out and log back in.
 
-	This ensures your user is running with the correct permissions.
+    This ensures your user is running with the correct permissions.
 
 4. Verify your work by running `docker` without `sudo`.
 
-		$ docker run hello-world
+        $ docker run hello-world
 
 
 ### Adjust memory and swap accounting
@@ -180,13 +187,13 @@
 
 3. Set the `GRUB_CMDLINE_LINUX` value as follows:
 
-    	GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
+        GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
 
 4. Save and close the file.
 
 5. Update GRUB.
 
-		$ sudo update-grub
+        $ sudo update-grub
 
 6. Reboot your system.
 
@@ -209,25 +216,25 @@
 
 2. Verify that UFW is installed and enabled.
 
-		$ sudo ufw status
+        $ sudo ufw status
 
 3. Open the `/etc/default/ufw` file for editing.
 
-		$ sudo nano /etc/default/ufw
+        $ sudo nano /etc/default/ufw
 
 4. Set the `DEFAULT_FORWARD_POLICY` policy to:
 
-    	DEFAULT_FORWARD_POLICY="ACCEPT"
+        DEFAULT_FORWARD_POLICY="ACCEPT"
 
 5. Save and close the file.
 
 6. Reload UFW to use the new setting.
 
-		$ sudo ufw reload
+        $ sudo ufw reload
 
 7. Allow incoming connections on the Docker port.
 
-		$ sudo ufw allow 2375/tcp
+        $ sudo ufw allow 2375/tcp
 
 ### Configure a DNS server for use by Docker
 
@@ -246,7 +253,7 @@
 Instead, Docker defaults to using an external nameserver.
 
 To avoid this warning, you can specify a DNS server for use by Docker
-containers. Or, you can disable `dnsmasq` in NetworkManager. Though, disabiling
+containers. Or, you can disable `dnsmasq` in NetworkManager. Though, disabling
 `dnsmasq` might make DNS resolution slower on some networks.
 
 To specify a DNS server for use by Docker:
@@ -255,25 +262,25 @@
 
 2. Open the `/etc/default/docker` file for editing.
 
-     	$ sudo nano /etc/default/docker
+        $ sudo nano /etc/default/docker
 
 3. Add a setting for Docker.
 
-      	DOCKER_OPTS="--dns 8.8.8.8"
+        DOCKER_OPTS="--dns 8.8.8.8"
 
     Replace `8.8.8.8` with a local DNS server such as `192.168.1.1`. You can also
     specify multiple DNS servers. Separated them with spaces, for example:
 
-      	--dns 8.8.8.8 --dns 192.168.1.1
+        --dns 8.8.8.8 --dns 192.168.1.1
 
-	>**Warning**: If you're doing this on a laptop which connects to various
-	>networks, make sure to choose a public DNS server.
+    >**Warning**: If you're doing this on a laptop which connects to various
+    >networks, make sure to choose a public DNS server.
 
 4. Save and close the file.
 
 5. Restart the Docker daemon.
 
-    	$ sudo restart docker
+        $ sudo restart docker
 
 
 &nbsp;
@@ -282,24 +289,41 @@
 **Or, as an alternative to the previous procedure,** disable `dnsmasq` in
 NetworkManager (this might slow your network).
 
-1. Open the `/etc/default/docker` file for editing.
+1. Open the `/etc/NetworkManager/NetworkManager.conf` file for editing.
 
-		$ sudo nano /etc/NetworkManager/NetworkManager.conf
+        $ sudo nano /etc/NetworkManager/NetworkManager.conf
 
 2. Comment out the `dns=dsnmasq` line:
 
-		dns=dnsmasq
+        dns=dnsmasq
 
 3. Save and close the file.
 
 4. Restart both the NetworkManager and Docker.
 
-		$ sudo restart network-manager $ sudo restart docker
+        $ sudo restart network-manager $ sudo restart docker
 
 
 ## Upgrade Docker
 
-To install the latest version of Docker, use the standard `-N` flag with `wget`:
+To install the latest version of Docker with `wget`:
 
-	$ wget -N https://get.docker.com/ | sh
+    $ wget -qO- https://get.docker.com/ | sh
 
+## Uninstallation
+
+To uninstall the Docker package:
+
+    $ sudo apt-get purge lxc-docker
+
+To uninstall the Docker package and dependencies that are no longer needed:
+
+    $ sudo apt-get autoremove --purge lxc-docker
+
+The above commands will not remove images, containers, volumes, or user created
+configuration files on your host. If you wish to delete all images, containers,
+and volumes run the following command:
+
+    $ rm -rf /var/lib/docker
+
+You must delete the user created configuration files manually.
diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md
index 95f55af..b5a1484 100644
--- a/docs/sources/installation/windows.md
+++ b/docs/sources/installation/windows.md
@@ -20,6 +20,8 @@
 containers will still be running on Linux. Until the Docker engine for Windows
 is developed, you can launch only Linux containers from your Windows machine.
 
+![Windows Architecture Diagram](/installation/images/win_docker_host.svg)
+
 ## Demonstration
 
 <iframe width="640" height="480" src="//www.youtube.com/embed/TjMU3bDX4vo?rel=0" frameborder="0" allowfullscreen></iframe>
@@ -28,7 +30,7 @@
 
 1. Download the latest release of the
    [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases/latest).
-2. Run the installer, which will install Docker Client or Windows, VirtualBox,
+2. Run the installer, which will install Docker Client for Windows, VirtualBox,
    Git for Windows (MSYS-git), the boot2docker Linux ISO, and the Boot2Docker
    management tool.
    ![](/installation/images/windows-installer.png)
@@ -57,7 +59,7 @@
 This should download the very small `hello-world` image and print a
 `Hello from Docker.` message.
 
-## Using docker from Windows Command Line Prompt (cmd.exe)
+## Using Docker from Windows Command Line Prompt (cmd.exe)
 
 Launch a Windows Command Line Prompt (cmd.exe).
 
@@ -65,7 +67,7 @@
 include `bin` folder of the Git installation (which has ssh.exe) to the `%PATH%`
 environment variable by running:
 
-	set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
+    set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
 
 and then we can run the `boot2docker start` command to start the Boot2Docker VM.
 (Run `boot2docker init` command if you get an error saying machine does not
@@ -75,11 +77,11 @@
 
 ![](/installation/images/windows-boot2docker-cmd.png)
 
-## Using docker from PowerShell
+## Using Docker from PowerShell
 
 Launch a PowerShell window, then you need to add `ssh.exe` to your PATH:
 
-	$Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
+    $Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
 
 and after running `boot2docker start` command it will print PowerShell commands
 to set the environment variables to connect Docker running inside VM. Run these
@@ -148,6 +150,12 @@
 - then click: "Save Private Key".
 - Then use the saved file to login with PuTTY using `docker@127.0.0.1:2022`.
 
+## Uninstallation
+
+You can uninstall Boot2Docker using Window's standard process for removing programs.
+This process does not remove the `docker-install.exe` file. You must delete that file
+yourself.
+
 ## References
 
 If you have Docker hosts running and if you don't wish to do a 
diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md
index 9c99959..060428e 100644
--- a/docs/sources/introduction/understanding-docker.md
+++ b/docs/sources/introduction/understanding-docker.md
@@ -109,7 +109,7 @@
 images, or you can download Docker images that other people have already created.
 Docker images are the **build** component of Docker.
 
-#### Docker Registries
+#### Docker registries
 Docker registries hold images. These are public or private stores from which you upload
 or download images. The public Docker registry is called
 [Docker Hub](http://hub.docker.com). It provides a huge collection of existing
@@ -135,7 +135,7 @@
 
 Let's look at how these elements combine together to make Docker work.
 
-### How does a Docker Image work? 
+### How does a Docker image work? 
 We've already seen that Docker images are read-only templates from which Docker
 containers are launched. Each image consists of a series of layers. Docker
 makes use of [union file systems](http://en.wikipedia.org/wiki/UnionFS) to
@@ -198,7 +198,7 @@
 Either by using the `docker` binary or via the API, the Docker client tells the Docker
 daemon to run a container.
 
-    $ sudo docker run -i -t ubuntu /bin/bash
+    $ docker run -i -t ubuntu /bin/bash
 
 Let's break down this command. The Docker client is launched using the `docker`
 binary with the `run` option telling it to launch a new container. The bare
@@ -280,7 +280,7 @@
 ### Installing Docker
 Visit the [installation section](/installation/#installation).
 
-### The Docker User Guide
+### The Docker user guide
 [Learn Docker in depth](/userguide/).
 
 
diff --git a/docs/sources/project/advanced-contributing.md b/docs/sources/project/advanced-contributing.md
index df5756d..7ee7a86 100644
--- a/docs/sources/project/advanced-contributing.md
+++ b/docs/sources/project/advanced-contributing.md
@@ -67,7 +67,7 @@
 
     The design proposals are <a
     href="https://github.com/docker/docker/pulls?q=is%3Aopen+is%3Apr+label%
-    3AProposal" target="_blank">all online in our GitHub pull requests</a>. 
+    3Akind%2Fproposal" target="_blank">all online in our GitHub pull requests</a>. 
     
 3. Talk to the community about your idea.
 
@@ -89,7 +89,7 @@
     This is a Markdown file that describes your idea. Your proposal
     should include information like:
 
-    * Why is this changed needed or what are the use cases?
+    * Why is this change needed or what are the use cases?
     * What are the requirements this change should meet?
     * What are some ways to design/implement this feature?
     * Which design/implementation do you think is best and why?
@@ -137,3 +137,16 @@
 
 14. Acceptance and merge!
 
+## About the advanced process
+
+Docker is a large project. Our core team gets a great many design proposals.
+Design proposal discussions can span days, weeks, and longer. The number of comments can reach the 100s.
+In that situation, following the discussion flow and the decisions reached is crucial.
+
+Making a pull request with a design proposal simplifies this process:
+* you can leave comments on specific design proposal line
+* replies around line are easy to track
+* as a proposal changes and is updated, pages reset as line items resolve
+* Github maintains the entire history
+
+While proposals in pull requests do not end up merged into a master repository, they provide a convenient tool for managing the design process.
diff --git a/docs/sources/project/coding-style.md b/docs/sources/project/coding-style.md
index e5b6f5f..57f6389 100644
--- a/docs/sources/project/coding-style.md
+++ b/docs/sources/project/coding-style.md
@@ -1,13 +1,13 @@
-page_title: Coding Style Checklist
+page_title: Coding style checklist
 page_description: List of guidelines for coding Docker contributions
 page_keywords: change, commit, squash, request, pull request, test, unit test, integration tests, Go, gofmt, LGTM
 
-# Coding Style Checklist
+# Coding style checklist
 
 This checklist summarizes the material you experienced working through [make a
 code contribution](/project/make-a-contribution) and [advanced
-contributing](/project/advanced-contributing). The checklist applies to code
-that is program code or code that is documentation code.
+contributing](/project/advanced-contributing). The checklist applies to both 
+program code and documentation code.
 
 ## Change and commit code
 
diff --git a/docs/sources/project/create-pr.md b/docs/sources/project/create-pr.md
index 197aee8..613ab69 100644
--- a/docs/sources/project/create-pr.md
+++ b/docs/sources/project/create-pr.md
@@ -11,7 +11,7 @@
 You can see <a href="https://github.com/docker/docker/pulls" target="_blank">the
 list of active pull requests to Docker</a> on GitHub.
 
-## Check Your Work
+## Check your work
 
 Before you create a pull request, check your work.
 
@@ -22,7 +22,7 @@
 2. Checkout your feature branch.
 
         $ git checkout 11038-fix-rhel-link
-        Already on '11038-fix-rhel-link'
+        Switched to branch '11038-fix-rhel-link'
 
 3. Run the full test suite on your branch.
 
@@ -41,7 +41,11 @@
 
 Always rebase and squash your commits before making a pull request. 
 
-1. Fetch any of the last minute changes from `docker/docker`.
+1. Checkout your feature branch in your local `docker-fork` repository.
+
+    This is the branch associated with your request.
+
+2. Fetch any last minute changes from `docker/docker`.
 
         $ git fetch upstream master
         From github.com:docker/docker
@@ -56,28 +60,28 @@
         pick 1a79f55 Tweak some of the other text for grammar
         pick 53e4983 Fix a link
         pick 3ce07bb Add a new line about RHEL
-        
-    If you run into trouble, `git --rebase abort` removes any changes and gets
-    you back to where you started. 
 
-4. Squash the `pick` keyword with `squash` on all but the first commit.
+5. Replace the `pick` keyword with `squash` on all but the first commit.
 
         pick 1a79f55 Tweak some of the other text for grammar
         squash 53e4983 Fix a link
         squash 3ce07bb Add a new line about RHEL
 
-    After closing the file, `git` opens your editor again to edit the commit
-    message. 
+    After you save the changes and quit from the editor, git starts
+    the rebase, reporting the progress along the way. Sometimes
+    your changes can conflict with the work of others. If git
+    encounters a conflict, it stops the rebase, and prints guidance
+    for how to correct the conflict.
 
-5. Edit and save your commit message.
+6. Edit and save your commit message.
 
 		`git commit -s`
 
-		Make sure your message includes <a href="./set-up-git" target="_blank>your signature</a>.
+    Make sure your message includes <a href="./set-up-git" target="_blank>your signature</a>.
 
-8. Push any changes to your fork on GitHub.
+7. Force push any changes to your fork on GitHub.
 
-        $ git push origin 11038-fix-rhel-link
+        $ git push -f origin 11038-fix-rhel-link
         
 ## Create a PR on GitHub
 
@@ -108,7 +112,7 @@
 4. Scroll down and verify the PR contains the commits and changes you expect.
 
     For example, is the file count correct? Are the changes in the files what
-    you expect.
+    you expect?
 
     ![Commits](/project/images/commits_expected.png)
 
@@ -124,4 +128,4 @@
 
 Congratulations, you've created your first pull request to Docker. The next
 step is for you learn how to [participate in your PR's
-review](/project/review-pr/).
\ No newline at end of file
+review](/project/review-pr/).
diff --git a/docs/sources/project/doc-style.md b/docs/sources/project/doc-style.md
index 20e4a9f..0aa0f41 100644
--- a/docs/sources/project/doc-style.md
+++ b/docs/sources/project/doc-style.md
@@ -1,4 +1,4 @@
-page_title: Style Guide for Docker Documentation
+page_title: Style guide for Docker documentation
 page_description: Style guide for Docker documentation describing standards and conventions for contributors
 page_keywords: style, guide, docker, documentation
 
diff --git a/docs/sources/project/find-an-issue.md b/docs/sources/project/find-an-issue.md
index 0a36c88..3e853a6 100644
--- a/docs/sources/project/find-an-issue.md
+++ b/docs/sources/project/find-an-issue.md
@@ -158,23 +158,22 @@
         origin	https://github.com/moxiegirl/docker.git (fetch)
         origin	https://github.com/moxiegirl/docker.git (push)
         upstream	https://github.com/docker/docker.git (fetch)
-        upstream	https://github.com/docker/docker.git (
+        upstream	https://github.com/docker/docker.git (push)
 
     If the `upstream` is missing, add it.
 
         $ git remote add upstream https://github.com/docker/docker.git
 
-5. Fetch all the changes from the `upstream/master` branch.
+5. Fetch all the changes from the `upstream master` branch.
 
-        $ git fetch upstream/master
+        $ git fetch upstream master
         remote: Counting objects: 141, done.
         remote: Compressing objects: 100% (29/29), done.
         remote: Total 141 (delta 52), reused 46 (delta 46), pack-reused 66
         Receiving objects: 100% (141/141), 112.43 KiB | 0 bytes/s, done.
         Resolving deltas: 100% (79/79), done.
-        From github.com:docker/docker
-           9ffdf1e..01d09e4  docs       -> upstream/docs
-           05ba127..ac2521b  master     -> upstream/master
+	    From github.com:docker/docker
+	     * branch            master     -> FETCH_HEAD
 
     This command says get all the changes from the `master` branch belonging to
     the `upstream` remote.
@@ -185,8 +184,8 @@
         First, rewinding head to replay your work on top of it...
         Fast-forwarded master to upstream/master.
 
-    This command writes all the commits from the upstream branch into your local
-    branch.
+    This command applies all the commits from the upstream master to your local
+    master.
 
 8.  Check the status of your local branch.
 
@@ -196,12 +195,12 @@
           (use "git push" to publish your local commits)
         nothing to commit, working directory clean
 
-    Your local repository now has any changes from the `upstream` remote.  You
-    need to push the changes to your own remote fork which is `origin/master`.
+    Your local repository now has all the changes from the `upstream` remote. You 
+    need to push the changes to your own remote fork which is `origin master`.
 
-9. Push the rebased master to `origin/master`.
+9. Push the rebased master to `origin master`.
 
-        $ git push origin
+        $ git push origin master
         Username for 'https://github.com': moxiegirl
         Password for 'https://moxiegirl@github.com': 
         Counting objects: 223, done.
@@ -219,7 +218,7 @@
         $ git checkout -b 11038-fix-rhel-link
         Switched to a new branch '11038-fix-rhel-link'
 
-    Your branch should be up-to-date with the upstream/master. Why? Because you
+    Your branch should be up-to-date with the `upstream/master`. Why? Because you
     branched off a freshly synced master.  Let's check this anyway in the next
     step.
 
@@ -229,8 +228,8 @@
         Current branch 11038-fix-rhel-link is up to date.
 
     At this point, your local branch, your remote repository, and the Docker
-    repository all have identical code. You are ready to make changesfor your
-    issues.
+    repository all have identical code. You are ready to make changes for your
+    issue.
 
 
 ## Where to go next
diff --git a/docs/sources/project/glossary.md b/docs/sources/project/glossary.md
deleted file mode 100644
index 5324cda..0000000
--- a/docs/sources/project/glossary.md
+++ /dev/null
@@ -1,7 +0,0 @@
-page_title: Glossary
-page_description: tbd
-page_keywords: tbd
-
-## Glossary
-
-TBD
\ No newline at end of file
diff --git a/docs/sources/project/images/git_bash.png b/docs/sources/project/images/git_bash.png
new file mode 100644
index 0000000..153fd2f
--- /dev/null
+++ b/docs/sources/project/images/git_bash.png
Binary files differ
diff --git a/docs/sources/project/images/include_gcc.png b/docs/sources/project/images/include_gcc.png
new file mode 100644
index 0000000..e48f50c
--- /dev/null
+++ b/docs/sources/project/images/include_gcc.png
Binary files differ
diff --git a/docs/sources/project/images/path_variable.png b/docs/sources/project/images/path_variable.png
new file mode 100644
index 0000000..52f197a
--- /dev/null
+++ b/docs/sources/project/images/path_variable.png
Binary files differ
diff --git a/docs/sources/project/images/windows-env-vars.png b/docs/sources/project/images/windows-env-vars.png
new file mode 100644
index 0000000..6e9c44b
--- /dev/null
+++ b/docs/sources/project/images/windows-env-vars.png
Binary files differ
diff --git a/docs/sources/project/images/windows-mingw.png b/docs/sources/project/images/windows-mingw.png
new file mode 100644
index 0000000..09f53ba
--- /dev/null
+++ b/docs/sources/project/images/windows-mingw.png
Binary files differ
diff --git a/docs/sources/project/review-pr.md b/docs/sources/project/review-pr.md
index e8cb6c7..b143c30 100644
--- a/docs/sources/project/review-pr.md
+++ b/docs/sources/project/review-pr.md
@@ -1,9 +1,9 @@
-page_title: Participate in the PR Review
+page_title: Participate in the PR review
 page_description: Basic workflow for Docker contributions
 page_keywords: contribute, pull request, review, workflow, beginner, squash, commit
 
 
-# Participate in the PR Review
+# Participate in the PR review
 
 Creating a pull request is nearly the end of the contribution process. At this
 point, your code is reviewed both by our continuous integration (CI) systems and
@@ -14,7 +14,7 @@
 "beings" to review your contribution.
 
 
-## How we proces your review
+## How we process your review
 
 First to review your pull request is Gordon. Gordon is fast. He checks your
 pull request (PR) for common problems like a missing signature. If Gordon finds a
@@ -45,19 +45,27 @@
 their comments specific and brief. If they ask you to make a change, you'll
 need to update your pull request with additional changes.
 
-## Update an Existing Pull Request
+## Update an existing pull request
 
 To update your existing pull request:
 
-1. Change one or more files in your local `docker-fork` repository.
+1. Checkout the PR branch in your local `docker-fork` repository.  
 
-2. Commit the change with the `git commit --amend` command.
+    This is the branch associated with your request.
+
+2. Change one or more files and then stage your changes.
+
+    The command syntax is:
+
+    	git add <path_or_filename>
+
+3. Commit the change.
 
     	$ git commit --amend 
 
     Git opens an editor containing your last commit message.
 
-3. Adjust your last comment to reflect this new change.
+4. Adjust your last comment to reflect this new change.
 
         Added a new sentence per Anaud's suggestion	
 
@@ -72,15 +80,17 @@
         #		modified:   docs/sources/installation/mac.md
         #		modified:   docs/sources/installation/rhel.md
 
-4. Push to your origin.
+5. Force push the change to your origin.
 
-        $ git push origin
+    The command syntax is:
 
-5. Open your browser to your pull request on GitHub.
+        git push -f origin <branch_name>
+
+6. Open your browser to your pull request on GitHub.
 
     You should see your pull request now contains your newly pushed code.
 
-6. Add a comment to your pull request.
+7. Add a comment to your pull request.
 
     GitHub only notifies PR participants when you comment. For example, you can
     mention that you updated your PR. Your comment alerts the maintainers that
diff --git a/docs/sources/project/set-up-dev-env.md b/docs/sources/project/set-up-dev-env.md
index 637eef6..60a59b6 100644
--- a/docs/sources/project/set-up-dev-env.md
+++ b/docs/sources/project/set-up-dev-env.md
@@ -7,7 +7,7 @@
 In this section, you learn to develop like a member of Docker's core team.
 The `docker` repository includes a `Dockerfile` at its root. This file defines
 Docker's development environment.  The `Dockerfile` lists the environment's
-dependencies: system libraries and binaries, go environment, go dependencies,
+dependencies: system libraries and binaries, Go environment, Go dependencies,
 etc. 
 
 Docker's development environment is itself, ultimately a Docker container.
@@ -15,20 +15,19 @@
 run a Docker container, and develop code in the container. Docker itself builds,
 tests, and releases new Docker versions using this container.
 
-If you followed the procedures that <a href="./set-up-prereqs" target="_blank">
-set up the prerequisites</a>, you should have a fork of the `docker/docker`
+If you followed the procedures that <a href="/project/set-up-git" target="_blank">
+set up Git for contributing</a>, you should have a fork of the `docker/docker`
 repository. You also created a branch called `dry-run-test`. In this section,
 you continue working with your fork on this branch.
 
 ##  Clean your host of Docker artifacts
 
-Docker developers run the latest stable release of the Docker software; Or 
-Boot2docker and Docker if their machine is Mac OS X. They clean their local
+Docker developers run the latest stable release of the Docker software (with Boot2Docker if their machine is Mac OS X). They clean their local
 hosts of unnecessary Docker artifacts such as stopped containers or unused
-images. Cleaning unnecessary artifacts isn't strictly necessary but it is
+images. Cleaning unnecessary artifacts isn't strictly necessary, but it is
 good practice, so it is included here.
 
-To remove unnecessary artifacts.
+To remove unnecessary artifacts,
 
 1. Verify that you have no unnecessary containers running on your host.
 
@@ -75,9 +74,9 @@
 
         $ docker rmi -f $(docker images -q -a -f dangling=true)
 
-    This command uses `docker images` to lists all images (`-a` flag) by numeric
-    IDs (`-q` flag) and filter them to find dangling images (`-f
-    dangling=true`). Then, the `docker rmi` command forcibly (`-f` flag) removes
+    This command uses `docker images` to list all images (`-a` flag) by numeric
+    IDs (`-q` flag) and filter them to find dangling images (`-f dangling=true`).
+    Then, the `docker rmi` command forcibly (`-f` flag) removes
     the resulting list. To remove just one image, use the `docker rmi ID`
     command.
 
@@ -97,10 +96,17 @@
 3. Change into the root of your forked repository.
 
         $ cd ~/repos/docker-fork 
+        
+	If you are following along with this guide, you created a `dry-run-test`
+	branch when you <a href="/project/set-up-git" target="_blank"> set up Git for
+	contributing</a>.
 
 4. Ensure you are on your `dry-run-test` branch.
 
         $ git checkout dry-run-test
+        
+    If you get a message that the branch doesn't exist, add the `-b` flag (git checkout -b dry-run-test) so the
+    command both creates the branch and checks it out.
 
 5. Compile your development environment container into an image.
 
@@ -194,7 +200,7 @@
 
     ![Multiple terminals](/project/images/three_terms.png)
 
-    Mac OSX users, make sure you run `eval "$(boot2docker shellinit)"` in any new 
+    Mac OS X users, make sure you run `eval "$(boot2docker shellinit)"` in any new
     terminals.
 
 2. In a terminal, create a new container from your `dry-run-test` image.
@@ -203,9 +209,9 @@
         root@5f8630b873fe:/go/src/github.com/docker/docker# 
 
     The command creates a container from your `dry-run-test` image. It opens an
-    interactive terminal (`-ti`) running a `/bin/bash shell`.  The
+    interactive terminal (`-ti`) running a `/bin/bash` shell.  The
     `--privileged` flag gives the container access to kernel features and device
-    access. It is this flag that allows you to run a container in a container.
+    access. This flag allows you to run a container in a container.
     Finally, the `-rm` flag instructs Docker to remove the container when you
     exit the `/bin/bash` shell.
 
@@ -232,7 +238,8 @@
 
     You will create one in the next steps.
 
-4. From the `/go/src/github.com/docker/docker` directory make a `docker` binary with the `make.sh` script.
+4. From the `/go/src/github.com/docker/docker` directory make a `docker` binary
+with the `make.sh` script.
 
         root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh binary
 
@@ -244,10 +251,9 @@
     When the command completes successfully, you should see the following
     output:
 
-        ---> Making bundle: ubuntu (in bundles/1.5.0-dev/ubuntu)
-        Created package {:path=>"lxc-docker-1.5.0-dev_1.5.0~dev~git20150223.181106.0.1ab0d23_amd64.deb"}
-        Created package {:path=>"lxc-docker_1.5.0~dev~git20150223.181106.0.1ab0d23_amd64.deb"}
-
+	---> Making bundle: binary (in bundles/1.5.0-dev/binary)
+	Created binary: /go/src/github.com/docker/docker/bundles/1.5.0-dev/binary/docker-1.5.0-dev
+	
 5. List all the contents of the `binary` directory.
 
         root@5f8630b873fe:/go/src/github.com/docker/docker#  ls bundles/1.5.0-dev/binary/
@@ -266,15 +272,15 @@
         root@5f8630b873fe:/go/src/github.com/docker/docker# docker --version
         Docker version 1.5.0-dev, build 6e728fb
 
-    Inside the container you are running a development version. This is version
-    on the current branch it reflects the value of the `VERSION` file at the
+    Inside the container you are running a development version. This is the version
+    on the current branch. It reflects the value of the `VERSION` file at the
     root of your `docker-fork` repository.
 
 8. Start a `docker` daemon running inside your container.
 
         root@5f8630b873fe:/go/src/github.com/docker/docker#  docker -dD
 
-    The `-dD` flag starts the daemon in debug mode; You'll find this useful
+    The `-dD` flag starts the daemon in debug mode. You'll find this useful
     when debugging your code.
 
 9. Bring up one of the terminals on your local host.
@@ -357,7 +363,8 @@
 
     Your location will be different because it reflects your environment. 
 
-3. Create a container using `dry-run-test` but this time mount your repository onto the `/go` directory inside the container.
+3. Create a container using `dry-run-test`, but this time, mount your repository
+onto the `/go` directory inside the container.
 
         $  docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker dry-run-test /bin/bash
 
@@ -375,7 +382,7 @@
 
         $ cd ~/repos/docker-fork/
 
-6. Create a fresh binary but this time use the `make` command.
+6. Create a fresh binary, but this time, use the `make` command.
 
         $ make BINDDIR=. binary
 
@@ -408,4 +415,5 @@
 Congratulations, you have successfully achieved Docker inception. At this point,
 you've set up your development environment and verified almost all the essential
 processes you need to contribute. Of course, before you start contributing, 
-[you'll need to learn one more piece of the development environment, the test framework](/project/test-and-docs/).
+[you'll need to learn one more piece of the development environment, the test
+framework](/project/test-and-docs/).
diff --git a/docs/sources/project/set-up-git.md b/docs/sources/project/set-up-git.md
index 2292d93..d67ff81 100644
--- a/docs/sources/project/set-up-git.md
+++ b/docs/sources/project/set-up-git.md
@@ -46,9 +46,12 @@
     that instead. You'll need to convert what you see in the guide to what is
     appropriate to your tool.
 
-5. Open a terminal window on your local host and change to your home directory.
+5. Open a terminal window on your local host and change to your home directory. 
 
         $ cd ~
+        
+  In Windows, you'll work in your Boot2Docker window instead of Powershell or
+  a `cmd` window.
 
 6. Create a `repos` directory.
 
@@ -88,7 +91,7 @@
 As you change code in your fork, you'll want to keep it in sync with the changes
 others make in the `docker/docker` repository. To make syncing easier, you'll
 also add a _remote_ called `upstream` that points to `docker/docker`. A remote
-is just another a project version hosted on the internet or network.
+is just another project version hosted on the internet or network.
 
 To configure your username, email, and add a remote:
 
@@ -134,12 +137,12 @@
 
 ## Create and push a branch
 
-As you change code in your fork, you make your changes on a repository branch.
+As you change code in your fork, make your changes on a repository branch.
 The branch name should reflect what you are working on. In this section, you
 create a branch, make a change, and push it up to your fork. 
 
 This branch is just for testing your config for this guide. The changes are part
-of a dry run so the branch name is going to be dry-run-test. To create an push
+of a dry run, so the branch name will be dry-run-test. To create and push
 the branch to your fork on GitHub:
 
 1. Open a terminal and go to the root of your `docker-fork`.
@@ -171,7 +174,7 @@
 
     You can use any text editor you are comfortable with.
 
-6. Close and save the file.
+6. Save and close the file.
 
 7. Check the status of your branch. 
 
diff --git a/docs/sources/project/software-req-win.md b/docs/sources/project/software-req-win.md
new file mode 100644
index 0000000..38cd73d
--- /dev/null
+++ b/docs/sources/project/software-req-win.md
@@ -0,0 +1,258 @@
+page_title: Set up for development on Windows
+page_description: How to set up a server to test Docker Windows client
+page_keywords: development, inception, container, image Dockerfile, dependencies, Go, artifacts, windows
+
+
+# Get the required software for Windows
+
+This page explains how to get the software you need to use a  a Windows Server
+2012 or Windows 8 machine for Docker development. Before you begin contributing
+you must have:
+
+- a GitHub account
+- Git for Windows (msysGit)
+- TDM-GCC, a compiler suite for Windows
+- MinGW (tar and xz)
+- Go language
+
+> **Note**: This installation procedure refers to the `C:\` drive. If you system's main drive
+is `D:\` you'll need to substitute that in where appropriate in these
+instructions.
+
+### Get a GitHub account
+
+To contribute to the Docker project, you will need a <a
+href="https://github.com" target="_blank">GitHub account</a>. A free account is
+fine. All the Docker project repositories are public and visible to everyone.
+
+You should also have some experience using both the GitHub application and `git`
+on the command line. 
+
+## Install Git for Windows
+
+Git for Windows includes several tools including msysGit, which is a build
+environment. The environment contains the tools you need for development such as
+Git and a Git Bash shell.
+
+1. Browse to the [Git for Windows](https://msysgit.github.io/) download page.
+
+2. Click **Download**.
+
+	Windows prompts you to save the file to your machine.
+
+3. Run the saved file.
+
+	The system displays the **Git Setup** wizard.
+
+4. Click the **Next** button to move through the wizard and accept all the defaults.
+
+5. Click **Finish** when you are done.
+
+## Installing TDM-GCC
+
+TDM-GCC is a compiler suite for Windows. You'll use this suite to compile the
+Docker Go code as you develop.
+
+1. Browse to
+   [tdm-gcc download page](http://tdm-gcc.tdragon.net/download).
+
+2. Click on the latest 64-bit version of the package.
+
+	Windows prompts you to save the file to your machine
+
+3. Set up the suite by running the downloaded file.
+
+	The system opens the **TDM-GCC Setup** wizard.
+	
+4. Click **Create**.
+
+5. Click the **Next** button to move through the wizard and accept all the defaults.
+
+6. Click **Finish** when you are done.
+
+
+## Installing MinGW (tar and xz)
+
+MinGW is a minimalist port of the GNU Compiler Collection (GCC). In this
+procedure, you first download and install the MinGW installation manager. Then,
+you use the manager to install the `tar` and `xz` tools from the collection.
+
+1. Browse to MinGW 
+   [SourceForge](http://sourceforge.net/projects/mingw/).
+
+2. Click **Download**.
+
+	 Windows prompts you to save the file to your machine
+
+3. Run the downloaded file.
+
+   The system opens the **MinGW Installation Manager Setup Tool**
+
+4. Choose **Install**  install the MinGW Installation Manager.
+
+5. Press **Continue**.
+
+	The system installs and then opens the MinGW Installation Manager.
+	
+6. Press **Continue** after the install completes to open the manager.
+
+7. Select **All Packages > MSYS Base System** from the left hand menu.
+
+	The system displays the available packages.
+
+8. Click on the the **msys-tar bin** package and choose **Mark for Installation**.
+
+9. Click on the **msys-xz bin** package and choose **Mark for Installation**.
+  
+10. Select **Installation > Apply Changes**, to install the selected packages.
+
+	The system displays the **Schedule of Pending Actions Dialog**.
+
+    ![windows-mingw](/project/images/windows-mingw.png)
+    
+11. Press **Apply**
+
+	MingGW installs the packages for you.
+
+12. Close the dialog and the MinGW Installation Manager.
+
+
+## Set up your environment variables
+
+You'll need to add the compiler to your `Path` environment variable. 
+
+1. Open the **Control Panel**.
+
+2. Choose **System and Security > System**. 
+
+3. Click the **Advanced system settings** link in the sidebar.
+
+	The system opens the **System Properties** dialog.
+
+3. Select the **Advanced** tab.
+
+4. Click **Environment Variables**. 
+
+	The system opens the **Environment Variables dialog** dialog.
+
+5. Locate the **System variables** area and scroll to the **Path**
+   variable.
+
+    ![windows-mingw](/project/images/path_variable.png)
+
+6. Click **Edit** to edit the variable (you can also double-click it).
+
+	The system opens the **Edit System Variable** dialog.
+
+7. Make sure the `Path` includes `C:\TDM-GCC64\bin` 
+
+	 ![include gcc](/project/images/include_gcc.png)
+	 
+	 If you don't see `C:\TDM-GCC64\bin`, add it.
+		
+8. Press **OK** to close this dialog.
+	
+9. Press **OK** twice to close out of the remaining dialogs.
+
+## Install Go and cross-compile it
+
+In this section, you install the Go language. Then, you build the source so that it can cross-compile for `linux/amd64` architectures.
+
+1. Open [Go Language download](http://golang.org/dl/) page in your browser.
+
+2. Locate and click the latest `.msi` installer.
+
+	The system prompts you to save the file.
+
+3. Run the installer.
+
+	The system opens the **Go Programming Language Setup** dialog.
+
+4. Select all the defaults to install.
+
+5. Press **Finish** to close the installation dialog.
+
+6. Start a command prompt.
+
+7. Change to the Go `src` directory.
+
+		cd c:\Go\src 
+
+8. Set the following Go variables
+
+		c:\Go\src> set GOOS=linux
+		c:\Go\src> set GOARCH=amd64
+     
+9. Compile the source.
+
+		c:\Go\src> make.bat
+    
+	Compiling the source also adds a number of variables to your Windows environment.
+
+## Get the Docker repository
+
+In this step, you start a Git `bash` terminal and get the Docker source code from
+Github. 
+
+1. Locate the **Git Bash** program and start it.
+
+	Recall that **Git Bash** came with the Git for Windows installation.  **Git
+	Bash** just as it sounds allows you to run a Bash terminal on Windows.
+	
+	![Git Bash](/project/images/git_bash.png)
+
+2. Change to the root directory.
+
+		$ cd /c/
+				
+3. Make a `gopath` directory.
+
+		$ mkdir gopath
+
+4. Go get the `docker/docker` repository.
+
+		$ go.exe get github.com/docker/docker package github.com/docker/docker
+        imports github.com/docker/docker
+        imports github.com/docker/docker: no buildable Go source files in C:\gopath\src\github.com\docker\docker
+
+	In the next steps, you create environment variables for you Go paths.
+	
+5. Open the **Control Panel** on your system.
+
+6. Choose **System and Security > System**. 
+
+7. Click the **Advanced system settings** link in the sidebar.
+
+	The system opens the **System Properties** dialog.
+
+8. Select the **Advanced** tab.
+
+9. Click **Environment Variables**. 
+
+	The system opens the **Environment Variables dialog** dialog.
+
+10. Locate the **System variables** area and scroll to the **Path**
+   variable.
+
+11. Click **New**.
+
+	Now you are going to create some new variables. These paths you'll create in the next procedure; but you can set them now.
+
+12. Enter `GOPATH` for the **Variable Name**.
+
+13. For the **Variable Value** enter the following:
+ 
+		C:\gopath;C:\gopath\src\github.com\docker\docker\vendor
+		
+	
+14. Press **OK** to close this dialog.
+
+	The system adds `GOPATH` to the list of **System Variables**.
+	
+15. Press **OK** twice to close out of the remaining dialogs.
+
+
+## Where to go next
+
+In the next section, you'll [learn how to set up and configure Git for
+contributing to Docker](/project/set-up-git/).
\ No newline at end of file
diff --git a/docs/sources/project/software-required.md b/docs/sources/project/software-required.md
index 476cbbc..15b9a69 100644
--- a/docs/sources/project/software-required.md
+++ b/docs/sources/project/software-required.md
@@ -2,9 +2,10 @@
 page_description: Describes the software required to contribute to Docker
 page_keywords: GitHub account, repository, Docker, Git, Go, make, 
 
-# Get the required software
+# Get the required software for Linux or OS X
 
-Before you begin contributing you must have:
+This page explains how to get the software you need to use a Linux or OS X
+machine for Docker development. Before you begin contributing you must have:
 
 *  a GitHub account
 * `git`
@@ -82,7 +83,7 @@
 
     $ sudo usermod -aG docker ubuntu
 
-You must log out and back in for this modification to take effect.
+You must log out and log back in for this modification to take effect.
 
 
 ## Where to go next
diff --git a/docs/sources/project/test-and-docs.md b/docs/sources/project/test-and-docs.md
index d586ea2..bcf4167 100644
--- a/docs/sources/project/test-and-docs.md
+++ b/docs/sources/project/test-and-docs.md
@@ -40,7 +40,7 @@
 interface between the components. The `integration` and `integration-cli`
 directories in the Docker repository contain integration test code.
 
-Testing is its own speciality. If you aren't familiar with testing techniques,
+Testing is its own specialty. If you aren't familiar with testing techniques,
 there is a lot of information available to you on the Web. For now, you should
 understand that, the Docker maintainers may ask you to write a new test or
 change an existing one.
@@ -68,10 +68,6 @@
     <td>Run just the unit tests.</td>
   </tr>
   <tr>
-    <td class="monospaced">test-integration</td>
-    <td>Run just integration tests.</td>
-  </tr>
-  <tr>
     <td class="monospaced">test-integration-cli</td>
     <td>Run the test for the integration command line interface.</td>
   </tr>
@@ -128,7 +124,7 @@
 If you are working inside a Docker development container, you use the
 `hack/make.sh` script to run tests. The `hack/make.sh` script doesn't
 have a single target that runs all the tests. Instead, you provide a single
-commmand line with multiple targets that does the same thing.
+command line with multiple targets that does the same thing.
 
 Try this now.
 
@@ -143,7 +139,7 @@
 
 3. Run the tests using the `hack/make.sh` script.
 
-        root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit test-integration test-integration-cli test-docker-py
+        root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py
 
     The tests run just as they did within your local host.
 
@@ -159,17 +155,18 @@
 
 ## Running individual or multiple named tests 
 
+We use [gocheck](https://labix.org/gocheck) for our integration-cli tests. 
 You can use the `TESTFLAGS` environment variable to run a single test. The
 flag's value is passed as arguments to the `go test` command. For example, from
 your local host you can run the `TestBuild` test with this command:
 
-    $ TESTFLAGS='-test.run ^TestBuild$' make test
+    $ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli
 
 To run the same test inside your Docker development container, you do this:
 
-    root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-run ^TestBuild$' hack/make.sh
+    root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli
 
-## If test under Boot2Docker fail do to space errors
+## If tests under Boot2Docker fail due to disk space errors
 
 Running the tests requires about 2GB of memory. If you are running your
 container on bare metal, that is you are not running with Boot2Docker, your
@@ -229,6 +226,46 @@
 6. Restart your container and try your test again.
 
 
+## Testing just the Windows client
+
+This explains how to test the Windows client on a Windows server set up as a
+development environment.  You'll use the **Git Bash** came with the Git for
+Windows installation.  **Git Bash** just as it sounds allows you to run a Bash
+terminal on Windows. 
+
+1.  If you don't have one, start a Git Bash terminal.
+
+	 ![Git Bash](/project/images/git_bash.png)
+
+2. Change to the `docker` source directory.
+
+		$ cd /c/gopath/src/github.com/docker/docker
+    
+3. Set `DOCKER_CLIENTONLY` as follows:
+
+		$ export DOCKER_CLIENTONLY=1
+     
+	This ensures you are building only the client binary instead of both the
+	binary and the daemon.
+	
+4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your
+machine's actual IP address, for example:
+
+		$ export DOCKER_TEST_HOST=tcp://263.124.23.200:2376
+
+5. Make the binary and the test:
+
+		$ hack/make.sh binary test-integration-cli
+  	
+   Many tests are skipped on Windows for various reasons. You see which tests
+   were skipped by re-running the make and passing in the 
+   `TESTFLAGS='-test.v'` value.
+        
+
+You can now choose to make changes to the Docker source or the tests. If you
+make any changes just run these commands again.
+
+
 ## Build and test the documentation
 
 The Docker documentation source files are under `docs/sources`. The content is
@@ -249,7 +286,7 @@
 
 1. In a terminal, change to the root of your `docker-fork` repository.
 
-        $ cd ~/repos/dry-run-test
+        $ cd ~/repos/docker-fork
 
 2. Make sure you are in your feature branch.
 
diff --git a/docs/sources/project/work-issue.md b/docs/sources/project/work-issue.md
index 190cec0..3291102 100644
--- a/docs/sources/project/work-issue.md
+++ b/docs/sources/project/work-issue.md
@@ -30,8 +30,10 @@
     source into a development container and iterate that way. For documentation
     alone, you can work on your local host. 
 
-    Review <a href="../set-up-dev-env" target="_blank">if you forgot the details
-    of working with a container</a>.
+    Make sure you don't change files in the `vendor` directory and its
+    subdirectories; they contain third-party dependency code. Review <a
+    href="../set-up-dev-env" target="_blank">if you forgot the details of
+    working with a container</a>.
 
 
 3. Test your changes as you work.
@@ -67,7 +69,7 @@
             For example, if you edited the `docker.go` file you would format the file
             like this:
             </p>
-            <p><code>$ gofmt -s -w file.go</code></p>
+            <p><code>$ gofmt -s -w docker.go</code></p>
             <p>
             Most file editors have a plugin to format for you. Check your editor's
             documentation.
@@ -107,7 +109,7 @@
 
 9. Push your change to your repository.
 
-        $ git push origin
+        $ git push origin 11038-fix-rhel-link
         Username for 'https://github.com': moxiegirl
         Password for 'https://moxiegirl@github.com': 
         Counting objects: 60, done.
@@ -117,11 +119,7 @@
         To https://github.com/moxiegirl/docker.git
          * [new branch]      11038-fix-rhel-link -> 11038-fix-rhel-link
         Branch 11038-fix-rhel-link set up to track remote branch 11038-fix-rhel-link from origin.
-        
-    The first time you push a change, you must specify the branch. Later, you can just do this:
-    
-    	git push origin
-    	
+
 ## Review your branch on GitHub
 
 After you push a new branch, you should verify it on GitHub:
@@ -147,54 +145,46 @@
 
 You should pull and rebase frequently as you work.  
 
-1. Return to the terminal on your local machine.
+1. Return to the terminal on your local machine and checkout your
+    feature branch in your local `docker-fork` repository.   
 
-2. Make sure you are in your branch.
+2. Fetch any last minute changes from `docker/docker`.
 
-		$ git branch 11038-fix-rhel-link
+        $ git fetch upstream master
+        From github.com:docker/docker
+         * branch            master     -> FETCH_HEAD
 
-3. Fetch all the changes from the `upstream/master` branch.
+3. Start an interactive rebase.
 
-		 $ git fetch upstream/master
+        $ git rebase -i upstream/master
 
-  	This command says get all the changes from the `master` branch belonging to
-  	the `upstream` remote.
+4. Rebase opens an editor with a list of commits.
 
-4. Rebase your local master with Docker's `upstream/master` branch.
+        pick 1a79f55 Tweak some of the other text for grammar
+        pick 53e4983 Fix a link
+        pick 3ce07bb Add a new line about RHEL
 
-		 $ git rebase -i upstream/master
-   
-  	This command starts an interactive rebase to merge code from Docker's
-  	`upstream/master` branch into your local branch. If you aren't familiar or
-  	comfortable with rebase, you can <a
-  	href="http://nathanleclaire.com/blog/2014/09/14/dont-be-scared-of-git-
-  	rebase" target="_blank">learn more about rebasing</a> on the web.
-  
-5. Rebase opens an editor with a list of commits.
+5. Replace the `pick` keyword with `squash` on all but the first commit.
 
-			pick 1a79f55 Tweak some of the other text for grammar 
-			pick 53e4983 Fix a link 
-			pick 3ce07bb Add a new line about RHEL
-        
-  	If you run into trouble, `git --rebase abort` removes any changes and gets
-  	you back to where you started. 
+        pick 1a79f55 Tweak some of the other text for grammar
+        squash 53e4983 Fix a link
+        squash 3ce07bb Add a new line about RHEL
 
-6. Squash the `pick` keyword with `squash` on all but the first commit.
+    After you save the changes and quit from the editor, git starts
+    the rebase, reporting the progress along the way. Sometimes
+    your changes can conflict with the work of others. If git
+    encounters a conflict, it stops the rebase, and prints guidance
+    for how to correct the conflict.
 
-			pick 1a79f55 Tweak some of the other text for grammar
-			squash 53e4983 Fix a link
-			squash 3ce07bb Add a new line about RHEL
+6. Edit and save your commit message.
 
-  	After closing the file, `git` opens your editor again to edit the commit
-  	message. 
+		`git commit -s`
 
-7. Edit and save your commit message.
+		Make sure your message includes <a href="./set-up-git" target="_blank>your signature</a>.
 
-	Make sure you include your signature.
+7. Force push any changes to your fork on GitHub.
 
-8. Push any changes to your fork on GitHub.
-
-		$ git push origin 11038-fix-rhel-link
+        $ git push -f origin 11038-fix-rhel-link
 
 
 ## Where to go next
diff --git a/docs/sources/reference.md b/docs/sources/reference.md
index 6c1ab46..8cfe304 100644
--- a/docs/sources/reference.md
+++ b/docs/sources/reference.md
@@ -3,6 +3,7 @@
 ## Contents:
 
  - [Commands](commandline/)
+ - [Logging drivers](logging/)
  - [Dockerfile Reference](builder/)
  - [Docker Run Reference](run/)
  - [APIs](api/)
diff --git a/docs/sources/reference/api/docker-io_api.md b/docs/sources/reference/api/docker-io_api.md
index a7557ba..b8da270 100644
--- a/docs/sources/reference/api/docker-io_api.md
+++ b/docs/sources/reference/api/docker-io_api.md
@@ -10,7 +10,7 @@
 
 # Repositories
 
-## User Repository
+## User repository
 
 ### Create a user repository
 
@@ -93,7 +93,7 @@
 - **401** – Unauthorized
 - **403** – Account is not Active
 
-## Library Repository
+## Library repository
 
 ### Create a library repository
 
@@ -182,9 +182,9 @@
 - **401** – Unauthorized
 - **403** – Account is not Active
 
-# Repository Images
+# Repository images
 
-## User Repository Images
+## User repository images
 
 ### Update user repository images
 
@@ -256,7 +256,7 @@
 - **200** – OK
 - **404** – Not found
 
-## Library Repository Images
+## Library repository images
 
 ### Update library repository images
 
@@ -326,9 +326,9 @@
 - **200** – OK
 - **404** – Not found
 
-# Repository Authorization
+# Repository authorization
 
-## Library Repository
+## Library repository
 
 ### Authorize a token for a library
 
@@ -361,7 +361,7 @@
 - **403** – Permission denied
 - **404** – Not found
 
-## User Repository
+## User repository
 
 ### Authorize a token for a user repository
 
@@ -397,7 +397,7 @@
 
 ## Users
 
-### User Login
+### User login
 
 `GET /v1/users/`
 
@@ -424,7 +424,7 @@
 - **401** – Unauthorized
 - **403** – Account is not Active
 
-### User Register
+### User register
 
 `POST /v1/users/`
 
@@ -461,7 +461,7 @@
 - **201** – User Created
 - **400** – Errors (invalid json, missing or invalid fields, etc)
 
-### Update User
+### Update user
 
 `PUT /v1/users/(username)/`
 
diff --git a/docs/sources/reference/api/docker_io_accounts_api.md b/docs/sources/reference/api/docker_io_accounts_api.md
index efb86eb..34f21eb 100644
--- a/docs/sources/reference/api/docker_io_accounts_api.md
+++ b/docs/sources/reference/api/docker_io_accounts_api.md
@@ -1,8 +1,8 @@
-page_title: docker.io Accounts API
+page_title: docker.io accounts API
 page_description: API Documentation for docker.io accounts.
 page_keywords: API, Docker, accounts, REST, documentation
 
-# docker.io Accounts API
+# docker.io accounts API
 
 ## Get a single user
 
diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md
index 3da4cc8..75ce93c 100644
--- a/docs/sources/reference/api/docker_remote_api.md
+++ b/docs/sources/reference/api/docker_remote_api.md
@@ -30,17 +30,68 @@
    Client applications need to take this into account to ensure
    they will not break when talking to newer Docker daemons.
 
-The current version of the API is v1.18
+The current version of the API is v1.19
 
 Calling `/info` is the same as calling
-`/v1.18/info`.
+`/v1.19/info`.
 
 You can still call an old version of the API using
-`/v1.17/info`.
+`/v1.18/info`.
+
+## Docker Events
+
+The following diagram depicts the container states accessible through the API.
+
+![States](../images/event_state.png)
+
+Some container-related events are not affected by container state, so they are not included in this diagram. These events are:
+
+* **export** emitted by `docker export`
+* **exec_create** emitted by `docker exec`
+* **exec_start** emitted by `docker exec` after **exec_create**
+
+Running `docker rmi` emits an **untag** event when removing an image name.  The `rmi` command may also emit **delete** events when images are deleted by ID directly or by deleting the last tag referring to the image.
+
+> **Acknowledgement**: This diagram and the accompanying text were used with the permission of Matt Good and Gilder Labs. See Matt's original blog post [Docker Events Explained](http://gliderlabs.com/blog/2015/04/14/docker-events-explained/).
+
+## v1.19
+
+### Full documentation
+
+[*Docker Remote API v1.19*](/reference/api/docker_remote_api_v1.19/)
+
+### What's new
+
+**New!**
+When the daemon detects a version mismatch with the client, usually when
+the client is newer than the daemon, an HTTP 400 is now returned instead
+of a 404.
+
+`GET /containers/(id)/stats`
+
+**New!**
+You can now supply a `stream` bool to get only one set of stats and
+disconnect
+
+`GET /containers(id)/logs`
+
+**New!**
+
+This endpoint now accepts a `since` timestamp parameter.
+
+`GET /info`
+
+**New!**
+
+The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and `SwapLimit`
+are now returned as boolean instead of as an int.
+
+In addition, the end point now returns the new boolean fields
+`CpuCfsPeriod`, `CpuCfsQuota`, and `OomKillDisable`.
 
 ## v1.18
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.18*](/reference/api/docker_remote_api_v1.18/)
 
@@ -75,15 +126,19 @@
 **New!**
 (`CgroupParent`) can be passed in the host config to setup container cgroups under a specific cgroup.
 
-
 `POST /build`
 
 **New!**
 Closing the HTTP request will now cause the build to be canceled.
 
+`POST /containers/(id)/exec`
+
+**New!**
+Add `Warnings` field to response.
+
 ## v1.17
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.17*](/reference/api/docker_remote_api_v1.17/)
 
@@ -141,7 +196,7 @@
 
 ## v1.16
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.16*](/reference/api/docker_remote_api_v1.16/)
 
@@ -169,7 +224,7 @@
 
 ## v1.15
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.15*](/reference/api/docker_remote_api_v1.15/)
 
@@ -183,7 +238,7 @@
 
 ## v1.14
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.14*](/reference/api/docker_remote_api_v1.14/)
 
@@ -209,7 +264,7 @@
 
 ## v1.13
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.13*](/reference/api/docker_remote_api_v1.13/)
 
@@ -237,7 +292,7 @@
 
 ## v1.12
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.12*](/reference/api/docker_remote_api_v1.12/)
 
@@ -262,7 +317,7 @@
 
 ## v1.11
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.11*](/reference/api/docker_remote_api_v1.11/)
 
@@ -285,7 +340,7 @@
 
 ## v1.10
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.10*](/reference/api/docker_remote_api_v1.10/)
 
@@ -308,7 +363,7 @@
 
 ## v1.9
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.9*](/reference/api/docker_remote_api_v1.9/)
 
@@ -324,7 +379,7 @@
 
 ## v1.8
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.8*](/reference/api/docker_remote_api_v1.8/)
 
@@ -356,7 +411,7 @@
 
 ## v1.7
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.7*](/reference/api/docker_remote_api_v1.7/)
 
@@ -455,7 +510,7 @@
 
 ## v1.6
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.6*](/reference/api/docker_remote_api_v1.6/)
 
@@ -473,7 +528,7 @@
 
 ## v1.5
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.5*](/reference/api/docker_remote_api_v1.5/)
 
@@ -500,7 +555,7 @@
 
 ## v1.4
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.4*](/reference/api/docker_remote_api_v1.4/)
 
@@ -527,7 +582,7 @@
 docker v0.5.0
 [51f6c4a](https://github.com/docker/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909)
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.3*](/reference/api/docker_remote_api_v1.3/)
 
@@ -567,7 +622,7 @@
 docker v0.4.2
 [2e7649b](https://github.com/docker/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168)
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.2*](/reference/api/docker_remote_api_v1.2/)
 
@@ -599,7 +654,7 @@
 docker v0.4.0
 [a8ae398](https://github.com/docker/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f)
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.1*](/reference/api/docker_remote_api_v1.1/)
 
@@ -626,7 +681,7 @@
 docker v0.3.4
 [8d73740](https://github.com/docker/docker/commit/8d73740343778651c09160cde9661f5f387b36f4)
 
-### Full Documentation
+### Full documentation
 
 [*Docker Remote API v1.0*](/reference/api/docker_remote_api_v1.0/)
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md
index 7837b82..e13dccf 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.10.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.10.md
@@ -535,7 +535,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1)
 
@@ -1047,7 +1047,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md
index 6bcabfc..5a2a0e4 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.11.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.11.md
@@ -570,7 +570,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1)
 
@@ -1053,7 +1053,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md
index 58f3bc3..999aca9 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.12.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.12.md
@@ -618,7 +618,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1
 
@@ -1115,7 +1115,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md
index 1590978..287008c 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.13.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.13.md
@@ -611,7 +611,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1
 
@@ -1104,7 +1104,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.14.md b/docs/sources/reference/api/docker_remote_api_v1.14.md
index f4e1b3e..58634b2 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.14.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.14.md
@@ -621,7 +621,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1
 
@@ -1114,7 +1114,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.15.md b/docs/sources/reference/api/docker_remote_api_v1.15.md
index a956d45..e4fe507 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.15.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.15.md
@@ -174,12 +174,12 @@
       container.
 -   **Domainname** - A string value containing the desired domain name to use
       for the container.
--   **User** - A string value containg the user to use inside the container.
+-   **User** - A string value containing the user to use inside the container.
 -   **Memory** - Memory limit in bytes.
 -   **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap.
 -   **CpuShares** - An integer value containing the CPU Shares for container
-      (ie. the relative weight vs othercontainers).
-    **CpuSet** - String value containg the cgroups Cpuset to use.
+      (ie. the relative weight vs other containers).
+    **CpuSet** - String value containing the cgroups Cpuset to use.
 -   **AttachStdin** - Boolean value, attaches to stdin.
 -   **AttachStdout** - Boolean value, attaches to stdout.
 -   **AttachStderr** - Boolean value, attaches to stderr.
@@ -195,7 +195,7 @@
         container to empty objects.
 -   **WorkingDir** - A string value containing the working dir for commands to
       run in.
--   **NetworkDisabled** - Boolean value, when true disables neworking for the
+-   **NetworkDisabled** - Boolean value, when true disables networking for the
       container
 -   **ExposedPorts** - An object mapping ports to an empty object in the form of:
       `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
@@ -207,8 +207,8 @@
           volume for the container), `host_path:container_path` (to bind-mount
           a host path into the container), or `host_path:container_path:ro`
           (to make the bind-mount read-only inside the container).
-  -   **Links** - A list of links for the container.  Each link entry should be of
-        of the form "container_name:alias".
+  -   **Links** - A list of links for the container.  Each link entry should be
+        in the form of "container_name:alias".
   -   **LxcConf** - LXC specific configurations.  These configurations will only
         work when using the `lxc` execution driver.
   -   **PortBindings** - A map of exposed container ports and the host port they
@@ -225,8 +225,8 @@
       container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
   -   **VolumesFrom** - A list of volumes to inherit from another container.
         Specified in the form `<container name>[:<ro|rw>]`
-  -   **CapAdd** - A list of kernel capabilties to add to the container.
-  -   **Capdrop** - A list of kernel capabilties to drop from the container.
+  -   **CapAdd** - A list of kernel capabilities to add to the container.
+  -   **Capdrop** - A list of kernel capabilities to drop from the container.
   -   **RestartPolicy** – The behavior to apply when the container exits.  The
           value is an object with a `Name` property of either `"always"` to
           always restart or `"on-failure"` to restart only when the container
@@ -553,8 +553,8 @@
 -   **DnsSearch** - A list of DNS search domains
 -   **VolumesFrom** - A list of volumes to inherit from another container.
       Specified in the form `<container name>[:<ro|rw>]`
--   **CapAdd** - A list of kernel capabilties to add to the container.
--   **Capdrop** - A list of kernel capabilties to drop from the container.
+-   **CapAdd** - A list of kernel capabilities to add to the container.
+-   **Capdrop** - A list of kernel capabilities to drop from the container.
 -   **RestartPolicy** – The behavior to apply when the container exits.  The
         value is an object with a `Name` property of either `"always"` to
         always restart or `"on-failure"` to restart only when the container
@@ -766,7 +766,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1
 
@@ -1261,7 +1261,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
@@ -1495,7 +1495,7 @@
 by `name`.
 
 If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
-(and its parents) are returned. If `name` is an image ID, similarly only tha
+(and its parents) are returned. If `name` is an image ID, similarly only that
 image (and its parents) are returned, but with the exclusion of the
 'repositories' file in the tarball, as there were no image names referenced.
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md
index 86df97b..df8e5be 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.16.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.16.md
@@ -174,12 +174,12 @@
       container.
 -   **Domainname** - A string value containing the desired domain name to use
       for the container.
--   **User** - A string value containg the user to use inside the container.
+-   **User** - A string value containing the user to use inside the container.
 -   **Memory** - Memory limit in bytes.
 -   **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap.
 -   **CpuShares** - An integer value containing the CPU Shares for container
-      (ie. the relative weight vs othercontainers).
-    **CpuSet** - String value containg the cgroups Cpuset to use.
+      (ie. the relative weight vs other containers).
+    **CpuSet** - String value containing the cgroups Cpuset to use.
 -   **AttachStdin** - Boolean value, attaches to stdin.
 -   **AttachStdout** - Boolean value, attaches to stdout.
 -   **AttachStderr** - Boolean value, attaches to stderr.
@@ -195,7 +195,7 @@
         container to empty objects.
 -   **WorkingDir** - A string value containing the working dir for commands to
       run in.
--   **NetworkDisabled** - Boolean value, when true disables neworking for the
+-   **NetworkDisabled** - Boolean value, when true disables networking for the
       container
 -   **ExposedPorts** - An object mapping ports to an empty object in the form of:
       `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
@@ -207,8 +207,8 @@
           volume for the container), `host_path:container_path` (to bind-mount
           a host path into the container), or `host_path:container_path:ro`
           (to make the bind-mount read-only inside the container).
-  -   **Links** - A list of links for the container.  Each link entry should be of
-        of the form "container_name:alias".
+  -   **Links** - A list of links for the container.  Each link entry should be
+        in the form of "container_name:alias".
   -   **LxcConf** - LXC specific configurations.  These configurations will only
         work when using the `lxc` execution driver.
   -   **PortBindings** - A map of exposed container ports and the host port they
@@ -225,8 +225,8 @@
       container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
   -   **VolumesFrom** - A list of volumes to inherit from another container.
         Specified in the form `<container name>[:<ro|rw>]`
-  -   **CapAdd** - A list of kernel capabilties to add to the container.
-  -   **Capdrop** - A list of kernel capabilties to drop from the container.
+  -   **CapAdd** - A list of kernel capabilities to add to the container.
+  -   **Capdrop** - A list of kernel capabilities to drop from the container.
   -   **RestartPolicy** – The behavior to apply when the container exits.  The
           value is an object with a `Name` property of either `"always"` to
           always restart or `"on-failure"` to restart only when the container
@@ -509,12 +509,66 @@
         POST /containers/(id)/start HTTP/1.1
         Content-Type: application/json
 
+        {
+             "Binds": ["/tmp:/tmp"],
+             "Links": ["redis3:redis"],
+             "LxcConf": {"lxc.utsname":"docker"},
+             "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+             "PublishAllPorts": false,
+             "Privileged": false,
+             "Dns": ["8.8.8.8"],
+             "DnsSearch": [""],
+             "VolumesFrom": ["parent", "other:ro"],
+             "CapAdd": ["NET_ADMIN"],
+             "CapDrop": ["MKNOD"],
+             "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+             "NetworkMode": "bridge",
+             "Devices": []
+        }
+
 **Example response**:
 
         HTTP/1.1 204 No Content
 
 Json Parameters:
 
+-   **Binds** – A list of volume bindings for this container.  Each volume
+        binding is a string of the form `container_path` (to create a new
+        volume for the container), `host_path:container_path` (to bind-mount
+        a host path into the container), or `host_path:container_path:ro`
+        (to make the bind-mount read-only inside the container).
+-   **Links** - A list of links for the container.  Each link entry should be of
+      of the form "container_name:alias".
+-   **LxcConf** - LXC specific configurations.  These configurations will only
+      work when using the `lxc` execution driver.
+-   **PortBindings** - A map of exposed container ports and the host port they
+      should map to. It should be specified in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+-   **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+-   **Privileged** - Gives the container full access to the host.  Specified as
+      a boolean value.
+-   **Dns** - A list of dns servers for the container to use.
+-   **DnsSearch** - A list of DNS search domains
+-   **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+-   **CapAdd** - A list of kernel capabilities to add to the container.
+-   **Capdrop** - A list of kernel capabilities to drop from the container.
+-   **RestartPolicy** – The behavior to apply when the container exits.  The
+        value is an object with a `Name` property of either `"always"` to
+        always restart or `"on-failure"` to restart only when the container
+        exit code is non-zero.  If `on-failure` is used, `MaximumRetryCount`
+        controls the number of times to retry before giving up.
+        The default is not to restart. (optional)
+        An ever increasing delay (double the previous delay, starting at 100mS)
+        is added before each restart to prevent flooding the server.
+-   **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, and `container:<name|id>`
+-   **Devices** - A list of devices to add to the container specified in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+
 Status Codes:
 
 -   **204** – no error
@@ -712,7 +766,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1
 
@@ -1208,7 +1262,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
@@ -1455,7 +1509,7 @@
 by `name`.
 
 If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
-(and its parents) are returned. If `name` is an image ID, similarly only tha
+(and its parents) are returned. If `name` is an image ID, similarly only that
 image (and its parents) are returned, but with the exclusion of the
 'repositories' file in the tarball, as there were no image names referenced.
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.17.md b/docs/sources/reference/api/docker_remote_api_v1.17.md
index 9688755..d8ef81c 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.17.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.17.md
@@ -138,7 +138,6 @@
              "ExposedPorts": {
                      "22/tcp": {}
              },
-             "SecurityOpts": [""],
              "HostConfig": {
                "Binds": ["/tmp:/tmp"],
                "Links": ["redis3:redis"],
@@ -156,6 +155,7 @@
                "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
                "NetworkMode": "bridge",
                "Devices": []
+               "SecurityOpt": [""],
             }
         }
 
@@ -175,13 +175,13 @@
       container.
 -   **Domainname** - A string value containing the desired domain name to use
       for the container.
--   **User** - A string value containg the user to use inside the container.
+-   **User** - A string value containing the user to use inside the container.
 -   **Memory** - Memory limit in bytes.
 -   **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap,
       always use this with `memory`, and make the value larger than `memory`.
 -   **CpuShares** - An integer value containing the CPU Shares for container
-      (ie. the relative weight vs othercontainers).
-    **CpuSet** - String value containg the cgroups Cpuset to use.
+      (ie. the relative weight vs other containers).
+    **CpuSet** - String value containing the cgroups Cpuset to use.
 -   **AttachStdin** - Boolean value, attaches to stdin.
 -   **AttachStdout** - Boolean value, attaches to stdout.
 -   **AttachStderr** - Boolean value, attaches to stderr.
@@ -197,20 +197,18 @@
         container to empty objects.
 -   **WorkingDir** - A string value containing the working dir for commands to
       run in.
--   **NetworkDisabled** - Boolean value, when true disables neworking for the
+-   **NetworkDisabled** - Boolean value, when true disables networking for the
       container
 -   **ExposedPorts** - An object mapping ports to an empty object in the form of:
       `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
--   **SecurityOpts**: A list of string values to customize labels for MLS
-      systems, such as SELinux.
 -   **HostConfig**
   -   **Binds** – A list of volume bindings for this container.  Each volume
           binding is a string of the form `container_path` (to create a new
           volume for the container), `host_path:container_path` (to bind-mount
           a host path into the container), or `host_path:container_path:ro`
           (to make the bind-mount read-only inside the container).
-  -   **Links** - A list of links for the container.  Each link entry should be of
-        of the form "container_name:alias".
+  -   **Links** - A list of links for the container.  Each link entry should be
+        in the form of "container_name:alias".
   -   **LxcConf** - LXC specific configurations.  These configurations will only
         work when using the `lxc` execution driver.
   -   **PortBindings** - A map of exposed container ports and the host port they
@@ -229,8 +227,8 @@
       container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
   -   **VolumesFrom** - A list of volumes to inherit from another container.
         Specified in the form `<container name>[:<ro|rw>]`
-  -   **CapAdd** - A list of kernel capabilties to add to the container.
-  -   **Capdrop** - A list of kernel capabilties to drop from the container.
+  -   **CapAdd** - A list of kernel capabilities to add to the container.
+  -   **Capdrop** - A list of kernel capabilities to drop from the container.
   -   **RestartPolicy** – The behavior to apply when the container exits.  The
           value is an object with a `Name` property of either `"always"` to
           always restart or `"on-failure"` to restart only when the container
@@ -244,6 +242,8 @@
   -   **Devices** - A list of devices to add to the container specified in the
         form
         `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  -   **SecurityOpt**: A list of string values to customize labels for MLS
+        systems, such as SELinux.
 
 Query Parameters:
 
@@ -639,12 +639,69 @@
         POST /containers/(id)/start HTTP/1.1
         Content-Type: application/json
 
+        {
+             "Binds": ["/tmp:/tmp"],
+             "Links": ["redis3:redis"],
+             "LxcConf": {"lxc.utsname":"docker"},
+             "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+             "PublishAllPorts": false,
+             "Privileged": false,
+             "ReadonlyRootfs": false,
+             "Dns": ["8.8.8.8"],
+             "DnsSearch": [""],
+             "VolumesFrom": ["parent", "other:ro"],
+             "CapAdd": ["NET_ADMIN"],
+             "CapDrop": ["MKNOD"],
+             "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+             "NetworkMode": "bridge",
+             "Devices": []
+        }
+
 **Example response**:
 
         HTTP/1.1 204 No Content
 
 Json Parameters:
 
+-   **Binds** – A list of volume bindings for this container.  Each volume
+        binding is a string of the form `container_path` (to create a new
+        volume for the container), `host_path:container_path` (to bind-mount
+        a host path into the container), or `host_path:container_path:ro`
+        (to make the bind-mount read-only inside the container).
+-   **Links** - A list of links for the container.  Each link entry should be of
+      of the form "container_name:alias".
+-   **LxcConf** - LXC specific configurations.  These configurations will only
+      work when using the `lxc` execution driver.
+-   **PortBindings** - A map of exposed container ports and the host port they
+      should map to. It should be specified in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+-   **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+-   **Privileged** - Gives the container full access to the host.  Specified as
+      a boolean value.
+-   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+-   **Dns** - A list of dns servers for the container to use.
+-   **DnsSearch** - A list of DNS search domains
+-   **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+-   **CapAdd** - A list of kernel capabilities to add to the container.
+-   **Capdrop** - A list of kernel capabilities to drop from the container.
+-   **RestartPolicy** – The behavior to apply when the container exits.  The
+        value is an object with a `Name` property of either `"always"` to
+        always restart or `"on-failure"` to restart only when the container
+        exit code is non-zero.  If `on-failure` is used, `MaximumRetryCount`
+        controls the number of times to retry before giving up.
+        The default is not to restart. (optional)
+        An ever increasing delay (double the previous delay, starting at 100mS)
+        is added before each restart to prevent flooding the server.
+-   **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, and `container:<name|id>`
+-   **Devices** - A list of devices to add to the container specified in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+
 Status Codes:
 
 -   **204** – no error
@@ -870,7 +927,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1
 
@@ -1083,7 +1140,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
@@ -1618,7 +1675,7 @@
 by `name`.
 
 If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
-(and its parents) are returned. If `name` is an image ID, similarly only tha
+(and its parents) are returned. If `name` is an image ID, similarly only that
 image (and its parents) are returned, but with the exclusion of the
 'repositories' file in the tarball, as there were no image names referenced.
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.18.md b/docs/sources/reference/api/docker_remote_api_v1.18.md
index ee89ccb..2f6f2aa 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.18.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.18.md
@@ -91,6 +91,7 @@
 -   **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters:
   -   exited=&lt;int&gt; -- containers with exit code of &lt;int&gt;
   -   status=(restarting|running|paused|exited)
+  -   label=`key` or `key=value` of a container label
 
 Status Codes:
 
@@ -139,7 +140,6 @@
              "ExposedPorts": {
                      "22/tcp": {}
              },
-             "SecurityOpts": [""],
              "HostConfig": {
                "Binds": ["/tmp:/tmp"],
                "Links": ["redis3:redis"],
@@ -163,6 +163,7 @@
                "Devices": [],
                "Ulimits": [{}],
                "LogConfig": { "Type": "json-file", Config: {} },
+               "SecurityOpt": [""],
                "CgroupParent": ""
             }
         }
@@ -183,14 +184,14 @@
       container.
 -   **Domainname** - A string value containing the desired domain name to use
       for the container.
--   **User** - A string value containg the user to use inside the container.
+-   **User** - A string value containing the user to use inside the container.
 -   **Memory** - Memory limit in bytes.
 -   **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap,
       always use this with `memory`, and make the value larger than `memory`.
 -   **CpuShares** - An integer value containing the CPU Shares for container
-      (ie. the relative weight vs othercontainers).
+      (ie. the relative weight vs other containers).
 -   **Cpuset** - The same as CpusetCpus, but deprecated, please don't use.
--   **CpusetCpus** - String value containg the cgroups CpusetCpus to use.
+-   **CpusetCpus** - String value containing the cgroups CpusetCpus to use.
 -   **AttachStdin** - Boolean value, attaches to stdin.
 -   **AttachStdout** - Boolean value, attaches to stdout.
 -   **AttachStderr** - Boolean value, attaches to stderr.
@@ -207,61 +208,61 @@
       container to empty objects.
 -   **WorkingDir** - A string value containing the working dir for commands to
       run in.
--   **NetworkDisabled** - Boolean value, when true disables neworking for the
+-   **NetworkDisabled** - Boolean value, when true disables networking for the
       container
 -   **ExposedPorts** - An object mapping ports to an empty object in the form of:
       `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
--   **SecurityOpts**: A list of string values to customize labels for MLS
-      systems, such as SELinux.
 -   **HostConfig**
-  -   **Binds** – A list of volume bindings for this container.  Each volume
-          binding is a string of the form `container_path` (to create a new
-          volume for the container), `host_path:container_path` (to bind-mount
-          a host path into the container), or `host_path:container_path:ro`
-          (to make the bind-mount read-only inside the container).
-  -   **Links** - A list of links for the container.  Each link entry should be of
-        of the form "container_name:alias".
-  -   **LxcConf** - LXC specific configurations.  These configurations will only
-        work when using the `lxc` execution driver.
-  -   **PortBindings** - A map of exposed container ports and the host port they
-        should map to. It should be specified in the form
-        `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
-        Take note that `port` is specified as a string and not an integer value.
-  -   **PublishAllPorts** - Allocates a random host port for all of a container's
-        exposed ports. Specified as a boolean value.
-  -   **Privileged** - Gives the container full access to the host.  Specified as
-        a boolean value.
-  -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
-        Specified as a boolean value.
-  -   **Dns** - A list of dns servers for the container to use.
-  -   **DnsSearch** - A list of DNS search domains
-  -   **ExtraHosts** - A list of hostnames/IP mappings to be added to the
-      container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
-  -   **VolumesFrom** - A list of volumes to inherit from another container.
-        Specified in the form `<container name>[:<ro|rw>]`
-  -   **CapAdd** - A list of kernel capabilties to add to the container.
-  -   **Capdrop** - A list of kernel capabilties to drop from the container.
-  -   **RestartPolicy** – The behavior to apply when the container exits.  The
-          value is an object with a `Name` property of either `"always"` to
-          always restart or `"on-failure"` to restart only when the container
-          exit code is non-zero.  If `on-failure` is used, `MaximumRetryCount`
-          controls the number of times to retry before giving up.
-          The default is not to restart. (optional)
-          An ever increasing delay (double the previous delay, starting at 100mS)
-          is added before each restart to prevent flooding the server.
-  -   **NetworkMode** - Sets the networking mode for the container. Supported
-        values are: `bridge`, `host`, and `container:<name|id>`
-  -   **Devices** - A list of devices to add to the container specified in the
-        form
-        `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
-  -   **Ulimits** - A list of ulimits to be set in the container, specified as
-        `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
-        `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
-  -   **LogConfig** - Logging configuration to container, format
-        `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}
-        Available types: `json-file`, `syslog`, `none`.
-        `json-file` logging driver.
-  -   **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+    -   **Binds** – A list of volume bindings for this container. Each volume
+            binding is a string of the form `container_path` (to create a new
+            volume for the container), `host_path:container_path` (to bind-mount
+            a host path into the container), or `host_path:container_path:ro`
+            (to make the bind-mount read-only inside the container).
+    -   **Links** - A list of links for the container. Each link entry should be
+          in the form of `container_name:alias`.
+    -   **LxcConf** - LXC specific configurations. These configurations will only
+          work when using the `lxc` execution driver.
+    -   **PortBindings** - A map of exposed container ports and the host port they
+          should map to. It should be specified in the form
+          `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+          Take note that `port` is specified as a string and not an integer value.
+    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+          exposed ports. Specified as a boolean value.
+    -   **Privileged** - Gives the container full access to the host. Specified as
+          a boolean value.
+    -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+          Specified as a boolean value.
+    -   **Dns** - A list of dns servers for the container to use.
+    -   **DnsSearch** - A list of DNS search domains
+    -   **ExtraHosts** - A list of hostnames/IP mappings to be added to the
+        container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    -   **VolumesFrom** - A list of volumes to inherit from another container.
+          Specified in the form `<container name>[:<ro|rw>]`
+    -   **CapAdd** - A list of kernel capabilities to add to the container.
+    -   **Capdrop** - A list of kernel capabilities to drop from the container.
+    -   **RestartPolicy** – The behavior to apply when the container exits.  The
+            value is an object with a `Name` property of either `"always"` to
+            always restart or `"on-failure"` to restart only when the container
+            exit code is non-zero.  If `on-failure` is used, `MaximumRetryCount`
+            controls the number of times to retry before giving up.
+            The default is not to restart. (optional)
+            An ever increasing delay (double the previous delay, starting at 100mS)
+            is added before each restart to prevent flooding the server.
+    -   **NetworkMode** - Sets the networking mode for the container. Supported
+          values are: `bridge`, `host`, and `container:<name|id>`
+    -   **Devices** - A list of devices to add to the container specified in the
+          form
+          `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    -   **Ulimits** - A list of ulimits to be set in the container, specified as
+          `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+          `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
+    -   **SecurityOpt**: A list of string values to customize labels for MLS
+        systems, such as SELinux.
+    -   **LogConfig** - Log configuration for the container, specified as
+          `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+          Available types: `json-file`, `syslog`, `none`.
+          `json-file` logging driver.
+    -   **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
 
 Query Parameters:
 
@@ -679,12 +680,90 @@
         POST /containers/(id)/start HTTP/1.1
         Content-Type: application/json
 
+        {
+            "Binds": ["/tmp:/tmp"],
+            "Links": ["redis3:redis"],
+            "LxcConf": {"lxc.utsname":"docker"},
+            "Memory": 0,
+            "MemorySwap": 0,
+            "CpuShares": 512,
+            "CpusetCpus": "0,1",
+            "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+            "PublishAllPorts": false,
+            "Privileged": false,
+            "ReadonlyRootfs": false,
+            "Dns": ["8.8.8.8"],
+            "DnsSearch": [""],
+            "ExtraHosts": null,
+            "VolumesFrom": ["parent", "other:ro"],
+            "CapAdd": ["NET_ADMIN"],
+            "CapDrop": ["MKNOD"],
+            "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+            "NetworkMode": "bridge",
+            "Devices": [],
+            "Ulimits": [{}],
+            "LogConfig": { "Type": "json-file", Config: {} },
+            "SecurityOpt": [""],
+            "CgroupParent": ""
+        }
+
 **Example response**:
 
         HTTP/1.1 204 No Content
 
 Json Parameters:
 
+-   **Binds** – A list of volume bindings for this container. Each volume
+        binding is a string of the form `container_path` (to create a new
+        volume for the container), `host_path:container_path` (to bind-mount
+        a host path into the container), or `host_path:container_path:ro`
+        (to make the bind-mount read-only inside the container).
+-   **Links** - A list of links for the container. Each link entry should be of
+      of the form `container_name:alias`.
+-   **LxcConf** - LXC specific configurations. These configurations will only
+      work when using the `lxc` execution driver.
+-   **PortBindings** - A map of exposed container ports and the host port they
+      should map to. It should be specified in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+-   **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+-   **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+-   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+-   **Dns** - A list of dns servers for the container to use.
+-   **DnsSearch** - A list of DNS search domains
+-   **ExtraHosts** - A list of hostnames/IP mappings to be added to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+-   **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+-   **CapAdd** - A list of kernel capabilities to add to the container.
+-   **Capdrop** - A list of kernel capabilities to drop from the container.
+-   **RestartPolicy** – The behavior to apply when the container exits.  The
+        value is an object with a `Name` property of either `"always"` to
+        always restart or `"on-failure"` to restart only when the container
+        exit code is non-zero.  If `on-failure` is used, `MaximumRetryCount`
+        controls the number of times to retry before giving up.
+        The default is not to restart. (optional)
+        An ever increasing delay (double the previous delay, starting at 100mS)
+        is added before each restart to prevent flooding the server.
+-   **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, and `container:<name|id>`
+-   **Devices** - A list of devices to add to the container specified in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+-   **Ulimits** - A list of ulimits to be set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
+-   **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+-   **LogConfig** - Log configuration for the container, specified as
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+      Available types: `json-file`, `syslog`, `none`.
+      `json-file` logging driver.
+-   **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
 Status Codes:
 
 -   **204** – no error
@@ -910,7 +989,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1
 
@@ -1113,6 +1192,7 @@
 -   **all** – 1/True/true or 0/False/false, default false
 -   **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
   -   dangling=true
+  -   label=`key` or `key=value` of an image label
 
 ### Build image from a Dockerfile
 
@@ -1167,12 +1247,12 @@
 -   **memory** - set memory limit for build
 -   **memswap** - Total memory (memory + swap), `-1` to disable swap
 -   **cpushares** - CPU shares (relative weight)
--   **cpusetcpus** - CPUs in which to allow exection, e.g., `0-3`, `0,1`
+-   **cpusetcpus** - CPUs in which to allow execution, e.g., `0-3`, `0,1`
 
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
@@ -1514,35 +1594,50 @@
         Content-Type: application/json
 
         {
-             "Containers":11,
-             "Images":16,
-             "Driver":"btrfs",
-             "DriverStatus": [[""]],
-             "ExecutionDriver":"native-0.1",
-             "KernelVersion":"3.12.0-1-amd64"
-             "NCPU":1,
-             "MemTotal":2099236864,
-             "Name":"prod-server-42",
-             "ID":"7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
-             "Debug":false,
-             "NFd": 11,
-             "NGoroutines":21,
-             "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
-             "NEventsListener":0,
-             "InitPath":"/usr/bin/docker",
-             "InitSha1":"",
-             "IndexServerAddress":["https://index.docker.io/v1/"],
-             "MemoryLimit":true,
-             "SwapLimit":false,
-             "IPv4Forwarding":true,
-             "Labels":["storage=ssd"],
-             "DockerRootDir": "/var/lib/docker",
-             "HttpProxy": "http://test:test@localhost:8080"
-             "HttpsProxy": "https://test:test@localhost:8080"
-             "NoProxy": "9.81.1.160"
-             "OperatingSystem": "Boot2Docker",
+            "Containers": 11,
+            "Debug": 0,
+            "DockerRootDir": "/var/lib/docker",
+            "Driver": "btrfs",
+            "DriverStatus": [[""]],
+            "ExecutionDriver": "native-0.1",
+            "HttpProxy": "http://test:test@localhost:8080",
+            "HttpsProxy": "https://test:test@localhost:8080",
+            "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+            "IPv4Forwarding": 1,
+            "Images": 16,
+            "IndexServerAddress": "https://index.docker.io/v1/",
+            "InitPath": "/usr/bin/docker",
+            "InitSha1": "",
+            "KernelVersion": "3.12.0-1-amd64",
+            "Labels": [
+                "storage=ssd"
+            ],
+            "MemTotal": 2099236864,
+            "MemoryLimit": 1,
+            "NCPU": 1,
+            "NEventsListener": 0,
+            "NFd": 11,
+            "NGoroutines": 21,
+            "Name": "prod-server-42",
+            "NoProxy": "9.81.1.160",
+            "OperatingSystem": "Boot2Docker",
+            "RegistryConfig": {
+                "IndexConfigs": {
+                    "docker.io": {
+                        "Mirrors": null,
+                        "Name": "docker.io",
+                        "Official": true,
+                        "Secure": true
+                    }
+                },
+                "InsecureRegistryCIDRs": [
+                    "127.0.0.0/8"
+                ]
+            },
+            "SwapLimit": 0,
+            "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
         }
-
+        
 Status Codes:
 
 -   **200** – no error
@@ -1713,7 +1808,7 @@
 by `name`.
 
 If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
-(and its parents) are returned. If `name` is an image ID, similarly only tha
+(and its parents) are returned. If `name` is an image ID, similarly only that
 image (and its parents) are returned, but with the exclusion of the
 'repositories' file in the tarball, as there were no image names referenced.
 
@@ -1835,6 +1930,7 @@
 
         {
              "Id": "f90e34656806"
+             "Warnings":[]
         }
 
 Json Parameters:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.19.md b/docs/sources/reference/api/docker_remote_api_v1.19.md
new file mode 100644
index 0000000..dde8ee7
--- /dev/null
+++ b/docs/sources/reference/api/docker_remote_api_v1.19.md
@@ -0,0 +1,2187 @@
+page_title: Remote API v1.19
+page_description: API Documentation for Docker
+page_keywords: API, Docker, rcli, REST, documentation
+
+# Docker Remote API v1.19
+
+## 1. Brief introduction
+
+ - The Remote API has replaced `rcli`.
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](
+   /articles/basics/#bind-docker-to-another-hostport-or-a-unix-socket).
+ - The API tends to be REST, but for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `STDOUT`,
+   `STDIN` and `STDERR`.
+ - When the client API version is newer than the daemon's an HTTP
+   `400 Bad Request` error message is returned.
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+**Example request**:
+
+        GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+             {
+                     "Id": "8dfafdbc3a40",
+                     "Image": "ubuntu:latest",
+                     "Command": "echo 1",
+                     "Created": 1367854155,
+                     "Status": "Exit 0",
+                     "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+                     "SizeRw": 12288,
+                     "SizeRootFs": 0
+             },
+             {
+                     "Id": "9cd87474be90",
+                     "Image": "ubuntu:latest",
+                     "Command": "echo 222222",
+                     "Created": 1367854155,
+                     "Status": "Exit 0",
+                     "Ports": [],
+                     "SizeRw": 12288,
+                     "SizeRootFs": 0
+             },
+             {
+                     "Id": "3176a2479c92",
+                     "Image": "ubuntu:latest",
+                     "Command": "echo 3333333333333333",
+                     "Created": 1367854154,
+                     "Status": "Exit 0",
+                     "Ports":[],
+                     "SizeRw":12288,
+                     "SizeRootFs":0
+             },
+             {
+                     "Id": "4cb07b47f9fb",
+                     "Image": "ubuntu:latest",
+                     "Command": "echo 444444444444444444444444444444444",
+                     "Created": 1367854152,
+                     "Status": "Exit 0",
+                     "Ports": [],
+                     "SizeRw": 12288,
+                     "SizeRootFs": 0
+             }
+        ]
+
+Query Parameters:
+
+-   **all** – 1/True/true or 0/False/false, Show all containers.
+        Only running containers are shown by default (i.e., this defaults to false)
+-   **limit** – Show `limit` last created
+        containers, include non-running ones.
+-   **since** – Show only containers created since Id, include
+        non-running ones.
+-   **before** – Show only containers created before Id, include
+        non-running ones.
+-   **size** – 1/True/true or 0/False/false, Show the containers
+        sizes
+-   **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters:
+  -   exited=&lt;int&gt; -- containers with exit code of &lt;int&gt;
+  -   status=(restarting|running|paused|exited)
+  -   label=`key` or `key=value` of a container label
+
+Status Codes:
+
+-   **200** – no error
+-   **400** – bad parameter
+-   **500** – server error
+
+### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+**Example request**:
+
+        POST /containers/create HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "Hostname": "",
+             "Domainname": "",
+             "User": "",
+             "AttachStdin": false,
+             "AttachStdout": true,
+             "AttachStderr": true,
+             "Tty": false,
+             "OpenStdin": false,
+             "StdinOnce": false,
+             "Env": null,
+             "Cmd": [
+                     "date"
+             ],
+             "Entrypoint": "",
+             "Image": "ubuntu",
+             "Labels": {
+                     "com.example.vendor": "Acme",
+                     "com.example.license": "GPL",
+                     "com.example.version": "1.0"
+             },
+             "Volumes": {
+                     "/tmp": {}
+             },
+             "WorkingDir": "",
+             "NetworkDisabled": false,
+             "MacAddress": "12:34:56:78:9a:bc",
+             "ExposedPorts": {
+                     "22/tcp": {}
+             },
+             "HostConfig": {
+               "Binds": ["/tmp:/tmp"],
+               "Links": ["redis3:redis"],
+               "LxcConf": {"lxc.utsname":"docker"},
+               "Memory": 0,
+               "MemorySwap": 0,
+               "CpuShares": 512,
+               "CpuPeriod": 100000,
+               "CpusetCpus": "0,1",
+               "CpusetMems": "0,1",
+               "BlkioWeight": 300,
+               "OomKillDisable": false,
+               "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+               "PublishAllPorts": false,
+               "Privileged": false,
+               "ReadonlyRootfs": false,
+               "Dns": ["8.8.8.8"],
+               "DnsSearch": [""],
+               "ExtraHosts": null,
+               "VolumesFrom": ["parent", "other:ro"],
+               "CapAdd": ["NET_ADMIN"],
+               "CapDrop": ["MKNOD"],
+               "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+               "NetworkMode": "bridge",
+               "Devices": [],
+               "Ulimits": [{}],
+               "LogConfig": { "Type": "json-file", "Config": {} },
+               "SecurityOpt": [""],
+               "CgroupParent": ""
+            }
+        }
+
+**Example response**:
+
+        HTTP/1.1 201 Created
+        Content-Type: application/json
+
+        {
+             "Id":"e90e34656806"
+             "Warnings":[]
+        }
+
+Json Parameters:
+
+-   **Hostname** - A string value containing the desired hostname to use for the
+      container.
+-   **Domainname** - A string value containing the desired domain name to use
+      for the container.
+-   **User** - A string value containing the user to use inside the container.
+-   **Memory** - Memory limit in bytes.
+-   **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap,
+      always use this with `memory`, and make the value larger than `memory`.
+-   **CpuShares** - An integer value containing the CPU Shares for container
+      (ie. the relative weight vs other containers).
+-   **CpuPeriod** - The length of a CPU period (in microseconds).
+-   **Cpuset** - The same as CpusetCpus, but deprecated, please don't use.
+-   **CpusetCpus** - String value containing the cgroups CpusetCpus to use.
+-   **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+-   **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+-   **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+-   **AttachStdin** - Boolean value, attaches to stdin.
+-   **AttachStdout** - Boolean value, attaches to stdout.
+-   **AttachStderr** - Boolean value, attaches to stderr.
+-   **Tty** - Boolean value, Attach standard streams to a tty, including stdin if it is not closed.
+-   **OpenStdin** - Boolean value, opens stdin,
+-   **StdinOnce** - Boolean value, close stdin after the 1 attached client disconnects.
+-   **Env** - A list of environment variables in the form of `VAR=value`
+-   **Labels** - Adds a map of labels that to a container. To specify a map: `{"key":"value"[,"key2":"value2"]}`
+-   **Cmd** - Command to run specified as a string or an array of strings.
+-   **Entrypoint** - Set the entrypoint for the container a a string or an array
+      of strings
+-   **Image** - String value containing the image name to use for the container
+-   **Volumes** – An object mapping mountpoint paths (strings) inside the
+      container to empty objects.
+-   **WorkingDir** - A string value containing the working dir for commands to
+      run in.
+-   **NetworkDisabled** - Boolean value, when true disables networking for the
+      container
+-   **ExposedPorts** - An object mapping ports to an empty object in the form of:
+      `"ExposedPorts": { "<port>/<tcp|udp>: {}" }`
+-   **HostConfig**
+    -   **Binds** – A list of volume bindings for this container. Each volume
+            binding is a string of the form `container_path` (to create a new
+            volume for the container), `host_path:container_path` (to bind-mount
+            a host path into the container), or `host_path:container_path:ro`
+            (to make the bind-mount read-only inside the container).
+    -   **Links** - A list of links for the container. Each link entry should be
+          in the form of `container_name:alias`.
+    -   **LxcConf** - LXC specific configurations. These configurations will only
+          work when using the `lxc` execution driver.
+    -   **PortBindings** - A map of exposed container ports and the host port they
+          should map to. It should be specified in the form
+          `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+          Take note that `port` is specified as a string and not an integer value.
+    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+          exposed ports. Specified as a boolean value.
+    -   **Privileged** - Gives the container full access to the host. Specified as
+          a boolean value.
+    -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+          Specified as a boolean value.
+    -   **Dns** - A list of dns servers for the container to use.
+    -   **DnsSearch** - A list of DNS search domains
+    -   **ExtraHosts** - A list of hostnames/IP mappings to be added to the
+        container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+    -   **VolumesFrom** - A list of volumes to inherit from another container.
+          Specified in the form `<container name>[:<ro|rw>]`
+    -   **CapAdd** - A list of kernel capabilities to add to the container.
+    -   **Capdrop** - A list of kernel capabilities to drop from the container.
+    -   **RestartPolicy** – The behavior to apply when the container exits.  The
+            value is an object with a `Name` property of either `"always"` to
+            always restart or `"on-failure"` to restart only when the container
+            exit code is non-zero.  If `on-failure` is used, `MaximumRetryCount`
+            controls the number of times to retry before giving up.
+            The default is not to restart. (optional)
+            An ever increasing delay (double the previous delay, starting at 100mS)
+            is added before each restart to prevent flooding the server.
+    -   **NetworkMode** - Sets the networking mode for the container. Supported
+          values are: `bridge`, `host`, and `container:<name|id>`
+    -   **Devices** - A list of devices to add to the container specified in the
+          form
+          `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+    -   **Ulimits** - A list of ulimits to be set in the container, specified as
+          `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+          `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
+    -   **SecurityOpt**: A list of string values to customize labels for MLS
+        systems, such as SELinux.
+    -   **LogConfig** - Log configuration for the container, specified as
+          `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+          Available types: `json-file`, `syslog`, `journald`, `none`.
+          `json-file` logging driver.
+    -   **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
+Query Parameters:
+
+-   **name** – Assign the specified name to the container. Must
+    match `/?[a-zA-Z0-9_-]+`.
+
+Status Codes:
+
+-   **201** – no error
+-   **404** – no such container
+-   **406** – impossible to attach (container not running)
+-   **500** – server error
+
+### Inspect a container
+
+`GET /containers/(id)/json`
+
+Return low-level information on the container `id`
+
+
+**Example request**:
+
+        GET /containers/4fa6e0f0c678/json HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+	{
+		"AppArmorProfile": "",
+		"Args": [
+			"-c",
+			"exit 9"
+		],
+		"Config": {
+			"AttachStderr": true,
+			"AttachStdin": false,
+			"AttachStdout": true,
+			"Cmd": [
+				"/bin/sh",
+				"-c",
+				"exit 9"
+			],
+			"Domainname": "",
+			"Entrypoint": null,
+			"Env": [
+				"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+			],
+			"ExposedPorts": null,
+			"Hostname": "ba033ac44011",
+			"Image": "ubuntu",
+			"Labels": {
+				"com.example.vendor": "Acme",
+				"com.example.license": "GPL",
+				"com.example.version": "1.0"
+			},
+			"MacAddress": "",
+			"NetworkDisabled": false,
+			"OnBuild": null,
+			"OpenStdin": false,
+			"PortSpecs": null,
+			"StdinOnce": false,
+			"Tty": false,
+			"User": "",
+			"Volumes": null,
+			"WorkingDir": ""
+		},
+		"Created": "2015-01-06T15:47:31.485331387Z",
+		"Driver": "devicemapper",
+		"ExecDriver": "native-0.2",
+		"ExecIDs": null,
+		"HostConfig": {
+			"Binds": null,
+			"BlkioWeight": 0,
+			"CapAdd": null,
+			"CapDrop": null,
+			"ContainerIDFile": "",
+			"CpusetCpus": "",
+			"CpusetMems": "",
+			"CpuShares": 0,
+			"CpuPeriod": 100000,
+			"Devices": [],
+			"Dns": null,
+			"DnsSearch": null,
+			"ExtraHosts": null,
+			"IpcMode": "",
+			"Links": null,
+			"LxcConf": [],
+			"Memory": 0,
+			"MemorySwap": 0,
+			"OomKillDisable": false,
+			"NetworkMode": "bridge",
+			"PortBindings": {},
+			"Privileged": false,
+			"ReadonlyRootfs": false,
+			"PublishAllPorts": false,
+			"RestartPolicy": {
+				"MaximumRetryCount": 2,
+				"Name": "on-failure"
+			},
+			"LogConfig": {
+				"Config": null,
+				"Type": "json-file"
+			},
+			"SecurityOpt": null,
+			"VolumesFrom": null,
+			"Ulimits": [{}]
+		},
+		"HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname",
+		"HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts",
+		"LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+		"Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39",
+		"Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2",
+		"MountLabel": "",
+		"Name": "/boring_euclid",
+		"NetworkSettings": {
+			"Bridge": "",
+			"Gateway": "",
+			"IPAddress": "",
+			"IPPrefixLen": 0,
+			"MacAddress": "",
+			"PortMapping": null,
+			"Ports": null
+		},
+		"Path": "/bin/sh",
+		"ProcessLabel": "",
+		"ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf",
+		"RestartCount": 1,
+		"State": {
+			"Error": "",
+			"ExitCode": 9,
+			"FinishedAt": "2015-01-06T15:47:32.080254511Z",
+			"OOMKilled": false,
+			"Paused": false,
+			"Pid": 0,
+			"Restarting": false,
+			"Running": false,
+			"StartedAt": "2015-01-06T15:47:32.072697474Z"
+		},
+		"Volumes": {},
+		"VolumesRW": {}
+	}
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### List processes running inside a container
+
+`GET /containers/(id)/top`
+
+List processes running inside the container `id`
+
+**Example request**:
+
+        GET /containers/4fa6e0f0c678/top HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+             "Titles": [
+                     "USER",
+                     "PID",
+                     "%CPU",
+                     "%MEM",
+                     "VSZ",
+                     "RSS",
+                     "TTY",
+                     "STAT",
+                     "START",
+                     "TIME",
+                     "COMMAND"
+                     ],
+             "Processes": [
+                     ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
+                     ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
+             ]
+        }
+
+Query Parameters:
+
+-   **ps_args** – ps arguments to use (e.g., aux)
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Get container logs
+
+`GET /containers/(id)/logs`
+
+Get stdout and stderr logs from the container ``id``
+
+> **Note**:
+> This endpoint works only for containers with `json-file` logging driver.
+
+**Example request**:
+
+       GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1
+
+**Example response**:
+
+       HTTP/1.1 101 UPGRADED
+       Content-Type: application/vnd.docker.raw-stream
+       Connection: Upgrade
+       Upgrade: tcp
+
+       {{ STREAM }}
+
+Query Parameters:
+
+-   **follow** – 1/True/true or 0/False/false, return stream. Default false
+-   **stdout** – 1/True/true or 0/False/false, show stdout log. Default false
+-   **stderr** – 1/True/true or 0/False/false, show stderr log. Default false
+-   **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp
+    will only output log-entries since that timestamp. Default: 0 (unfiltered)
+-   **timestamps** – 1/True/true or 0/False/false, print timestamps for
+        every log line. Default false
+-   **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all
+
+Status Codes:
+
+-   **101** – no error, hints proxy about hijacking
+-   **200** – no error, no upgrade header found
+-   **404** – no such container
+-   **500** – server error
+
+### Inspect changes on a container's filesystem
+
+`GET /containers/(id)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+**Example request**:
+
+        GET /containers/4fa6e0f0c678/changes HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+             {
+                     "Path": "/dev",
+                     "Kind": 0
+             },
+             {
+                     "Path": "/dev/kmsg",
+                     "Kind": 1
+             },
+             {
+                     "Path": "/test",
+                     "Kind": 1
+             }
+        ]
+
+Values for `Kind`:
+
+- `0`: Modify
+- `1`: Add
+- `2`: Delete
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Export a container
+
+`GET /containers/(id)/export`
+
+Export the contents of container `id`
+
+**Example request**:
+
+        GET /containers/4fa6e0f0c678/export HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/octet-stream
+
+        {{ TAR STREAM }}
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Get container stats based on resource usage
+
+`GET /containers/(id)/stats`
+
+This endpoint returns a live stream of a container's resource usage statistics.
+
+> **Note**: this functionality currently only works when using the *libcontainer* exec-driver.
+
+**Example request**:
+
+        GET /containers/redis1/stats HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+           "read" : "2015-01-08T22:57:31.547920715Z",
+           "network" : {
+              "rx_dropped" : 0,
+              "rx_bytes" : 648,
+              "rx_errors" : 0,
+              "tx_packets" : 8,
+              "tx_dropped" : 0,
+              "rx_packets" : 8,
+              "tx_errors" : 0,
+              "tx_bytes" : 648
+           },
+           "memory_stats" : {
+              "stats" : {
+                 "total_pgmajfault" : 0,
+                 "cache" : 0,
+                 "mapped_file" : 0,
+                 "total_inactive_file" : 0,
+                 "pgpgout" : 414,
+                 "rss" : 6537216,
+                 "total_mapped_file" : 0,
+                 "writeback" : 0,
+                 "unevictable" : 0,
+                 "pgpgin" : 477,
+                 "total_unevictable" : 0,
+                 "pgmajfault" : 0,
+                 "total_rss" : 6537216,
+                 "total_rss_huge" : 6291456,
+                 "total_writeback" : 0,
+                 "total_inactive_anon" : 0,
+                 "rss_huge" : 6291456,
+                 "hierarchical_memory_limit" : 67108864,
+                 "total_pgfault" : 964,
+                 "total_active_file" : 0,
+                 "active_anon" : 6537216,
+                 "total_active_anon" : 6537216,
+                 "total_pgpgout" : 414,
+                 "total_cache" : 0,
+                 "inactive_anon" : 0,
+                 "active_file" : 0,
+                 "pgfault" : 964,
+                 "inactive_file" : 0,
+                 "total_pgpgin" : 477
+              },
+              "max_usage" : 6651904,
+              "usage" : 6537216,
+              "failcnt" : 0,
+              "limit" : 67108864
+           },
+           "blkio_stats" : {},
+           "cpu_stats" : {
+              "cpu_usage" : {
+                 "percpu_usage" : [
+                    16970827,
+                    1839451,
+                    7107380,
+                    10571290
+                 ],
+                 "usage_in_usermode" : 10000000,
+                 "total_usage" : 36488948,
+                 "usage_in_kernelmode" : 20000000
+              },
+              "system_cpu_usage" : 20091722000000000,
+              "throttling_data" : {}
+           }
+        }
+
+Query Parameters:
+
+-   **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default true
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Resize a container TTY
+
+`POST /containers/(id)/resize?h=<height>&w=<width>`
+
+Resize the TTY for container with  `id`. The container must be restarted for the resize to take effect.
+
+**Example request**:
+
+        POST /containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+        Content-Type: text/plain; charset=utf-8
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – No such container
+-   **500** – Cannot resize container
+
+### Start a container
+
+`POST /containers/(id)/start`
+
+Start the container `id`
+
+**Example request**:
+
+        POST /containers/(id)/start HTTP/1.1
+        Content-Type: application/json
+
+        {
+           "Binds": ["/tmp:/tmp"],
+           "Links": ["redis3:redis"],
+           "LxcConf": {"lxc.utsname":"docker"},
+           "Memory": 0,
+           "MemorySwap": 0,
+           "CpuShares": 512,
+           "CpusetCpus": "0,1",
+           "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+           "PublishAllPorts": false,
+           "Privileged": false,
+           "ReadonlyRootfs": false,
+           "Dns": ["8.8.8.8"],
+           "DnsSearch": [""],
+           "ExtraHosts": null,
+           "VolumesFrom": ["parent", "other:ro"],
+           "CapAdd": ["NET_ADMIN"],
+           "CapDrop": ["MKNOD"],
+           "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+           "NetworkMode": "bridge",
+           "Devices": [],
+           "Ulimits": [{}],
+           "LogConfig": { "Type": "json-file", "Config": {} },
+           "SecurityOpt": [""],
+           "CgroupParent": ""
+        }
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Json Parameters:
+
+-   **Binds** – A list of volume bindings for this container. Each volume
+        binding is a string of the form `container_path` (to create a new
+        volume for the container), `host_path:container_path` (to bind-mount
+        a host path into the container), or `host_path:container_path:ro`
+        (to make the bind-mount read-only inside the container).
+-   **Links** - A list of links for the container. Each link entry should be of
+      of the form `container_name:alias`.
+-   **LxcConf** - LXC specific configurations. These configurations will only
+      work when using the `lxc` execution driver.
+-   **PortBindings** - A map of exposed container ports and the host port they
+      should map to. It should be specified in the form
+      `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
+      Take note that `port` is specified as a string and not an integer value.
+-   **PublishAllPorts** - Allocates a random host port for all of a container's
+      exposed ports. Specified as a boolean value.
+-   **Privileged** - Gives the container full access to the host. Specified as
+      a boolean value.
+-   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+      Specified as a boolean value.
+-   **Dns** - A list of dns servers for the container to use.
+-   **DnsSearch** - A list of DNS search domains
+-   **ExtraHosts** - A list of hostnames/IP mappings to be added to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+-   **VolumesFrom** - A list of volumes to inherit from another container.
+      Specified in the form `<container name>[:<ro|rw>]`
+-   **CapAdd** - A list of kernel capabilities to add to the container.
+-   **Capdrop** - A list of kernel capabilities to drop from the container.
+-   **RestartPolicy** – The behavior to apply when the container exits.  The
+        value is an object with a `Name` property of either `"always"` to
+        always restart or `"on-failure"` to restart only when the container
+        exit code is non-zero.  If `on-failure` is used, `MaximumRetryCount`
+        controls the number of times to retry before giving up.
+        The default is not to restart. (optional)
+        An ever increasing delay (double the previous delay, starting at 100mS)
+        is added before each restart to prevent flooding the server.
+-   **NetworkMode** - Sets the networking mode for the container. Supported
+      values are: `bridge`, `host`, and `container:<name|id>`
+-   **Devices** - A list of devices to add to the container specified in the
+      form
+      `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+-   **Ulimits** - A list of ulimits to be set in the container, specified as
+      `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+      `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
+-   **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+-   **LogConfig** - Log configuration for the container, specified as
+      `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+      Available types: `json-file`, `syslog`, `journald`, `none`.
+      `json-file` logging driver.
+-   **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
+Status Codes:
+
+-   **204** – no error
+-   **304** – container already started
+-   **404** – no such container
+-   **500** – server error
+
+### Stop a container
+
+`POST /containers/(id)/stop`
+
+Stop the container `id`
+
+**Example request**:
+
+        POST /containers/e90e34656806/stop?t=5 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Query Parameters:
+
+-   **t** – number of seconds to wait before killing the container
+
+Status Codes:
+
+-   **204** – no error
+-   **304** – container already stopped
+-   **404** – no such container
+-   **500** – server error
+
+### Restart a container
+
+`POST /containers/(id)/restart`
+
+Restart the container `id`
+
+**Example request**:
+
+        POST /containers/e90e34656806/restart?t=5 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Query Parameters:
+
+-   **t** – number of seconds to wait before killing the container
+
+Status Codes:
+
+-   **204** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Kill a container
+
+`POST /containers/(id)/kill`
+
+Kill the container `id`
+
+**Example request**:
+
+        POST /containers/e90e34656806/kill HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Query Parameters
+
+-   **signal** - Signal to send to the container: integer or string like "SIGINT".
+        When not set, SIGKILL is assumed and the call will waits for the container to exit.
+
+Status Codes:
+
+-   **204** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Rename a container
+
+`POST /containers/(id)/rename`
+
+Rename the container `id` to a `new_name`
+
+**Example request**:
+
+        POST /containers/e90e34656806/rename?name=new_name HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Query Parameters:
+
+-   **name** – new name for the container
+
+Status Codes:
+
+-   **204** – no error
+-   **404** – no such container
+-   **409** - conflict name already assigned
+-   **500** – server error
+
+### Pause a container
+
+`POST /containers/(id)/pause`
+
+Pause the container `id`
+
+**Example request**:
+
+        POST /containers/e90e34656806/pause HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Status Codes:
+
+-   **204** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Unpause a container
+
+`POST /containers/(id)/unpause`
+
+Unpause the container `id`
+
+**Example request**:
+
+        POST /containers/e90e34656806/unpause HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Status Codes:
+
+-   **204** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Attach to a container
+
+`POST /containers/(id)/attach`
+
+Attach to the container `id`
+
+**Example request**:
+
+        POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 101 UPGRADED
+        Content-Type: application/vnd.docker.raw-stream
+        Connection: Upgrade
+        Upgrade: tcp
+
+        {{ STREAM }}
+
+Query Parameters:
+
+-   **logs** – 1/True/true or 0/False/false, return logs. Default false
+-   **stream** – 1/True/true or 0/False/false, return stream.
+        Default false
+-   **stdin** – 1/True/true or 0/False/false, if stream=true, attach
+        to stdin. Default false
+-   **stdout** – 1/True/true or 0/False/false, if logs=true, return
+        stdout log, if stream=true, attach to stdout. Default false
+-   **stderr** – 1/True/true or 0/False/false, if logs=true, return
+        stderr log, if stream=true, attach to stderr. Default false
+
+Status Codes:
+
+-   **101** – no error, hints proxy about hijacking
+-   **200** – no error, no upgrade header found
+-   **400** – bad parameter
+-   **404** – no such container
+-   **500** – server error
+
+    **Stream details**:
+
+    When using the TTY setting is enabled in
+    [`POST /containers/create`
+    ](/reference/api/docker_remote_api_v1.9/#create-a-container "POST /containers/create"),
+    the stream is the raw data from the process PTY and client's stdin.
+    When the TTY is disabled, then the stream is multiplexed to separate
+    stdout and stderr.
+
+    The format is a **Header** and a **Payload** (frame).
+
+    **HEADER**
+
+    The header will contain the information on which stream write the
+    stream (stdout or stderr). It also contain the size of the
+    associated frame encoded on the last 4 bytes (uint32).
+
+    It is encoded on the first 8 bytes like this:
+
+        header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+    `STREAM_TYPE` can be:
+
+-   0: stdin (will be written on stdout)
+-   1: stdout
+-   2: stderr
+
+    `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
+    the uint32 size encoded as big endian.
+
+    **PAYLOAD**
+
+    The payload is the raw stream.
+
+    **IMPLEMENTATION**
+
+    The simplest way to implement the Attach protocol is the following:
+
+    1.  Read 8 bytes
+    2.  chose stdout or stderr depending on the first byte
+    3.  Extract the frame size from the last 4 bytes
+    4.  Read the extracted size and output it on the correct output
+    5.  Goto 1
+
+### Attach to a container (websocket)
+
+`GET /containers/(id)/attach/ws`
+
+Attach to the container `id` via websocket
+
+Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455)
+
+**Example request**
+
+        GET /containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1
+
+**Example response**
+
+        {{ STREAM }}
+
+Query Parameters:
+
+-   **logs** – 1/True/true or 0/False/false, return logs. Default false
+-   **stream** – 1/True/true or 0/False/false, return stream.
+        Default false
+-   **stdin** – 1/True/true or 0/False/false, if stream=true, attach
+        to stdin. Default false
+-   **stdout** – 1/True/true or 0/False/false, if logs=true, return
+        stdout log, if stream=true, attach to stdout. Default false
+-   **stderr** – 1/True/true or 0/False/false, if logs=true, return
+        stderr log, if stream=true, attach to stderr. Default false
+
+Status Codes:
+
+-   **200** – no error
+-   **400** – bad parameter
+-   **404** – no such container
+-   **500** – server error
+
+### Wait a container
+
+`POST /containers/(id)/wait`
+
+Block until container `id` stops, then returns the exit code
+
+**Example request**:
+
+        POST /containers/16253994b7c4/wait HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"StatusCode": 0}
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Remove a container
+
+`DELETE /containers/(id)`
+
+Remove the container `id` from the filesystem
+
+**Example request**:
+
+        DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 204 No Content
+
+Query Parameters:
+
+-   **v** – 1/True/true or 0/False/false, Remove the volumes
+        associated to the container. Default false
+-   **force** - 1/True/true or 0/False/false, Kill then remove the container.
+        Default false
+
+Status Codes:
+
+-   **204** – no error
+-   **400** – bad parameter
+-   **404** – no such container
+-   **500** – server error
+
+### Copy files or folders from a container
+
+`POST /containers/(id)/copy`
+
+Copy files or folders of container `id`
+
+**Example request**:
+
+        POST /containers/4fa6e0f0c678/copy HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "Resource": "test.txt"
+        }
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/x-tar
+
+        {{ TAR STREAM }}
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such container
+-   **500** – server error
+
+## 2.2 Images
+
+### List Images
+
+`GET /images/json`
+
+**Example request**:
+
+        GET /images/json?all=0 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+          {
+             "RepoTags": [
+               "ubuntu:12.04",
+               "ubuntu:precise",
+               "ubuntu:latest"
+             ],
+             "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
+             "Created": 1365714795,
+             "Size": 131506275,
+             "VirtualSize": 131506275
+          },
+          {
+             "RepoTags": [
+               "ubuntu:12.10",
+               "ubuntu:quantal"
+             ],
+             "ParentId": "27cf784147099545",
+             "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+             "Created": 1364102658,
+             "Size": 24653,
+             "VirtualSize": 180116135
+          }
+        ]
+
+**Example request, with digest information**:
+
+        GET /images/json?digests=1 HTTP/1.1
+
+**Example response, with digest information**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+          {
+            "Created": 1420064636,
+            "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125",
+            "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2",
+            "RepoDigests": [
+              "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"
+            ],
+            "RepoTags": [
+              "localhost:5000/test/busybox:latest",
+              "playdate:latest"
+            ],
+            "Size": 0,
+            "VirtualSize": 2429728
+          }
+        ]
+
+The response shows a single image `Id` associated with two repositories
+(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use
+either of the `RepoTags` values `localhost:5000/test/busybox:latest` or
+`playdate:latest` to reference the image.
+
+You can also use `RepoDigests` values to reference an image. In this response,
+the array has only one reference and that is to the
+`localhost:5000/test/busybox` repository; the `playdate` repository has no
+digest. You can reference this digest using the value:
+`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...`
+
+See the `docker run` and `docker build` commands for examples of digest and tag
+references on the command line.
+
+Query Parameters:
+
+-   **all** – 1/True/true or 0/False/false, default false
+-   **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters:
+  -   dangling=true
+  -   label=`key` or `key=value` of an image label
+
+### Build image from a Dockerfile
+
+`POST /build`
+
+Build an image from a Dockerfile
+
+**Example request**:
+
+        POST /build HTTP/1.1
+
+        {{ TAR STREAM }}
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"stream": "Step 1..."}
+        {"stream": "..."}
+        {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}}
+
+The input stream must be a tar archive compressed with one of the
+following algorithms: identity (no compression), gzip, bzip2, xz.
+
+The archive must include a build instructions file, typically called
+`Dockerfile` at the root of the archive. The `dockerfile` parameter may be
+used to specify a different build instructions file by having its value be
+the path to the alternate build instructions file to use.
+
+The archive may include any number of other files,
+which will be accessible in the build context (See the [*ADD build
+command*](/reference/builder/#dockerbuilder)).
+
+The build will also be canceled if the client drops the connection by quitting
+or being killed.
+
+Query Parameters:
+
+-   **dockerfile** - path within the build context to the Dockerfile. This is 
+        ignored if `remote` is specified and points to an individual filename.
+-   **t** – repository name (and optionally a tag) to be applied to
+        the resulting image in case of success
+-   **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the 
+        URI specifies a filename, the file's contents are placed into a file 
+		called `Dockerfile`.
+-   **q** – suppress verbose build output
+-   **nocache** – do not use the cache when building the image
+-   **pull** - attempt to pull the image even if an older image exists locally
+-   **rm** - remove intermediate containers after a successful build (default behavior)
+-   **forcerm** - always remove intermediate containers (includes rm)
+-   **memory** - set memory limit for build
+-   **memswap** - Total memory (memory + swap), `-1` to disable swap
+-   **cpushares** - CPU shares (relative weight)
+-   **cpusetcpus** - CPUs in which to allow execution, e.g., `0-3`, `0,1`
+
+    Request Headers:
+
+-   **Content-type** – should be set to `"application/tar"`.
+-   **X-Registry-Config** – base64-encoded ConfigFile object
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+### Create an image
+
+`POST /images/create`
+
+Create an image, either by pulling it from the registry or by importing it
+
+**Example request**:
+
+        POST /images/create?fromImage=ubuntu HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"status": "Pulling..."}
+        {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}}
+        {"error": "Invalid..."}
+        ...
+
+    When using this endpoint to pull an image from the registry, the
+    `X-Registry-Auth` header can be used to include
+    a base64-encoded AuthConfig object.
+
+Query Parameters:
+
+-   **fromImage** – name of the image to pull
+-   **fromSrc** – source to import.  The value may be a URL from which the image
+        can be retrieved or `-` to read the image from the request body.
+-   **repo** – repository
+-   **tag** – tag
+-   **registry** – the registry to pull from
+
+    Request Headers:
+
+-   **X-Registry-Auth** – base64-encoded AuthConfig object
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+
+
+### Inspect an image
+
+`GET /images/(name)/json`
+
+Return low-level information on the image `name`
+
+**Example request**:
+
+        GET /images/ubuntu/json HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+             "Created": "2013-03-23T22:24:18.818426-07:00",
+             "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+             "ContainerConfig":
+                     {
+                             "Hostname": "",
+                             "User": "",
+                             "AttachStdin": false,
+                             "AttachStdout": false,
+                             "AttachStderr": false,
+                             "PortSpecs": null,
+                             "Tty": true,
+                             "OpenStdin": true,
+                             "StdinOnce": false,
+                             "Env": null,
+                             "Cmd": ["/bin/bash"],
+                             "Dns": null,
+                             "Image": "ubuntu",
+                             "Labels": {
+                                 "com.example.vendor": "Acme",
+                                 "com.example.license": "GPL",
+                                 "com.example.version": "1.0"
+                             },
+                             "Volumes": null,
+                             "VolumesFrom": "",
+                             "WorkingDir": ""
+                     },
+             "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+             "Parent": "27cf784147099545",
+             "Size": 6824592
+        }
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such image
+-   **500** – server error
+
+### Get the history of an image
+
+`GET /images/(name)/history`
+
+Return the history of the image `name`
+
+**Example request**:
+
+        GET /images/ubuntu/history HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+             {
+                     "Id": "b750fe79269d",
+                     "Created": 1364102658,
+                     "CreatedBy": "/bin/bash"
+             },
+             {
+                     "Id": "27cf78414709",
+                     "Created": 1364068391,
+                     "CreatedBy": ""
+             }
+        ]
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such image
+-   **500** – server error
+
+### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+        POST /images/test/push HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"status": "Pushing..."}
+        {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}}
+        {"error": "Invalid..."}
+        ...
+
+    If you wish to push an image on to a private registry, that image must already have been tagged
+    into a repository which references that registry host name and port.  This repository name should
+    then be used in the URL. This mirrors the flow of the CLI.
+
+**Example request**:
+
+        POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+Query Parameters:
+
+-   **tag** – the tag to associate with the image on the registry, optional
+
+Request Headers:
+
+-   **X-Registry-Auth** – include a base64-encoded AuthConfig
+        object.
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such image
+-   **500** – server error
+
+### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+        POST /images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 201 OK
+
+Query Parameters:
+
+-   **repo** – The repository to tag in
+-   **force** – 1/True/true or 0/False/false, default false
+-   **tag** - The new tag name
+
+Status Codes:
+
+-   **201** – no error
+-   **400** – bad parameter
+-   **404** – no such image
+-   **409** – conflict
+-   **500** – server error
+
+### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+        DELETE /images/test HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-type: application/json
+
+        [
+         {"Untagged": "3e2f21a89f"},
+         {"Deleted": "3e2f21a89f"},
+         {"Deleted": "53b4f83ac9"}
+        ]
+
+Query Parameters:
+
+-   **force** – 1/True/true or 0/False/false, default false
+-   **noprune** – 1/True/true or 0/False/false, default false
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such image
+-   **409** – conflict
+-   **500** – server error
+
+### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+**Example request**:
+
+        GET /images/search?term=sshd HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        [
+                {
+                    "description": "",
+                    "is_official": false,
+                    "is_automated": false,
+                    "name": "wma55/u1210sshd",
+                    "star_count": 0
+                },
+                {
+                    "description": "",
+                    "is_official": false,
+                    "is_automated": false,
+                    "name": "jdswinbank/sshd",
+                    "star_count": 0
+                },
+                {
+                    "description": "",
+                    "is_official": false,
+                    "is_automated": false,
+                    "name": "vgauthier/sshd",
+                    "star_count": 0
+                }
+        ...
+        ]
+
+Query Parameters:
+
+-   **term** – term to search
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+## 2.3 Misc
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+**Example request**:
+
+        POST /auth HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "username":" hannibal",
+             "password: "xxxx",
+             "email": "hannibal@a-team.com",
+             "serveraddress": "https://index.docker.io/v1/"
+        }
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+
+Status Codes:
+
+-   **200** – no error
+-   **204** – no error
+-   **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+**Example request**:
+
+        GET /info HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+            "Containers": 11,
+            "CpuCfsPeriod": true,
+            "CpuCfsQuota": true,
+            "Debug": false,
+            "DockerRootDir": "/var/lib/docker",
+            "Driver": "btrfs",
+            "DriverStatus": [[""]],
+            "ExecutionDriver": "native-0.1",
+            "ExperimentalBuild": false,
+            "HttpProxy": "http://test:test@localhost:8080",
+            "HttpsProxy": "https://test:test@localhost:8080",
+            "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
+            "IPv4Forwarding": true,
+            "Images": 16,
+            "IndexServerAddress": "https://index.docker.io/v1/",
+            "InitPath": "/usr/bin/docker",
+            "InitSha1": "",
+            "KernelVersion": "3.12.0-1-amd64",
+            "Labels": [
+                "storage=ssd"
+            ],
+            "MemTotal": 2099236864,
+            "MemoryLimit": true,
+            "NCPU": 1,
+            "NEventsListener": 0,
+            "NFd": 11,
+            "NGoroutines": 21,
+            "Name": "prod-server-42",
+            "NoProxy": "9.81.1.160",
+            "OomKillDisable": true,
+            "OperatingSystem": "Boot2Docker",
+            "RegistryConfig": {
+                "IndexConfigs": {
+                    "docker.io": {
+                        "Mirrors": null,
+                        "Name": "docker.io",
+                        "Official": true,
+                        "Secure": true
+                    }
+                },
+                "InsecureRegistryCIDRs": [
+                    "127.0.0.0/8"
+                ]
+            },
+            "SwapLimit": false,
+            "SystemTime": "2015-03-10T11:11:23.730591467-07:00"
+        }
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+**Example request**:
+
+        GET /version HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {
+             "Version": "1.5.0",
+             "Os": "linux",
+             "KernelVersion": "3.18.5-tinycore64",
+             "GoVersion": "go1.4.1",
+             "GitCommit": "a8a31ef",
+             "Arch": "amd64",
+             "ApiVersion": "1.19"
+        }
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+### Ping the docker server
+
+`GET /_ping`
+
+Ping the docker server
+
+**Example request**:
+
+        GET /_ping HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: text/plain
+
+        OK
+
+Status Codes:
+
+-   **200** - no error
+-   **500** - server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+**Example request**:
+
+        POST /commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
+        Content-Type: application/json
+
+        {
+             "Hostname": "",
+             "Domainname": "",
+             "User": "",
+             "AttachStdin": false,
+             "AttachStdout": true,
+             "AttachStderr": true,
+             "PortSpecs": null,
+             "Tty": false,
+             "OpenStdin": false,
+             "StdinOnce": false,
+             "Env": null,
+             "Cmd": [
+                     "date"
+             ],
+             "Volumes": {
+                     "/tmp": {}
+             },
+             "WorkingDir": "",
+             "NetworkDisabled": false,
+             "ExposedPorts": {
+                     "22/tcp": {}
+             }
+        }
+
+**Example response**:
+
+        HTTP/1.1 201 Created
+        Content-Type: application/vnd.docker.raw-stream
+
+        {"Id": "596069db4bf5"}
+
+Json Parameters:
+
+-  **config** - the container's configuration
+
+Query Parameters:
+
+-   **container** – source container
+-   **repo** – repository
+-   **tag** – tag
+-   **comment** – commit message
+-   **author** – author (e.g., "John Hannibal Smith
+    <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+Status Codes:
+
+-   **201** – no error
+-   **404** – no such container
+-   **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get container events from docker, either in real time via streaming, or via
+polling (using since).
+
+Docker containers will report the following events:
+
+    create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause
+
+and Docker images will report:
+
+    untag, delete
+
+**Example request**:
+
+        GET /events?since=1374067924
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+
+        {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+        {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924}
+        {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966}
+        {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970}
+
+Query Parameters:
+
+-   **since** – timestamp used for polling
+-   **until** – timestamp used for polling
+-   **filters** – a json encoded value of the filters (a map[string][]string) to process on the event list. Available filters:
+  -   event=&lt;string&gt; -- event to filter
+  -   image=&lt;string&gt; -- image to filter
+  -   container=&lt;string&gt; -- container to filter
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+### Get a tarball containing all images in a repository
+
+`GET /images/(name)/get`
+
+Get a tarball containing all images and metadata for the repository specified
+by `name`.
+
+If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image
+(and its parents) are returned. If `name` is an image ID, similarly only that
+image (and its parents) are returned, but with the exclusion of the
+'repositories' file in the tarball, as there were no image names referenced.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+        GET /images/ubuntu/get
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/x-tar
+
+        Binary data stream
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+### Get a tarball containing all images.
+
+`GET /images/get`
+
+Get a tarball containing all images and metadata for one or more repositories.
+
+For each value of the `names` parameter: if it is a specific name and tag (e.g.
+ubuntu:latest), then only that image (and its parents) are returned; if it is
+an image ID, similarly only that image (and its parents) are returned and there
+would be no names referenced in the 'repositories' file for this image ID.
+
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+        GET /images/get?names=myname%2Fmyapp%3Alatest&names=busybox
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: application/x-tar
+
+        Binary data stream
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+### Load a tarball with a set of images and tags into docker
+
+`POST /images/load`
+
+Load a set of images and tags into the docker repository.
+See the [image tarball format](#image-tarball-format) for more details.
+
+**Example request**
+
+        POST /images/load
+
+        Tarball in body
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+
+Status Codes:
+
+-   **200** – no error
+-   **500** – server error
+
+### Image tarball format
+
+An image tarball contains one directory per image layer (named using its long ID),
+each containing three files:
+
+1. `VERSION`: currently `1.0` - the file format version
+2. `json`: detailed layer information, similar to `docker inspect layer_id`
+3. `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+The `layer.tar` file will contain `aufs` style `.wh..wh.aufs` files and directories
+for storing attribute changes and deletions.
+
+If the tarball defines a repository, there will also be a `repositories` file at
+the root that contains a list of repository and tag names mapped to layer IDs.
+
+```
+{"hello-world":
+    {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"}
+}
+```
+
+### Exec Create
+
+`POST /containers/(id)/exec`
+
+Sets up an exec instance in a running container `id`
+
+**Example request**:
+
+        POST /containers/e90e34656806/exec HTTP/1.1
+        Content-Type: application/json
+
+        {
+	     "AttachStdin": false,
+	     "AttachStdout": true,
+	     "AttachStderr": true,
+	     "Tty": false,
+	     "Cmd": [
+                     "date"
+             ],
+        }
+
+**Example response**:
+
+        HTTP/1.1 201 OK
+        Content-Type: application/json
+
+        {
+             "Id": "f90e34656806"
+             "Warnings":[]
+        }
+
+Json Parameters:
+
+-   **AttachStdin** - Boolean value, attaches to stdin of the exec command.
+-   **AttachStdout** - Boolean value, attaches to stdout of the exec command.
+-   **AttachStderr** - Boolean value, attaches to stderr of the exec command.
+-   **Tty** - Boolean value to allocate a pseudo-TTY
+-   **Cmd** - Command to run specified as a string or an array of strings.
+
+
+Status Codes:
+
+-   **201** – no error
+-   **404** – no such container
+
+### Exec Start
+
+`POST /exec/(id)/start`
+
+Starts a previously set up exec instance `id`. If `detach` is true, this API
+returns after starting the `exec` command. Otherwise, this API sets up an
+interactive session with the `exec` command.
+
+**Example request**:
+
+        POST /exec/e90e34656806/start HTTP/1.1
+        Content-Type: application/json
+
+        {
+	     "Detach": false,
+	     "Tty": false,
+        }
+
+**Example response**:
+
+        HTTP/1.1 201 OK
+        Content-Type: application/json
+
+        {{ STREAM }}
+
+Json Parameters:
+
+-   **Detach** - Detach from the exec command
+-   **Tty** - Boolean value to allocate a pseudo-TTY
+
+Status Codes:
+
+-   **201** – no error
+-   **404** – no such exec instance
+
+    **Stream details**:
+    Similar to the stream behavior of `POST /container/(id)/attach` API
+
+### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the tty session used by the exec command `id`.
+This API is valid only if `tty` was specified as part of creating and starting the exec command.
+
+**Example request**:
+
+        POST /exec/e90e34656806/resize HTTP/1.1
+        Content-Type: text/plain
+
+**Example response**:
+
+        HTTP/1.1 201 OK
+        Content-Type: text/plain
+
+Query Parameters:
+
+-   **h** – height of tty session
+-   **w** – width
+
+Status Codes:
+
+-   **201** – no error
+-   **404** – no such exec instance
+
+### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the exec command `id`.
+
+**Example request**:
+
+        GET /exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+        HTTP/1.1 200 OK
+        Content-Type: plain/text
+
+        {
+          "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+          "Running" : false,
+          "ExitCode" : 2,
+          "ProcessConfig" : {
+            "privileged" : false,
+            "user" : "",
+            "tty" : false,
+            "entrypoint" : "sh",
+            "arguments" : [
+              "-c",
+              "exit 2"
+            ]
+          },
+          "OpenStdin" : false,
+          "OpenStderr" : false,
+          "OpenStdout" : false,
+          "Container" : {
+            "State" : {
+              "Running" : true,
+              "Paused" : false,
+              "Restarting" : false,
+              "OOMKilled" : false,
+              "Pid" : 3650,
+              "ExitCode" : 0,
+              "Error" : "",
+              "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+              "FinishedAt" : "0001-01-01T00:00:00Z"
+            },
+            "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+            "Created" : "2014-11-17T22:26:03.626304998Z",
+            "Path" : "date",
+            "Args" : [],
+            "Config" : {
+              "Hostname" : "8f177a186b97",
+              "Domainname" : "",
+              "User" : "",
+              "AttachStdin" : false,
+              "AttachStdout" : false,
+              "AttachStderr" : false,
+              "PortSpecs" : null,
+              "ExposedPorts" : null,
+              "Tty" : false,
+              "OpenStdin" : false,
+              "StdinOnce" : false,
+              "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ],
+              "Cmd" : [
+                "date"
+              ],
+              "Image" : "ubuntu",
+              "Volumes" : null,
+              "WorkingDir" : "",
+              "Entrypoint" : null,
+              "NetworkDisabled" : false,
+              "MacAddress" : "",
+              "OnBuild" : null,
+              "SecurityOpt" : null
+            },
+            "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5",
+            "NetworkSettings" : {
+              "IPAddress" : "172.17.0.2",
+              "IPPrefixLen" : 16,
+              "MacAddress" : "02:42:ac:11:00:02",
+              "Gateway" : "172.17.42.1",
+              "Bridge" : "docker0",
+              "PortMapping" : null,
+              "Ports" : {}
+            },
+            "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf",
+            "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname",
+            "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts",
+            "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log",
+            "Name" : "/test",
+            "Driver" : "aufs",
+            "ExecDriver" : "native-0.2",
+            "MountLabel" : "",
+            "ProcessLabel" : "",
+            "AppArmorProfile" : "",
+            "RestartCount" : 0,
+            "Volumes" : {},
+            "VolumesRW" : {}
+          }
+        }
+
+Status Codes:
+
+-   **200** – no error
+-   **404** – no such exec instance
+-   **500** - server error
+
+# 3. Going further
+
+## 3.1 Inside `docker run`
+
+As an example, the `docker run` command line makes the following API calls:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+    - Try to pull it
+    - Then retry to create the container
+
+- Start the container
+
+- If you are not in detached mode:
+- Attach to the container, using logs=1 (to have stdout and
+      stderr from the container's start) and stream=1
+
+- If in detached mode or only stdin is attached:
+- Display the container's id
+
+## 3.2 Hijacking
+
+In this version of the API, /attach, uses hijacking to transport stdin,
+stdout and stderr on the same socket.
+
+To hint potential proxies about connection hijacking, Docker client sends
+connection upgrade headers similarly to websocket.
+
+    Upgrade: tcp
+    Connection: Upgrade
+
+When Docker daemon detects the `Upgrade` header, it will switch its status code
+from **200 OK** to **101 UPGRADED** and resend the same headers.
+
+This might change in the future.
+
+## 3.3 CORS Requests
+
+To set cross origin requests to the remote api please give values to 
+"--api-cors-header" when running docker in daemon mode. Set * will allow all,
+default or blank means CORS disabled
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md
index d0f9661..cd8a730 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.6.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.6.md
@@ -560,7 +560,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1)
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md
index 6cdd603..dade45f 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.7.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.7.md
@@ -505,7 +505,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1)
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md
index 409e63a..56260db 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.8.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.8.md
@@ -553,7 +553,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1)
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md
index 7ea3fc9..26c6b45 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.9.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.9.md
@@ -557,7 +557,7 @@
 
     1.  Read 8 bytes
     2.  chose stdout or stderr depending on the first byte
-    3.  Extract the frame size from the last 4 byets
+    3.  Extract the frame size from the last 4 bytes
     4.  Read the extracted size and output it on the correct output
     5.  Goto 1)
 
@@ -675,7 +675,7 @@
 
 ## 2.2 Images
 
-### List Images
+### List images
 
 `GET /images/json`
 
@@ -1052,7 +1052,7 @@
     Request Headers:
 
 -   **Content-type** – should be set to `"application/tar"`.
--   **X-Registry-Config** – base64-encoded ConfigFile objec
+-   **X-Registry-Config** – base64-encoded ConfigFile object
 
 Status Codes:
 
@@ -1119,7 +1119,7 @@
 -   **200** – no error
 -   **500** – server error
 
-### Show the docker version information
+### Show the Docker version information
 
 `GET /version`
 
@@ -1343,7 +1343,7 @@
 In this version of the API, /attach, uses hijacking to transport stdin,
 stdout and stderr on the same socket. This might change in the future.
 
-## 3.3 CORS Requests
+## 3.3 CORS requests
 
 To enable cross origin requests to the remote api add the flag
 "--api-enable-cors" when running docker in daemon mode.
diff --git a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md
index 0e39784..b1481e3 100644
--- a/docs/sources/reference/api/hub_registry_spec.md
+++ b/docs/sources/reference/api/hub_registry_spec.md
@@ -1,4 +1,4 @@
-page_title: Registry Documentation
+page_title: Registry documentation
 page_description: Documentation for docker Registry and Registry API
 page_keywords: docker, registry, api, hub
 
@@ -115,7 +115,7 @@
 
 It's possible to run:
 
-    $ sudo docker pull https://<registry>/repositories/samalba/busybox
+    $ docker pull https://<registry>/repositories/samalba/busybox
 
 In this case, Docker bypasses the Docker Hub. However the security is not
 guaranteed (in case Registry A is corrupted) because there won't be any
@@ -679,7 +679,7 @@
 On the next request, the client will always pick a server from this
 list.
 
-## Authentication & Authorization
+## Authentication and authorization
 
 ### On the Docker Hub
 
@@ -747,7 +747,7 @@
     GET /(...)
     Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="
 
-## Document Version
+## Document version
 
  - 1.0 : May 6th 2013 : initial release
  - 1.1 : June 1st 2013 : Added Delete Repository and way to handle new
diff --git a/docs/sources/reference/api/images/event_state.gliffy b/docs/sources/reference/api/images/event_state.gliffy
new file mode 100644
index 0000000..2eb0f3a
--- /dev/null
+++ b/docs/sources/reference/api/images/event_state.gliffy
@@ -0,0 +1 @@
+{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":1109,"height":539,"nodeIndex":335,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":true,"drawingGuidesOn":true,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":26.46762966848334,"y":100},"max":{"x":1109,"y":538.0017856687341}},"objects":[{"x":83.0,"y":251.0,"rotation":0.0,"id":328,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":328,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":188,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-52.03237033151666,-0.9999999999999716],[87.0,-1.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":332,"width":67.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5233416311379174,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">docker run</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":74.0,"y":318.0,"rotation":0.0,"id":327,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":327,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":228,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-42.0,1.0],[96.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":333,"width":83.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5689443767164591,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">docker create</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":191.0,"y":409.0,"rotation":0.0,"id":325,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":325,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":193,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":215,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-21.0,1.0],[-61.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":331.0,"y":346.0,"rotation":0.0,"id":320,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":320,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":193,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-21.0,1.0],[-53.5,1.0],[-53.5,64.0],[-86.0,64.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":324,"width":63.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\">docker rm</span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":265.0,"y":245.0,"rotation":0.0,"id":319,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":319,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":188,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-20.0,5.0],[312.5,5.0],[312.5,55.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":872.0,"y":503.0,"rotation":0.0,"id":310,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":310,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":205,"py":0.0,"px":0.2928932188134524}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-60.03300858899104,-53.0],[-148.0,-151.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[]},{"x":735.0,"y":341.0,"rotation":0.0,"id":307,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":307,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":203,"py":0.2928932188134525,"px":1.1102230246251563E-16}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[0.0,0.0],[137.5,60.7157287525381]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":309,"width":83.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.37922003257116654,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">docker pause</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1023.0,"y":446.0,"rotation":0.0,"id":298,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":298,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":213,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":205,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[39.5,-1.0],[39.5,24.0],[-158.0,24.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":313,"width":101.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.37286693198126664,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:left;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\"> docker unpause</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":904.0,"y":434.0,"rotation":0.0,"id":295,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":295,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":203,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":213,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[43.5,-24.0],[123.5,-24.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":411.0,"y":419.0,"rotation":0.0,"id":291,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":291,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":217,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.2659812842322253,1.0],[3.2659812842322253,-32.0],[-81.46803743153555,-32.0],[-81.46803743153555,-65.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":292,"width":21.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.4870188236535277,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-size:12px;font-style:italic;\"><span style=\"\">No</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":415.0,"y":419.0,"rotation":0.0,"id":289,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":289,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":217,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-0.7340187157677747,1.0],[-0.7340187157677747,-32.5],[162.5,-32.5],[162.5,-79.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":290,"width":26.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.46753493572435184,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-size:12px;font-style:italic;\"><span style=\"\">Yes</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":521.0,"y":209.0,"rotation":0.0,"id":287,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":287,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":195,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-11.0,-19.0],[-87.0,-19.0],[-87.0,84.0],[-163.0,84.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":988.0,"y":232.0,"rotation":0.0,"id":282,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":282,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":201,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[39.5,18.0],[-150.0,18.0],[-150.0,68.0],[-250.0,68.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[]},{"x":664.0,"y":493.0,"rotation":0.0,"id":276,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":276,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":207,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.7071067811865475,"px":0.9999999999999998}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[6.0,-33.0],[-36.5,-33.0],[-36.5,-44.7157287525381],[-79.0,-44.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":678.0,"y":344.0,"rotation":0.0,"id":273,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":273,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":236,"py":0.29289321881345237,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,-4.0],[-45.5,-4.0],[-45.5,87.7157287525381],[-93.0,87.7157287525381]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":275,"width":57.0,"height":40.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.5,"linePerpValue":0.0,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-size:12px;\"><span style=\"\">container </span></span></p><p style=\"text-align:center;\"><span style=\"font-family:Arial;font-size:12px;\"><span style=\"\">process</span></span></p><p style=\"text-align:center;\"><span style=\"font-family:Arial;font-size:12px;\"><span style=\"\">exited</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":566.0,"y":431.0,"rotation":0.0,"id":272,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":272,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":236,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":217,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-56.0,9.0],[-78.86700935788389,9.0],[-78.86700935788389,39.0],[-101.73401871576777,39.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":785.0,"y":119.0,"rotation":0.0,"id":270,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":270,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[5.0,1.0],[-455.46803743153555,1.0],[-455.46803743153555,165.0]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":829.0,"y":172.0,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":269,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":248,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,-2.0],[-1.5,-32.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":661.0,"y":189.0,"rotation":0.0,"id":267,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":267,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":195,"py":0.5,"px":1.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,2.284271247461902],[-76.0,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":946.0,"y":319.0,"rotation":0.0,"id":263,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":263,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":197,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":233,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.5,1.0],[81.5,1.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":708.0,"y":286.0,"rotation":0.0,"id":256,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":256,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":254,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.5,-2.0],[-0.5,-76.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":258,"width":64.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.3108108108108108,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">docker kill</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":710.0,"y":359.0,"rotation":0.0,"id":245,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":207,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-2.5,-5.0],[-2.5,81.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":247,"width":83.0,"height":27.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-size:12px;\"><span style=\"font-style:italic;\"> killed </span></span><span style=\"\">by</span></p><p style=\"text-align:center;\"><span style=\"font-family:Arial;font-size:12px;\"><span style=\"font-style:italic;\">out-of-memory</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":761.0,"y":318.0,"rotation":0.0,"id":238,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":42,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":211,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":197,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-18.5,1.0],[111.5,2.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":240,"width":85.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.4363456059259962,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">docker restart</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":608.0,"y":319.0,"rotation":0.0,"id":232,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":191,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":211,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[7.0,1.0],[64.5,0.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":454.0,"y":325.0,"rotation":0.0,"id":231,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":36,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":209,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":191,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-89.46803743153555,-6.0],[86.0,-5.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":288,"width":77.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":0.3163731357954714,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:right;\"><span style=\"font-family:Arial;font-size:12px;\"><span style=\"\"><span style=\"font-weight:bold;\">docker start </span><br /></span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":299.0,"y":321.0,"rotation":0.0,"id":230,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":35,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":228,"py":0.5,"px":1.0}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":209,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-54.0,-2.0],[-4.468037431535549,-2.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":294.53196256846445,"y":284.0,"rotation":0.0,"id":209,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":20,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#e6b8af","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555554,"y":0.0,"rotation":0.0,"id":210,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">stopped</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":300.0,"rotation":0.0,"id":191,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":192,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">start</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":510.0,"y":170.0,"rotation":0.0,"id":195,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":196,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">kill</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":872.5,"y":300.0,"rotation":0.0,"id":197,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":198,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">die</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":100.0,"rotation":0.0,"id":199,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":200,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">stop</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":450.0,"rotation":0.0,"id":205,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":206,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">unpause</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":670.0,"y":440.0,"rotation":0.0,"id":207,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":18,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":208,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">OOM</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":672.5,"y":284.0,"rotation":0.0,"id":211,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b6d7a8","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":212,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">running</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":60.0,"y":375.0,"rotation":0.0,"id":215,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":26,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#b7b7b7","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":216,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">deleted</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":349.53196256846445,"y":420.0,"rotation":0.0,"id":227,"width":130.46803743153555,"height":116.23401871576777,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":32,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-6.765981284232225,"y":76.0,"rotation":45.0,"id":223,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">Restart </span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":57.234018715767775,"y":75.0,"rotation":315.0,"id":219,"width":80.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">Policy</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":14.734018715767775,"y":0.0,"rotation":0.0,"id":217,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.decision","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.diamond.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":218,"width":96.0,"height":27.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">Should restart?</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]}]},{"x":1027.5,"y":375.0,"rotation":0.0,"id":213,"width":70.0,"height":70.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.connector","order":24,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ellipse.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#fce5cd","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5555555555555556,"y":0.0,"rotation":0.0,"id":214,"width":66.88888888888889,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">paused</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":872.5,"y":390.0,"rotation":0.0,"id":203,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":204,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">pause</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":510.0,"y":420.0,"rotation":0.0,"id":236,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":237,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">die</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":790.0,"y":170.0,"rotation":0.0,"id":248,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":249,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">die</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":670.0,"y":170.0,"rotation":0.0,"id":254,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":53,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":255,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">die</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":740.0,"y":323.0,"rotation":0.0,"id":250,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":50,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":248,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-10.0,-33.0],[87.5,-113.0]],"lockSegments":{},"ortho":false}},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":253,"width":73.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"both","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"font-family:Arial;font-weight:bold;font-size:12px;\"><span style=\"\">docker stop</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1027.5,"y":300.0,"rotation":0.0,"id":233,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":234,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">start</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1027.5,"y":230.0,"rotation":0.0,"id":201,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":202,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">restart</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":1066.5,"y":298.0,"rotation":0.0,"id":264,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":264,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":233,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":201,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":1,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.5,2.0],[-1.5,-28.0]],"lockSegments":{},"ortho":false}},"linkMap":[]},{"x":170.0,"y":299.0,"rotation":0.0,"id":228,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":229,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">create</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":170.0,"y":230.0,"rotation":0.0,"id":188,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":190,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">create</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":170.0,"y":390.0,"rotation":0.0,"id":193,"width":75.0,"height":40.0,"uid":"com.gliffy.shape.flowchart.flowchart_v1.default.process","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":1.5,"y":0.0,"rotation":0.0,"id":194,"width":72.0,"height":13.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"<p style=\"text-align:center;\"><span style=\"text-decoration:none;font-family:Arial;font-size:12px;\"><span style=\"text-decoration:none;\">destroy</span></span></p>","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]}],"shapeStyles":{"com.gliffy.shape.uml.uml_v2.state_machine":{"fill":"#e2e2e2","stroke":"#000000","strokeWidth":2},"com.gliffy.shape.flowchart.flowchart_v1.default":{"fill":"#b7b7b7","stroke":"#333333","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"bold":true,"color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}}
\ No newline at end of file
diff --git a/docs/sources/reference/api/images/event_state.png b/docs/sources/reference/api/images/event_state.png
new file mode 100644
index 0000000..aeeaca3
--- /dev/null
+++ b/docs/sources/reference/api/images/event_state.png
Binary files differ
diff --git a/docs/sources/reference/api/registry_api_client_libraries.md b/docs/sources/reference/api/registry_api_client_libraries.md
index 811ac85..965ba46 100644
--- a/docs/sources/reference/api/registry_api_client_libraries.md
+++ b/docs/sources/reference/api/registry_api_client_libraries.md
@@ -1,8 +1,8 @@
-page_title: Registry API Client Libraries
+page_title: Registry API client libraries
 page_description: Various client libraries available to use with the Docker registry API
 page_keywords: API, Docker, index, registry, REST, documentation, clients, C#, Erlang, Go, Groovy, Java, JavaScript, Perl, PHP, Python, Ruby, Rust, Scala
 
-# Docker Registry 1.0 API Client Libraries
+# Docker Registry 1.0 API client libraries
 
 These libraries have not been tested by the Docker maintainers for
 compatibility. Please file issues with the library owners. If you find
diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md
index d79bbd8..7b237c2 100644
--- a/docs/sources/reference/api/remote_api_client_libraries.md
+++ b/docs/sources/reference/api/remote_api_client_libraries.md
@@ -1,8 +1,8 @@
-page_title: Remote API Client Libraries
+page_title: Remote API client libraries
 page_description: Various client libraries available to use with the Docker remote API
 page_keywords: API, Docker, index, registry, REST, documentation, clients, C#, Erlang, Go, Groovy, Java, JavaScript, Perl, PHP, Python, Ruby, Rust, Scala
 
-# Docker Remote API Client Libraries
+# Docker Remote API client libraries
 
 These libraries have not been tested by the Docker maintainers for
 compatibility. Please file issues with the library owners. If you find
@@ -43,23 +43,35 @@
       <td>Active</td>
     </tr>
     <tr class="row-even">
+      <td>Dart</td>
+      <td>bwu_docker</td>
+      <td><a class="reference external" href="https://github.com/bwu-dart/bwu_docker">https://github.com/bwu-dart/bwu_docker</a></td>
+      <td>Active</td>
+    </tr>
+    <tr class="row-odd">
       <td>Go</td>
       <td>go-dockerclient</td>
       <td><a class="reference external" href="https://github.com/fsouza/go-dockerclient">https://github.com/fsouza/go-dockerclient</a></td>
       <td>Active</td>
     </tr>
-    <tr class="row-odd">
+    <tr class="row-even">
       <td>Go</td>
       <td>dockerclient</td>
       <td><a class="reference external" href="https://github.com/samalba/dockerclient">https://github.com/samalba/dockerclient</a></td>
       <td>Active</td>
     </tr>
-    <tr class="row-even">
+    <tr class="row-odd">
       <td>Groovy</td>
       <td>docker-client</td>
       <td><a class="reference external" href="https://github.com/gesellix-docker/docker-client">https://github.com/gesellix-docker/docker-client</a></td>
       <td>Active</td>
     </tr>
+    <tr class="row-even">
+      <td>Haskell</td>
+      <td>docker-hs</td>
+      <td><a class="reference external" href="https://github.com/denibertovic/docker-hs">https://github.com/denibertovic/docker-hs</a></td>
+      <td>Active</td>
+    </tr>
     <tr class="row-odd">
       <td>Java</td>
       <td>docker-java</td>
diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md
index 025b253..fb4d5ce 100644
--- a/docs/sources/reference/builder.md
+++ b/docs/sources/reference/builder.md
@@ -1,8 +1,8 @@
-page_title: Dockerfile Reference
+page_title: Dockerfile reference
 page_description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
 page_keywords: builder, docker, Dockerfile, automation, image creation
 
-# Dockerfile Reference
+# Dockerfile reference
 
 **Docker can build images automatically** by reading the instructions
 from a `Dockerfile`. A `Dockerfile` is a text document that contains all
@@ -26,7 +26,7 @@
 Then call `docker build` with the path of your source repository as the argument
 (for example, `.`):
 
-    $ sudo docker build .
+    $ docker build .
 
 The path to the source repository defines where to find the *context* of
 the build. The build is run by the Docker daemon, not by the CLI, so the
@@ -41,15 +41,16 @@
 > repository, the entire contents of your hard drive will get sent to the daemon (and
 > thus to the machine running the daemon). You probably don't want that.
 
-In most cases, it's best to put each Dockerfile in an empty directory, and then add only
-the files needed for building that Dockerfile to that directory. To further speed up the
-build, you can exclude files and directories by adding a `.dockerignore` file to the same
-directory.
+In most cases, it's best to put each Dockerfile in an empty directory. Then,
+only add the files needed for building the Dockerfile to the directory. To
+increase the build's performance, you can exclude files and directories by
+adding a `.dockerignore` file to the directory.  For information about how to
+[create a `.dockerignore` file](#the-dockerignore-file) on this page.
 
 You can specify a repository and tag at which to save the new image if
 the build succeeds:
 
-    $ sudo docker build -t shykes/myapp .
+    $ docker build -t shykes/myapp .
 
 The Docker daemon will run your steps one-by-one, committing the result
 to a new image if necessary, before finally outputting the ID of your
@@ -65,7 +66,7 @@
 see the [`Dockerfile` Best Practices
 guide](/articles/dockerfile_best-practices/#build-cache) for more information):
 
-    $ sudo docker build -t SvenDowideit/ambassador .
+    $ docker build -t SvenDowideit/ambassador .
     Uploading context 10.24 kB
     Uploading context
     Step 1 : FROM docker-ut
@@ -105,7 +106,7 @@
 Here is the set of instructions you can use in a `Dockerfile` for building
 images.
 
-### Environment Replacement
+### Environment replacement
 
 > **Note**: prior to 1.3, `Dockerfile` environment variables were handled
 > similarly, in that they would be replaced as described below. However, there
@@ -113,18 +114,30 @@
 > replacement at the time. After 1.3 this behavior will be preserved and
 > canonical.
 
-Environment variables (declared with [the `ENV` statement](#env)) can also be used in
-certain instructions as variables to be interpreted by the `Dockerfile`. Escapes
-are also handled for including variable-like syntax into a statement literally.
+Environment variables (declared with [the `ENV` statement](#env)) can also be
+used in certain instructions as variables to be interpreted by the
+`Dockerfile`. Escapes are also handled for including variable-like syntax
+into a statement literally.
 
 Environment variables are notated in the `Dockerfile` either with
 `$variable_name` or `${variable_name}`. They are treated equivalently and the
 brace syntax is typically used to address issues with variable names with no
 whitespace, like `${foo}_bar`.
 
+The `${variable_name}` syntax also supports a few of the standard `bash`
+modifiers as specified below:
+
+* `${variable:-word}` indicates that if `variable` is set then the result
+  will be that value. If `variable` is not set then `word` will be the result.
+* `${variable:+word}` indicates that if `variable` is set then `word` will be
+  the result, otherwise the result is the empty string.
+
+In all cases, `word` can be any string, including additional environment
+variables.
+
 Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`,
 for example, will translate to `$foo` and `${foo}` literals respectively.
- 
+
 Example (parsed representation is displayed after the `#`):
 
     FROM busybox
@@ -146,7 +159,7 @@
 `ONBUILD` instructions are **NOT** supported for environment replacement, even
 the instructions above.
 
-Environment variable subtitution will use the same value for each variable
+Environment variable substitution will use the same value for each variable
 throughout the entire command.  In other words, in this example:
 
     ENV abc=hello
@@ -157,43 +170,67 @@
 `ghi` will have a value of `bye` because it is not part of the same command
 that set `abc` to `bye`.
 
-## The `.dockerignore` file
+### .dockerignore file
 
-If a file named `.dockerignore` exists in the source repository, then it
-is interpreted as a newline-separated list of exclusion patterns.
-Exclusion patterns match files or directories relative to the source repository
-that will be excluded from the context. Globbing is done using Go's
+If a file named `.dockerignore` exists in the root of `PATH`, then Docker
+interprets it as a newline-separated list of exclusion patterns. Docker excludes
+files or directories relative to `PATH` that match these exclusion patterns. If
+there are any `.dockerignore` files in `PATH` subdirectories, Docker treats
+them as normal files. 
+
+Filepaths in `.dockerignore` are absolute with the current directory as the
+root. Wildcards are allowed but the search is not recursive. Globbing (file name
+expansion) is done using Go's
 [filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
 
-> **Note**:
-> The `.dockerignore` file can even be used to ignore the `Dockerfile` and
-> `.dockerignore` files. This might be useful if you are copying files from
-> the root of the build context into your new containter but do not want to 
-> include the `Dockerfile` or `.dockerignore` files (e.g. `ADD . /someDir/`).
+You can specify exceptions to exclusion rules. To do this, simply prefix a
+pattern with an `!` (exclamation mark) in the same way you would in a
+`.gitignore` file.  Currently there is no support for regular expressions.
+Formats like `[^temp*]` are ignored. 
 
-The following example shows the use of the `.dockerignore` file to exclude the
-`.git` directory from the context. Its effect can be seen in the changed size of
-the uploaded context.
+The following is an example `.dockerignore` file:
 
-    $ sudo docker build .
-    Uploading context 18.829 MB
-    Uploading context
-    Step 0 : FROM busybox
-     ---> 769b9341d937
-    Step 1 : CMD echo Hello World
-     ---> Using cache
-     ---> 99cc1ad10469
-    Successfully built 99cc1ad10469
-    $ echo ".git" > .dockerignore
-    $ sudo docker build .
-    Uploading context  6.76 MB
-    Uploading context
-    Step 0 : FROM busybox
-     ---> 769b9341d937
-    Step 1 : CMD echo Hello World
-     ---> Using cache
-     ---> 99cc1ad10469
-    Successfully built 99cc1ad10469
+```
+    */temp*
+    */*/temp*
+    temp?
+    *.md
+    !LICENCSE.md
+```
+
+This file causes the following build behavior:
+
+| Rule           | Behavior                                                                                                                                                                     |
+|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `*/temp*`      | Exclude all files with names starting with`temp` in any subdirectory below the root directory. For example, a file named`/somedir/temporary.txt` is ignored.              |
+| `*/*/temp*`    | Exclude files starting with name `temp` from any subdirectory that is two levels below the root directory. For example, the file `/somedir/subdir/temporary.txt` is ignored. |
+| `temp?`        | Exclude the files that match the pattern in the root directory. For example, the files `tempa`, `tempb` in the root directory are ignored.                               |
+| `*.md `        | Exclude all markdown files.                                                                                                                                                  |
+| `!LICENSE.md` | Exception to the exclude all Markdown files is this file,  `LICENSE.md`, include this file in the build.                                                                     |
+
+The placement of  `!` exception rules influences the matching algorithm; the
+last line of the `.dockerignore` that matches a particular file determines
+whether it is included or excluded. In the above example, the `LICENSE.md` file
+matches both the  `*.md` and `!LICENSE.md` rule. If you reverse the lines in the
+example:
+
+```
+    */temp*
+    */*/temp*
+    temp?
+    !LICENCSE.md
+    *.md
+```
+
+The build would exclude `LICENSE.md` because the last `*.md` rule adds all
+Markdown files back onto the ignore list. The `!LICENSE.md` rule has no effect
+because the subsequent `*.md` rule overrides it.
+
+You can even use the  `.dockerignore` file to ignore the `Dockerfile` and
+`.dockerignore` files. This is useful if you are copying files from the root of
+the build context into your new container but do not want to include the
+`Dockerfile` or `.dockerignore` files (e.g. `ADD . /someDir/`).
+
 
 ## FROM
 
@@ -276,7 +313,7 @@
 The cache for `RUN` instructions can be invalidated by `ADD` instructions. See
 [below](#add) for details.
 
-### Known Issues (RUN)
+### Known issues (RUN)
 
 - [Issue 783](https://github.com/docker/docker/issues/783) is about file
   permissions problems that can occur when using the AUFS file system. You
@@ -287,7 +324,7 @@
   the layers with `dirperm1` option. More details on `dirperm1` option can be
   found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html)
 
-  If your system doesnt have support for `dirperm1`, the issue describes a workaround.
+  If your system doesn't have support for `dirperm1`, the issue describes a workaround.
 
 ## CMD
 
@@ -356,14 +393,13 @@
 
 The `LABEL` instruction adds metadata to an image. A `LABEL` is a
 key-value pair. To include spaces within a `LABEL` value, use quotes and
-blackslashes as you would in command-line parsing.
+backslashes as you would in command-line parsing.
 
     LABEL "com.example.vendor"="ACME Incorporated"
 
 An image can have more than one label. To specify multiple labels, separate each
-key-value pair by an EOL.
+key-value pair with whitespace.
 
-    LABEL com.example.label-without-value
     LABEL com.example.label-with-value="foo"
     LABEL version="1.0"
     LABEL description="This text illustrates \
@@ -373,6 +409,8 @@
 possible. Each `LABEL` instruction produces a new layer which can result in an
 inefficient image if you use many labels. This example results in four image
 layers. 
+
+    LABEL multi.label1="value1" multi.label2="value2" other="value3"
     
 Labels are additive including `LABEL`s in `FROM` images. As the system
 encounters and then applies a new label, new `key`s override any previous labels
@@ -380,6 +418,16 @@
 
 To view an image's labels, use the `docker inspect` command.
 
+    "Labels": {
+        "com.example.vendor": "ACME Incorporated"
+        "com.example.label-with-value": "foo",
+        "version": "1.0",
+        "description": "This text illustrates that label-values can span multiple lines.",
+        "multi.label1": "value1",
+        "multi.label2": "value2",
+        "other": "value3"
+    },
+
 ## EXPOSE
 
     EXPOSE <port> [<port>...]
@@ -844,11 +892,11 @@
 
     FROM ubuntu
     RUN mkdir /myvol
-    RUN echo "hello world" > /myvol/greating
+    RUN echo "hello world" > /myvol/greeting
     VOLUME /myvol
 
 This Dockerfile results in an image that causes `docker run`, to
-create a new mount point at `/myvol` and copy the  `greating` file 
+create a new mount point at `/myvol` and copy the  `greeting` file 
 into the newly created volume.
 
 > **Note**:
@@ -950,7 +998,7 @@
 
 > **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions.
 
-## Dockerfile Examples
+## Dockerfile examples
 
     # Nginx
     #
diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md
index 6c3eb1c..c5e276e 100644
--- a/docs/sources/reference/commandline/cli.md
+++ b/docs/sources/reference/commandline/cli.md
@@ -9,18 +9,78 @@
 To list available commands, either run `docker` with no parameters
 or execute `docker help`:
 
-    $ sudo docker
+    $ docker
       Usage: docker [OPTIONS] COMMAND [arg...]
         -H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
 
       A self-sufficient runtime for Linux containers.
 
       ...
+Depending on your Docker system configuration, you may be required
+to preface each `docker` command with `sudo`. To avoid having to use `sudo` with
+the `docker` command, your system administrator can create a Unix group called
+`docker` and add users to it.
+
+For more information about installing Docker or `sudo` configuration, refer to
+the [installation](/installation) instructions for your operating system.
+
+## Environment variables
+
+For easy reference, the following list of environment variables are supported
+by the `docker` command line:
+
+* `DOCKER_CERT_PATH` The location of your authentication keys.
+* `DOCKER_DRIVER` The graph driver to use.
+* `DOCKER_HOST` Daemon socket to connect to.
+* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is unsuitable for Docker.
+* `DOCKER_RAMDISK` If set this will disable 'pivot_root'.
+* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote.
+* `DOCKER_TMPDIR` Location for temporary Docker files.
+
+Because Docker is developed using 'Go', you can also use any environment
+variables used by the 'Go' runtime. In particular, you may find these useful:
+
+* `HTTP_PROXY`
+* `HTTPS_PROXY`
+* `NO_PROXY`
+
+These Go environment variables are case-insensitive. See the
+[Go specification](http://golang.org/pkg/net/http/) for details on these
+variables.
+
+## Configuration files
+
+The Docker command line stores its configuration files in a directory called
+`.docker` within your `HOME` directory. Docker manages most of the files in
+`.docker` and you should not modify them. However, you *can modify* the
+`.docker/config.json` file to control certain aspects of how the `docker`
+command behaves.
+
+Currently, you can modify the `docker` command behavior using environment 
+variables or command-line options. You can also use options within 
+`config.json` to modify some of the same behavior.  When using these 
+mechanisms, you must keep in mind the order of precedence among them. Command 
+line options override environment variables and environment variables override 
+properties you specify in a `config.json` file.
+
+The `config.json` file stores a JSON encoding of a single `HttpHeaders`
+property. The property specifies a set of headers to include in all
+messages sent from the Docker client to the daemon. Docker does not try to
+interpret or understand these header; it simply puts them into the messages.
+Docker does not allow these headers to change any headers it sets for itself.
+
+Following is a sample `config.json` file:
+
+    {
+      "HttpHeaders: {
+        "MyHeader": "MyValue"
+      }
+    }
 
 ## Help
 To list the help on any command just execute the command, followed by the `--help` option.
 
-    $ sudo docker run --help
+    $ docker run --help
 
     Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
 
@@ -53,13 +113,19 @@
 
 ### Multi
 
-Options like `-a=[]` indicate they can be specified multiple times:
+You can specify options like `-a=[]` multiple times in a single command line,
+for example in these commands:
 
-    $ sudo docker run -a stdin -a stdout -a stderr -i -t ubuntu /bin/bash
+    $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
+    $ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls
 
-Sometimes this can use a more complex value string, as for `-v`:
+Sometimes, multiple options can call for a more complex value string as for `-v`:
 
-    $ sudo docker run -v /host:/container example/mysql
+    $ docker run -v /host:/container example/mysql
+
+> **Note**:
+> Do not use the `-t` and `-a stderr` options together due to limitations
+> in the `pty` implementation. All `stderr` in `pty` mode simply goes to `stdout`.
 
 ### Strings and Integers
 
@@ -79,9 +145,14 @@
       --bip=""                               Specify network bridge IP
       -D, --debug=false                      Enable debug mode
       -d, --daemon=false                     Enable daemon mode
+      --default-gateway=""                   Container default gateway IPv4 address
+      --default-gateway-v6=""                Container default gateway IPv6 address
       --dns=[]                               DNS server to use
       --dns-search=[]                        DNS search domains to use
+      --default-ulimit=[]                    Set default ulimit settings for containers
       -e, --exec-driver="native"             Exec driver to use
+      --exec-opt=[]                          Set exec driver options
+      --exec-root="/var/run/docker"          Root of the Docker execdriver
       --fixed-cidr=""                        IPv4 subnet for fixed IPs
       --fixed-cidr-v6=""                     IPv6 subnet for fixed IPs
       -G, --group="docker"                   Group for the unix socket
@@ -97,7 +168,7 @@
       --ipv6=false                           Enable IPv6 networking
       -l, --log-level="info"                 Set the logging level
       --label=[]                             Set key=value labels to the daemon
-      --log-driver="json-file"               Container's logging driver (json-file/none)
+      --log-driver="json-file"               Default driver for container logs
       --mtu=0                                Set the containers network MTU
       -p, --pidfile="/var/run/docker.pid"    Path to use for daemon PID file
       --registry-mirror=[]                   Preferred Docker registry mirror
@@ -109,8 +180,8 @@
       --tlscert="~/.docker/cert.pem"         Path to TLS certificate file
       --tlskey="~/.docker/key.pem"           Path to TLS key file
       --tlsverify=false                      Use TLS and verify the remote
+      --userland-proxy=true                  Use userland proxy for loopback traffic
       -v, --version=false                    Print version information and quit
-      --default-ulimit=[]                    Set default ulimit settings for containers.
 
 Options with [] may be specified multiple times.
 
@@ -160,19 +231,19 @@
 The Docker client will honor the `DOCKER_HOST` environment variable to set
 the `-H` flag for the client.
 
-    $ sudo docker -H tcp://0.0.0.0:2375 ps
+    $ docker -H tcp://0.0.0.0:2375 ps
     # or
     $ export DOCKER_HOST="tcp://0.0.0.0:2375"
-    $ sudo docker ps
+    $ docker ps
     # both are equal
 
 Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than the empty
 string is equivalent to setting the `--tlsverify` flag. The following are equivalent:
 
-    $ sudo docker --tlsverify ps
+    $ docker --tlsverify ps
     # or
     $ export DOCKER_TLS_VERIFY=1
-    $ sudo docker ps
+    $ docker ps
 
 The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`
 environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes
@@ -181,7 +252,7 @@
 ### Daemon storage-driver option
 
 The Docker daemon has support for several different image layer storage drivers: `aufs`,
-`devicemapper`, `btrfs` and `overlay`.
+`devicemapper`, `btrfs`, `zfs` and `overlay`.
 
 The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that
 is unlikely to be merged into the main kernel. These are also known to cause some
@@ -203,6 +274,11 @@
 The `btrfs` driver is very fast for `docker build` - but like `devicemapper` does not
 share executable memory between devices. Use `docker -d -s btrfs -g /mnt/btrfs_partition`.
 
+The `zfs` driver is probably not fast as `btrfs` but has a longer track record
+on stability. Thanks to `Single Copy ARC` shared blocks between clones will be
+cached only once. Use `docker -d -s zfs`. To select a different zfs filesystem
+set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options):
+
 The `overlay` is a very fast union filesystem. It is now merged in the main
 Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137).
 Call `docker -d -s overlay` to use it.
@@ -213,10 +289,10 @@
 #### Storage driver options
 
 Particular storage-driver can be configured with options specified with
-`--storage-opt` flags. The only driver accepting options is `devicemapper` as
-of now. All its options are prefixed with `dm`.
+`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm` and
+options for `zfs` start with `zfs`.
 
-Currently supported options are:
+Currently supported options of `devicemapper`:
 
  *  `dm.basesize`
 
@@ -236,7 +312,7 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt dm.basesize=20G
+        $ docker -d --storage-opt dm.basesize=20G
 
  *  `dm.loopdatasize`
 
@@ -246,7 +322,7 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt dm.loopdatasize=200G
+        $ docker -d --storage-opt dm.loopdatasize=200G
 
  *  `dm.loopmetadatasize`
 
@@ -257,7 +333,7 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt dm.loopmetadatasize=4G
+        $ docker -d --storage-opt dm.loopmetadatasize=4G
 
  *  `dm.fs`
 
@@ -266,7 +342,7 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt dm.fs=xfs
+        $ docker -d --storage-opt dm.fs=xfs
 
  *  `dm.mkfsarg`
 
@@ -274,7 +350,7 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"
+        $ docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"
 
  *  `dm.mountopt`
 
@@ -282,7 +358,7 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt dm.mountopt=nodiscard
+        $ docker -d --storage-opt dm.mountopt=nodiscard
 
  *  `dm.datadev`
 
@@ -294,7 +370,7 @@
 
     Example use:
 
-        $ sudo docker -d \
+        $ docker -d \
             --storage-opt dm.datadev=/dev/sdb1 \
             --storage-opt dm.metadatadev=/dev/sdc1
 
@@ -312,7 +388,7 @@
 
     Example use:
 
-        $ sudo docker -d \
+        $ docker -d \
             --storage-opt dm.datadev=/dev/sdb1 \
             --storage-opt dm.metadatadev=/dev/sdc1
 
@@ -323,7 +399,7 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt dm.blocksize=512K
+        $ docker -d --storage-opt dm.blocksize=512K
 
  *  `dm.blkdiscard`
 
@@ -337,9 +413,55 @@
 
     Example use:
 
-        $ sudo docker -d --storage-opt dm.blkdiscard=false
+        $ docker -d --storage-opt dm.blkdiscard=false
 
-### Docker exec-driver option
+ *  `dm.override_udev_sync_check`
+
+    Overrides the `udev` synchronization checks between `devicemapper` and `udev`.
+    `udev` is the device manager for the Linux kernel.
+
+    To view the `udev` sync support of a Docker daemon that is using the
+    `devicemapper` driver, run:
+
+        $ docker info
+	[...]
+	 Udev Sync Supported: true
+	[...]
+
+    When `udev` sync support is `true`, then `devicemapper` and udev can
+    coordinate the activation and deactivation of devices for containers.
+
+    When `udev` sync support is `false`, a race condition occurs between
+    the`devicemapper` and `udev` during create and cleanup. The race condition
+    results in errors and failures. (For information on these failures, see
+    [docker#4036](https://github.com/docker/docker/issues/4036))
+
+    To allow the `docker` daemon to start, regardless of `udev` sync not being
+    supported, set `dm.override_udev_sync_check` to true:
+
+        $ docker -d --storage-opt dm.override_udev_sync_check=true
+
+    When this value is `true`, the  `devicemapper` continues and simply warns
+    you the errors are happening.
+
+    > **Note**: The ideal is to pursue a `docker` daemon and environment that
+    > does support synchronizing with `udev`. For further discussion on this
+    > topic, see [docker#4036](https://github.com/docker/docker/issues/4036).
+    > Otherwise, set this flag for migrating existing Docker daemons to a
+    > daemon with a supported environment.
+
+### Docker execdriver option
+Currently supported options of `zfs`:
+
+ * `zfs.fsname`
+
+    Set zfs filesystem under which docker will create its own datasets.
+    By default docker will pick up the zfs filesystem where docker graph
+    (`/var/lib/docker`) is located.
+
+    Example use:
+
+       $ docker -d -s zfs --storage-opt zfs.fsname=zroot/docker
 
 The Docker daemon uses a specifically built `libcontainer` execution driver as its
 interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`.
@@ -349,6 +471,21 @@
 not where the primary development of new functionality is taking place.
 Add `-e lxc` to the daemon flags to use the `lxc` execution driver.
 
+#### Options for the native execdriver
+
+You can configure the `native` (libcontainer) execdriver using options specified
+with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
+single `native.cgroupdriver` option is available.
+
+The `native.cgroupdriver` option specifies the management of the container's 
+cgroups. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and 
+it is not available, the system uses `cgroupfs`. By default, if no option is 
+specified, the execdriver first tries `systemd` and falls back to `cgroupfs`. 
+This example sets the execdriver to `cgroupfs`:
+
+    $ sudo docker -d --exec-opt native.cgroupdriver=cgroupfs
+     
+Setting this option applies to all containers the daemon launches.
 
 ### Daemon DNS options
 
@@ -410,7 +547,7 @@
 
 `--default-ulimit` allows you to set the default `ulimit` options to use for all
 containers. It takes the same options as `--ulimit` for `docker run`. If these
-defaults are not set, `ulimit` settings will be inheritted, if not set on
+defaults are not set, `ulimit` settings will be inherited, if not set on
 `docker run`, from the Docker daemon. Any `--ulimit` options passed to
 `docker run` will overwrite these defaults.
 
@@ -444,18 +581,24 @@
 simultaneously, screen sharing style, or quickly view the progress of your
 daemonized process.
 
-You can detach from the container (and leave it running) with `CTRL-p CTRL-q`
-(for a quiet exit) or `CTRL-c` which will send a `SIGKILL` to the container.
-When you are attached to a container, and exit its main process, the process's
-exit code will be returned to the client.
+You can detach from the container and leave it running with `CTRL-p
+CTRL-q` (for a quiet exit) or with `CTRL-c` if `--sig-proxy` is false.
+
+If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT`
+to the container.
+
+>**Note**: A process running as PID 1 inside a container is treated
+>specially by Linux: it ignores any signal with the default action.
+>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is
+>coded to do so.
 
 It is forbidden to redirect the standard input of a `docker attach` command while
 attaching to a tty-enabled container (i.e.: launched with `-t`).
 
 #### Examples
 
-    $ sudo docker run -d --name topdemo ubuntu /usr/bin/top -b)
-    $ sudo docker attach topdemo
+    $ docker run -d --name topdemo ubuntu /usr/bin/top -b
+    $ docker attach topdemo
     top - 02:05:52 up  3:05,  0 users,  load average: 0.01, 0.02, 0.05
     Tasks:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
     Cpu(s):  0.1%us,  0.2%sy,  0.0%ni, 99.7%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
@@ -492,14 +635,14 @@
 And in this second example, you can see the exit code returned by the `bash` process
 is returned by the `docker attach` command to its caller too:
 
-    $ sudo docker run --name test -d -it debian
+    $ docker run --name test -d -it debian
     275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab
-    $ sudo docker attach test
+    $ docker attach test
     $$ exit 13
     exit
     $ echo $?
     13
-    $ sudo docker ps -a | grep test
+    $ docker ps -a | grep test
     275c44472aeb        debian:7            "/bin/bash"         26 seconds ago      Exited (13) 17 seconds ago                         test
 
 ## build
@@ -518,7 +661,9 @@
       -m, --memory=""          Memory limit for all build containers
       --memory-swap=""         Total memory (memory + swap), `-1` to disable swap
       -c, --cpu-shares         CPU Shares (relative weight)
+      --cpuset-mems=""         MEMs in which to allow execution, e.g. `0-3`, `0,1`
       --cpuset-cpus=""         CPUs in which to allow exection, e.g. `0-3`, `0,1`
+      --cgroup-parent=""       Optional parent cgroup for the container
 
 Builds Docker images from a Dockerfile and a "context". A build's context is
 the files located in the specified `PATH` or `URL`.  The build process can
@@ -526,12 +671,36 @@
 an [*ADD*](/reference/builder/#add) instruction to reference a file in the
 context.
 
-The `URL` parameter can specify the location of a Git repository; in this
-case,  the repository is the context. The Git repository is recursively
-cloned with its submodules.  The system does a fresh `git clone -recursive`
-in a temporary directory on your local host. Then, this clone is sent to
-the Docker daemon as the context. Local clones give you the ability to
-access private repositories using local user credentials, VPN's, and so forth.
+The `URL` parameter can specify the location of a Git repository;
+the repository acts as the build context. The system recursively clones the repository
+and its submodules using a `git clone --depth 1 --recursive` command.
+This command runs in a temporary directory on your local host.
+After the command succeeds, the directory is sent to the Docker daemon as the context.
+Local clones give you the ability to access private repositories using
+local user credentials, VPN's, and so forth.
+
+Git URLs accept context configuration in their fragment section, separated by a colon `:`.
+The first part represents the reference that Git will check out, this can be either
+a branch, a tag, or a commit SHA. The second part represents a subdirectory
+inside the repository that will be used as a build context.
+
+For example, run this command to use a directory called `docker` in the branch `container`:
+
+      $ docker build https://github.com/docker/rootfs.git#container:docker
+
+The following table represents all the valid suffixes with their build contexts:
+
+Build Syntax Suffix | Commit Used | Build Context Used
+--------------------|-------------|-------------------
+`myrepo.git` | `refs/heads/master` | `/`
+`myrepo.git#mytag` | `refs/tags/mytag` | `/`
+`myrepo.git#mybranch` | `refs/heads/mybranch` | `/`
+`myrepo.git#abcdef` | `sha1 = abcdef` | `/`
+`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder`
+`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder`
+`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder`
+`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder`
+`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder`
 
 Instead of specifying a context, you can pass a single Dockerfile in the
 `URL` or pipe the file in via `STDIN`.  To pipe a Dockerfile from `STDIN`:
@@ -539,9 +708,29 @@
 	docker build - < Dockerfile
 
 If you use STDIN or specify a `URL`, the system places the contents into a
-file called `Dockerfile`, and any `-f`, `--file` option is ignored. In this 
+file called `Dockerfile`, and any `-f`, `--file` option is ignored. In this
 scenario, there is no context.
 
+By default the `docker build` command will look for a `Dockerfile` at the
+root of the build context. The `-f`, `--file`, option lets you specify
+the path to an alternative file to use instead.  This is useful
+in cases where the same set of files are used for multiple builds. The path
+must be to a file within the build context. If a relative path is specified
+then it must to be relative to the current directory.
+
+In most cases, it's best to put each Dockerfile in an empty directory. Then, add
+to that directory only the files needed for building the Dockerfile. To increase
+the build's performance, you can exclude files and directories by adding a
+`.dockerignore` file to that directory as well. For information on creating one,
+see the [.dockerignore file](../../reference/builder/#dockerignore-file).
+
+If the Docker client loses connection to the daemon, the build is canceled.
+This happens if you interrupt the Docker client with `ctrl-c` or if the Docker
+client is killed for any reason.
+
+> **Note:** Currently only the "run" phase of the build can be canceled until
+> pull cancelation is implemented).
+
 ### Return code
 
 On a successful build, a return code of success `0` will be returned.
@@ -562,57 +751,13 @@
 $ echo $?
 1
 ```
-
-### .dockerignore file
-
-If a file named `.dockerignore` exists in the root of `PATH` then it
-is interpreted as a newline-separated list of exclusion patterns.
-Exclusion patterns match files or directories relative to `PATH` that
-will be excluded from the context. Globbing is done using Go's
-[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
-
-Please note that `.dockerignore` files in other subdirectories are
-considered as normal files. Filepaths in `.dockerignore` are absolute with
-the current directory as the root. Wildcards are allowed but the search
-is not recursive.
-
-#### Example .dockerignore file
-    */temp*
-    */*/temp*
-    temp?
-
-The first line above `*/temp*`, would ignore all files with names starting with
-`temp` from any subdirectory below the root directory. For example, a file named
-`/somedir/temporary.txt` would be ignored. The second line `*/*/temp*`, will
-ignore files starting with name `temp` from any subdirectory that is two levels
-below the root directory. For example, the file `/somedir/subdir/temporary.txt`
-would get ignored in this case. The last line in the above example `temp?`
-will ignore the files that match the pattern from the root directory.
-For example, the files `tempa`, `tempb` are ignored from the root directory.
-Currently there is no support for regular expressions. Formats
-like `[^temp*]` are ignored.
-
-By default the `docker build` command will look for a `Dockerfile` at the
-root of the build context. The `-f`, `--file`, option lets you specify
-the path to an alternative file to use instead.  This is useful
-in cases where the same set of files are used for multiple builds. The path
-must be to a file within the build context. If a relative path is specified
-then it must to be relative to the current directory.
-
-If the Docker client loses connection to the daemon, the build is canceled.
-This happens if you interrupt the Docker client with `ctrl-c` or if the Docker
-client is killed for any reason.
-
-> **Note:** Currently only the "run" phase of the build can be canceled until
-> pull cancelation is implemented).
-
 See also:
 
 [*Dockerfile Reference*](/reference/builder).
 
-#### Examples
+### Examples
 
-    $ sudo docker build .
+    $ docker build .
     Uploading context 10240 bytes
     Step 1 : FROM busybox
     Pulling repository busybox
@@ -657,7 +802,7 @@
 complete, you must use `--rm=false`. This does not
 affect the build cache.
 
-    $ sudo docker build .
+    $ docker build .
     Uploading context 18.829 MB
     Uploading context
     Step 0 : FROM busybox
@@ -667,7 +812,7 @@
      ---> 99cc1ad10469
     Successfully built 99cc1ad10469
     $ echo ".git" > .dockerignore
-    $ sudo docker build .
+    $ docker build .
     Uploading context  6.76 MB
     Uploading context
     Step 0 : FROM busybox
@@ -679,27 +824,28 @@
 
 This example shows the use of the `.dockerignore` file to exclude the `.git`
 directory from the context. Its effect can be seen in the changed size of the
-uploaded context.
+uploaded context. The builder reference contains detailed information on
+[creating a .dockerignore file](../../builder/#dockerignore-file)
 
-    $ sudo docker build -t vieux/apache:2.0 .
+    $ docker build -t vieux/apache:2.0 .
 
 This will build like the previous example, but it will then tag the
 resulting image. The repository name will be `vieux/apache`
 and the tag will be `2.0`
 
-    $ sudo docker build - < Dockerfile
+    $ docker build - < Dockerfile
 
 This will read a Dockerfile from `STDIN` without context. Due to the
 lack of a context, no contents of any local directory will be sent to
 the Docker daemon. Since there is no context, a Dockerfile `ADD` only
 works if it refers to a remote URL.
 
-    $ sudo docker build - < context.tar.gz
+    $ docker build - < context.tar.gz
 
 This will build an image for a compressed context read from `STDIN`.
 Supported formats are: bzip2, gzip and xz.
 
-    $ sudo docker build github.com/creack/docker-firefox
+    $ docker build github.com/creack/docker-firefox
 
 This will clone the GitHub repository and use the cloned repository as
 context. The Dockerfile at the root of the
@@ -707,21 +853,21 @@
 can specify an arbitrary Git repository by using the `git://` or `git@`
 schema.
 
-    $ sudo docker build -f Dockerfile.debug .
+    $ docker build -f Dockerfile.debug .
 
 This will use a file called `Dockerfile.debug` for the build
 instructions instead of `Dockerfile`.
 
-    $ sudo docker build -f dockerfiles/Dockerfile.debug -t myapp_debug .
-    $ sudo docker build -f dockerfiles/Dockerfile.prod  -t myapp_prod .
+    $ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug .
+    $ docker build -f dockerfiles/Dockerfile.prod  -t myapp_prod .
 
 The above commands will build the current build context (as specified by
 the `.`) twice, once using a debug version of a `Dockerfile` and once using
 a production version.
 
     $ cd /home/me/myapp/some/dir/really/deep
-    $ sudo docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp
-    $ sudo docker build -f ../../../../dockerfiles/debug /home/me/myapp
+    $ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp
+    $ docker build -f ../../../../dockerfiles/debug /home/me/myapp
 
 These two `docker build` commands do the exact same thing. They both
 use the contents of the `debug` file instead of looking for a `Dockerfile`
@@ -736,6 +882,11 @@
 > children) for security reasons, and to ensure repeatable builds on remote
 > Docker hosts. This is also the reason why `ADD ../file` will not work.
 
+When `docker build` is run with the `--cgroup-parent` option the containers used
+in the build will be run with the [corresponding `docker run`
+flag](/reference/run/#specifying-custom-cgroups). 
+
+
 ## commit
 
     Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
@@ -760,31 +911,32 @@
 
 The `--change` option will apply `Dockerfile` instructions to the image
 that is created.
-Supported `Dockerfile` instructions: `ADD`|`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`FROM`|`MAINTAINER`|`RUN`|`USER`|`LABEL`|`VOLUME`|`WORKDIR`|`COPY`
+Supported `Dockerfile` instructions:
+`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
 
 #### Commit a container
 
-    $ sudo docker ps
+    $ docker ps
     ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
     c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
     197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
-    $ sudo docker commit c3f279d17e0a  SvenDowideit/testimage:version3
+    $ docker commit c3f279d17e0a  SvenDowideit/testimage:version3
     f5283438590d
-    $ sudo docker images | head
+    $ docker images
     REPOSITORY                        TAG                 ID                  CREATED             VIRTUAL SIZE
     SvenDowideit/testimage            version3            f5283438590d        16 seconds ago      335.7 MB
 
 #### Commit a container with new configurations
 
-    $ sudo docker ps
+    $ docker ps
     ID                  IMAGE               COMMAND             CREATED             STATUS              PORTS
     c3f279d17e0a        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
     197387f1b436        ubuntu:12.04        /bin/bash           7 days ago          Up 25 hours
-    $ sudo docker inspect -f "{{ .Config.Env }}" c3f279d17e0a
+    $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a
     [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin]
-    $ sudo docker commit --change "ENV DEBUG true" c3f279d17e0a  SvenDowideit/testimage:version3
+    $ docker commit --change "ENV DEBUG true" c3f279d17e0a  SvenDowideit/testimage:version3
     f5283438590d
-    $ sudo docker inspect -f "{{ .Config.Env }}" f5283438590d
+    $ docker inspect -f "{{ .Config.Env }}" f5283438590d
     [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true]
 
 ## cp
@@ -795,7 +947,7 @@
 
     Usage: docker cp CONTAINER:PATH HOSTDIR|-
 
-    Copy files/folders from the PATH to the HOSTDIR. 
+    Copy files/folders from the PATH to the HOSTDIR.
 
 
 ## create
@@ -808,12 +960,16 @@
 
       -a, --attach=[]            Attach to STDIN, STDOUT or STDERR
       --add-host=[]              Add a custom host-to-IP mapping (host:ip)
+      --blkio-weight=0           Block IO weight (relative weight)
       -c, --cpu-shares=0         CPU shares (relative weight)
       --cap-add=[]               Add Linux capabilities
       --cap-drop=[]              Drop Linux capabilities
       --cgroup-parent=""         Optional parent cgroup for the container
       --cidfile=""               Write the container ID to the file
       --cpuset-cpus=""           CPUs in which to allow execution (0-3, 0,1)
+      --cpuset-mems=""           Memory nodes (MEMs) in which to allow execution (0-3, 0,1)
+      --cpu-period=0             Limit the CPU CFS (Completely Fair Scheduler) period
+      --cpu-quota=0              Limit the CPU CFS (Completely Fair Scheduler) quota
       --device=[]                Add a host device to the container
       --dns=[]                   Set custom DNS servers
       --dns-search=[]            Set custom DNS search domains
@@ -833,8 +989,11 @@
       --mac-address=""           Container MAC address (e.g. 92:d0:c6:0a:29:33)
       --name=""                  Assign a name to the container
       --net="bridge"             Set the Network mode for the container
+      --oom-kill-disable=false   Whether to disable OOM Killer for the container or not
       -P, --publish-all=false    Publish all exposed ports to random ports
       -p, --publish=[]           Publish a container's port(s) to the host
+      --pid=""                   PID namespace to use
+      --uts=""                   UTS namespace to use
       --privileged=false         Give extended privileges to this container
       --read-only=false          Mount the container's root filesystem as read only
       --restart="no"             Restart policy (no, on-failure[:max-retry], always)
@@ -860,9 +1019,9 @@
 
 #### Examples
 
-    $ sudo docker create -t -i fedora bash
+    $ docker create -t -i fedora bash
     6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752
-    $ sudo docker start -a -i 6d8af538ec5
+    $ docker start -a -i 6d8af538ec5
     bash-4.2#
 
 As of v1.4.0 container volumes are initialized during the `docker create`
@@ -910,7 +1069,7 @@
 
 For example:
 
-    $ sudo docker diff 7bb0e258aefe
+    $ docker diff 7bb0e258aefe
 
     C /dev
     A /dev/kmsg
@@ -955,7 +1114,7 @@
 `--filter container=588a23dac085 --filter event=start` will display events for container
 container 588a23dac085 *AND* the event type is *start*
 
-Current filters:
+The currently supported filters are:
 
 * container
 * event
@@ -967,13 +1126,13 @@
 
 **Shell 1: Listening for events:**
 
-    $ sudo docker events
+    $ docker events
 
 **Shell 2: Start and Stop containers:**
 
-    $ sudo docker start 4386fb97867d
-    $ sudo docker stop 4386fb97867d
-    $ sudo docker stop 7805c1d35632
+    $ docker start 4386fb97867d
+    $ docker stop 4386fb97867d
+    $ docker stop 7805c1d35632
 
 **Shell 1: (Again .. now showing events):**
 
@@ -985,20 +1144,20 @@
 
 **Show events in the past from a specified time:**
 
-    $ sudo docker events --since 1378216169
+    $ docker events --since 1378216169
     2014-03-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
     2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
     2014-03-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
 
-    $ sudo docker events --since '2013-09-03'
+    $ docker events --since '2013-09-03'
     2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start
     2014-09-03T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
     2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
     2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
 
-    $ sudo docker events --since '2013-09-03T15:49:29'
+    $ docker events --since '2013-09-03T15:49:29'
     2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
     2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
@@ -1006,29 +1165,29 @@
 
 **Filter events:**
 
-    $ sudo docker events --filter 'event=stop'
+    $ docker events --filter 'event=stop'
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
     2014-09-03T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
 
-    $ sudo docker events --filter 'image=ubuntu-1:14.04'
+    $ docker events --filter 'image=ubuntu-1:14.04'
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) start
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
 
-    $ sudo docker events --filter 'container=7805c1d35632'
+    $ docker events --filter 'container=7805c1d35632'
     2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
     2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
 
-    $ sudo docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d'
+    $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d'
     2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
     2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
     2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
 
-    $ sudo docker events --filter 'container=7805c1d35632' --filter 'event=stop'
+    $ docker events --filter 'container=7805c1d35632' --filter 'event=stop'
     2014-09-03T15:49:29.999999999Z07:00 7805c1d35632: (from redis:2.8) stop
 
-    $ sudo docker events --filter 'container=container_1' --filter 'container=container_2'
+    $ docker events --filter 'container=container_1' --filter 'container=container_2'
     2014-09-03T15:49:29.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die
     2014-05-10T17:42:14.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop
     2014-05-10T17:42:14.999999999Z07:00 7805c1d35632: (from redis:2.8) die
@@ -1043,6 +1202,7 @@
       -d, --detach=false         Detached mode: run command in the background
       -i, --interactive=false    Keep STDIN open even if not attached
       -t, --tty=false            Allocate a pseudo-TTY
+      -u, --user=                Username or UID (format: <name|uid>[:<group|gid>])
 
 The `docker exec` command runs a new command in a running container.
 
@@ -1063,16 +1223,16 @@
 
 #### Examples
 
-    $ sudo docker run --name ubuntu_bash --rm -i -t ubuntu bash
+    $ docker run --name ubuntu_bash --rm -i -t ubuntu bash
 
 This will create a container named `ubuntu_bash` and start a Bash session.
 
-    $ sudo docker exec -d ubuntu_bash touch /tmp/execWorks
+    $ docker exec -d ubuntu_bash touch /tmp/execWorks
 
 This will create a new file `/tmp/execWorks` inside the running container
 `ubuntu_bash`, in the background.
 
-    $ sudo docker exec -it ubuntu_bash bash
+    $ docker exec -it ubuntu_bash bash
 
 This will create a new Bash session in the container `ubuntu_bash`.
 
@@ -1088,11 +1248,11 @@
 
    For example:
 
-    $ sudo docker export red_panda > latest.tar
+    $ docker export red_panda > latest.tar
 
    Or
 
-    $ sudo docker export --output="latest.tar" red_panda
+    $ docker export --output="latest.tar" red_panda
 
 > **Note:**
 > `docker export` does not export the contents of volumes associated with the
@@ -1109,19 +1269,30 @@
 
     Show the history of an image
 
+      -H, --human=true     Print sizes and dates in human readable format
       --no-trunc=false     Don't truncate output
       -q, --quiet=false    Only show numeric IDs
 
 To see how the `docker:latest` image was built:
 
-    $ sudo docker history docker
-    IMAGE                                                              CREATED             CREATED BY                                                                                                                                                 SIZE
-    3e23a5875458790b7a806f95f7ec0d0b2a5c1659bfc899c89f939f6d5b8f7094   8 days ago          /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8                                                                                                                       0 B
-    8578938dd17054dce7993d21de79e96a037400e8d28e15e7290fea4f65128a36   8 days ago          /bin/sh -c dpkg-reconfigure locales &&    locale-gen C.UTF-8 &&    /usr/sbin/update-locale LANG=C.UTF-8                                                    1.245 MB
-    be51b77efb42f67a5e96437b3e102f81e0a1399038f77bf28cea0ed23a65cf60   8 days ago          /bin/sh -c apt-get update && apt-get install -y    git    libxml2-dev    python    build-essential    make    gcc    python-dev    locales    python-pip   338.3 MB
-    4b137612be55ca69776c7f30c2d2dd0aa2e7d72059820abf3e25b629f887a084   6 weeks ago         /bin/sh -c #(nop) ADD jessie.tar.xz in /                                                                                                                   121 MB
-    750d58736b4b6cc0f9a9abe8f258cef269e3e9dceced1146503522be9f985ada   6 weeks ago         /bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -t jessie.tar.xz jessie http://http.debian.net/debian             0 B
-    511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158   9 months ago                                                                                                                                                                   0 B
+    $ docker history docker
+    IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
+    3e23a5875458        8 days ago          /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8            0 B
+    8578938dd170        8 days ago          /bin/sh -c dpkg-reconfigure locales &&    loc   1.245 MB
+    be51b77efb42        8 days ago          /bin/sh -c apt-get update && apt-get install    338.3 MB
+    4b137612be55        6 weeks ago         /bin/sh -c #(nop) ADD jessie.tar.xz in /        121 MB
+    750d58736b4b        6 weeks ago         /bin/sh -c #(nop) MAINTAINER Tianon Gravi <ad   0 B
+    511136ea3c5a        9 months ago                                                        0 B                 Imported from -
+
+To see how the `docker:apache` image was added to a container's base image:
+
+    $ docker history docker:scm
+    IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
+    2ac9d1098bf1        3 months ago        /bin/bash                                       241.4 MB            Added Apache to Fedora base image
+    88b42ffd1f7c        5 months ago        /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7   373.7 MB            
+    c69cab00d6ef        5 months ago        /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar   0 B                 
+    511136ea3c5a        19 months ago                                                       0 B                 Imported from -
+
 
 ## images
 
@@ -1154,7 +1325,7 @@
 
 #### Listing the most recently created images
 
-    $ sudo docker images | head
+    $ docker images
     REPOSITORY                TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
     <none>                    <none>              77af4d6b9913        19 hours ago        1.089 GB
     committ                   latest              b6fa739cedf5        19 hours ago        1.089 GB
@@ -1169,7 +1340,7 @@
 
 #### Listing the full length image IDs
 
-    $ sudo docker images --no-trunc | head
+    $ docker images --no-trunc
     REPOSITORY                    TAG                 IMAGE ID                                                           CREATED             VIRTUAL SIZE
     <none>                        <none>              77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182   19 hours ago        1.089 GB
     committest                    latest              b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f   19 hours ago        1.089 GB
@@ -1188,7 +1359,7 @@
 unchanged, the digest value is predictable. To list image digest values, use
 the `--digests` flag:
 
-    $ sudo docker images --digests | head
+    $ docker images --digests
     REPOSITORY                         TAG                 DIGEST                                                                    IMAGE ID            CREATED             VIRTUAL SIZE
     localhost:5000/test/busybox        <none>              sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536        9 weeks ago         2.43 MB
 
@@ -1202,13 +1373,14 @@
 The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
 than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`)
 
-Current filters:
- * dangling (boolean - true or false)
- * label (`label=<key>` or `label=<key>=<value>`)
+The currently supported filters are:
+
+* dangling (boolean - true or false)
+* label (`label=<key>` or `label=<key>=<value>`)
 
 ##### Untagged images
 
-    $ sudo docker images --filter "dangling=true"
+    $ docker images --filter "dangling=true"
 
     REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
     <none>              <none>              8abc22fbb042        4 weeks ago         0 B
@@ -1226,7 +1398,7 @@
 
 Ready for use by `docker rmi ...`, like:
 
-    $ sudo docker rmi $(sudo docker images -f "dangling=true" -q)
+    $ docker rmi $(docker images -f "dangling=true" -q)
 
     8abc22fbb042
     48e5f45168b9
@@ -1254,8 +1426,8 @@
 
 The `--change` option will apply `Dockerfile` instructions to the image
 that is created.
-Supported `Dockerfile` instructions: `CMD`, `ENTRYPOINT`, `ENV`, `EXPOSE`,
-`ONBUILD`, `USER`, `VOLUME`, `WORKDIR`
+Supported `Dockerfile` instructions:
+`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
 
 #### Examples
 
@@ -1263,21 +1435,21 @@
 
 This will create a new untagged image.
 
-    $ sudo docker import http://example.com/exampleimage.tgz
+    $ docker import http://example.com/exampleimage.tgz
 
 **Import from a local file:**
 
 Import to docker via pipe and `STDIN`.
 
-    $ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new
+    $ cat exampleimage.tgz | docker import - exampleimagelocal:new
 
 **Import from a local directory:**
 
-    $ sudo tar -c . | sudo docker import - exampleimagedir
+    $ sudo tar -c . | docker import - exampleimagedir
 
 **Import from a local directory with new configurations:**
 
-    $ sudo tar -c . | sudo docker import --change "ENV DEBUG true" - exampleimagedir
+    $ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir
 
 Note the `sudo` in this example – you must preserve
 the ownership of the files (especially root ownership) during the
@@ -1293,7 +1465,7 @@
 
 For example:
 
-    $ sudo docker -D info
+    $ docker -D info
     Containers: 14
     Images: 52
     Storage Driver: aufs
@@ -1301,6 +1473,7 @@
      Backing Filesystem: extfs
      Dirs: 545
     Execution Driver: native-0.2
+    Logging Driver: json-file
     Kernel Version: 3.13.0-24-generic
     Operating System: Ubuntu 14.04 LTS
     CPUs: 1
@@ -1309,7 +1482,7 @@
     Total Memory: 2 GiB
     Debug mode (server): false
     Debug mode (client): true
-    Fds: 10
+    File Descriptors: 10
     Goroutines: 9
     System Time: Tue Mar 10 18:38:57 UTC 2015
     EventsListeners: 0
@@ -1349,25 +1522,25 @@
 For the most part, you can pick out any field from the JSON in a fairly
 straightforward manner.
 
-    $ sudo docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
+    $ docker inspect --format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
 
 **Get an instance's MAC Address:**
 
 For the most part, you can pick out any field from the JSON in a fairly
 straightforward manner.
 
-    $ sudo docker inspect --format='{{.NetworkSettings.MacAddress}}' $INSTANCE_ID
+    $ docker inspect --format='{{.NetworkSettings.MacAddress}}' $INSTANCE_ID
 
 **Get an instance's log path:**
 
-    $ sudo docker inspect --format='{{.LogPath}}' $INSTANCE_ID
+    $ docker inspect --format='{{.LogPath}}' $INSTANCE_ID
 
 **List All Port Bindings:**
 
 One can loop over arrays and maps in the results to produce simple text
 output:
 
-    $ sudo docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
+    $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
 
 **Find a Specific Port Mapping:**
 
@@ -1379,7 +1552,7 @@
 then `index` 0 contains the first object inside of that. Then we ask for
 the `HostPort` field to get the public address.
 
-    $ sudo docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
+    $ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
 
 **Get config:**
 
@@ -1388,7 +1561,7 @@
 section contains complex JSON object, so to grab it as JSON, you use
 `json` to convert the configuration object into JSON.
 
-    $ sudo docker inspect --format='{{json .config}}' $INSTANCE_ID
+    $ docker inspect --format='{{json .config}}' $INSTANCE_ID
 
 ## kill
 
@@ -1412,14 +1585,14 @@
 Loads a tarred repository from a file or the standard input stream.
 Restores both images and tags.
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
-    $ sudo docker load < busybox.tar
-    $ sudo docker images
+    $ docker load < busybox.tar
+    $ docker images
     REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
     busybox             latest              769b9341d937        7 weeks ago         2.489 MB
-    $ sudo docker load --input fedora.tar
-    $ sudo docker images
+    $ docker load --input fedora.tar
+    $ docker images
     REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
     busybox             latest              769b9341d937        7 weeks ago         2.489 MB
     fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
@@ -1442,7 +1615,7 @@
 adding the server name.
 
     example:
-    $ sudo docker login localhost:8080
+    $ docker login localhost:8080
 
 ## logout
 
@@ -1453,7 +1626,7 @@
 
 For example:
 
-    $ sudo docker logout localhost:8080
+    $ docker logout localhost:8080
 
 ## logs
 
@@ -1462,6 +1635,7 @@
     Fetch the logs of a container
 
       -f, --follow=false        Follow log output
+      --since=""                Show logs since timestamp
       -t, --timestamps=false    Show timestamps
       --tail="all"              Number of lines to show from the end of the logs
 
@@ -1481,6 +1655,10 @@
 log entry. To ensure that the timestamps for are aligned the
 nano-second part of the timestamp will be padded with zero when necessary.
 
+The `--since` option shows logs of a container generated only after
+the given date, specified as RFC 3339 or UNIX timestamp. The `--since` option
+can be combined with the `--follow` and `--tail` options.
+
 ## pause
 
     Usage: docker pause CONTAINER [CONTAINER...]
@@ -1507,17 +1685,17 @@
 You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or
 just a specific mapping:
 
-    $ sudo docker ps test
+    $ docker ps test
     CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                                            NAMES
     b650456536c7        busybox:latest      top                 54 minutes ago      Up 54 minutes       0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp   test
-    $ sudo docker port test
+    $ docker port test
     7890/tcp -> 0.0.0.0:4321
     9876/tcp -> 0.0.0.0:1234
-    $ sudo docker port test 7890/tcp
+    $ docker port test 7890/tcp
     0.0.0.0:4321
-    $ sudo docker port test 7890/udp
+    $ docker port test 7890/udp
     2014/06/24 11:53:36 Error: No public port '7890/udp' published for test
-    $ sudo docker port test 7890
+    $ docker port test 7890
     0.0.0.0:4321
 
 ## ps
@@ -1530,7 +1708,7 @@
       --before=""           Show only container created before Id or Name
       -f, --filter=[]       Filter output based on conditions provided
       -l, --latest=false    Show the latest created container, include non-running
-      -n=-1                 Show n last created containers, include non-running 
+      -n=-1                 Show n last created containers, include non-running
       --no-trunc=false      Don't truncate output
       -q, --quiet=false     Only display numeric IDs
       -s, --size=false      Display total file sizes
@@ -1538,7 +1716,7 @@
 
 Running `docker ps --no-trunc` showing 2 linked containers.
 
-    $ sudo docker ps
+    $ docker ps
     CONTAINER ID        IMAGE                        COMMAND                CREATED              STATUS              PORTS               NAMES
     4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago       Up 16 seconds       3300-3310/tcp       webapp
     d7886598dbe2        crosbymichael/redis:latest   /redis-server --dir    33 minutes ago       Up 33 minutes       6379/tcp            redis,webapp/db
@@ -1553,13 +1731,17 @@
 The filtering flag (`-f` or `--filter)` format is a `key=value` pair. If there is more
 than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`)
 
-Current filters:
- * exited (int - the code of exited containers. Only useful with '--all')
- * status (restarting|running|paused|exited)
+The currently supported filters are:
+
+* id (container's id)
+* label (`label=<key>` or `label=<key>=<value>`)
+* name (container's name)
+* exited (int - the code of exited containers. Only useful with `--all`)
+* status (restarting|running|paused|exited)
 
 ##### Successfully exited containers
 
-    $ sudo docker ps -a --filter 'exited=0'
+    $ docker ps -a --filter 'exited=0'
     CONTAINER ID        IMAGE             COMMAND                CREATED             STATUS                   PORTS                      NAMES
     ea09c3c82f6e        registry:latest   /srv/run.sh            2 weeks ago         Exited (0) 2 weeks ago   127.0.0.1:5000->5000/tcp   desperate_leakey
     106ea823fe4e        fedora:latest     /bin/sh -c 'bash -l'   2 weeks ago         Exited (0) 2 weeks ago                              determined_albattani
@@ -1569,7 +1751,7 @@
 
 ## pull
 
-    Usage: docker pull [OPTIONS] NAME[:TAG]
+    Usage: docker pull [OPTIONS] NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG]
 
     Pull an image or a repository from the registry
 
@@ -1589,22 +1771,23 @@
 To download a particular image, or set of images (i.e., a repository),
 use `docker pull`:
 
-    $ sudo docker pull debian
+    $ docker pull debian
     # will pull the debian:latest image and its intermediate layers
-    $ sudo docker pull debian:testing
+    $ docker pull debian:testing
     # will pull the image named debian:testing and any intermediate
     # layers it is based on.
-    $ sudo docker pull debian@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+    $ docker pull debian@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
     # will pull the image from the debian repository with the digest
     # sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
     # and any intermediate layers it is based on.
     # (Typically the empty `scratch` image, a MAINTAINER layer,
     # and the un-tarred base).
-    $ sudo docker pull --all-tags centos
+    $ docker pull --all-tags centos
     # will pull all the images from the centos repository
-    $ sudo docker pull registry.hub.docker.com/debian
+    $ docker pull registry.hub.docker.com/debian
     # manually specifies the path to the default Docker registry. This could
     # be replaced with the path to a local registry to pull from another source.
+    # sudo docker pull myhub.com:8080/test-image
 
 ## push
 
@@ -1643,24 +1826,26 @@
 
 #### Examples
 
-    $ sudo docker rm /redis
+    $ docker rm /redis
     /redis
 
 This will remove the container referenced under the link
 `/redis`.
 
-    $ sudo docker rm --link /webapp/redis
+    $ docker rm --link /webapp/redis
     /webapp/redis
 
 This will remove the underlying link between `/webapp` and the `/redis`
 containers removing all network communication.
 
-    $ sudo docker rm --force redis
+    $ docker rm --force redis
     redis
 
 The main process inside the container referenced under the link `/redis` will receive
 `SIGKILL`, then the container will be removed.
 
+    $ docker rm $(docker ps -a -q)
+
 This command will delete all stopped containers. The command `docker ps
 -a -q` will return all existing container IDs and pass them to the `rm`
 command which will delete them. Any running containers will not be
@@ -1681,37 +1866,52 @@
 an image has one or more tag or digest reference, you must remove all of them
 before the image is removed.
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
     test1                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
     test                      latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
     test2                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
 
-    $ sudo docker rmi fd484f19954f
+    $ docker rmi fd484f19954f
     Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force
     2013/12/11 05:47:16 Error: failed to remove one or more images
 
-    $ sudo docker rmi test1
+    $ docker rmi test1
     Untagged: test1:latest
-    $ sudo docker rmi test2
+    $ docker rmi test2
     Untagged: test2:latest
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
     test                      latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
-    $ sudo docker rmi test
+    $ docker rmi test
     Untagged: test:latest
     Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
 
+If you use the `-f` flag and specify the image's short or long ID, then this
+command untags and removes all images that match the specified ID.
+
+    $ docker images
+    REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+    test1                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+    test                      latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+    test2                     latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)
+
+    $ docker rmi -f fd484f19954f
+    Untagged: test1:latest
+    Untagged: test:latest
+    Untagged: test2:latest
+    Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
+
 An image pulled by digest has no tag associated with it:
 
-    $ sudo docker images --digests
+    $ docker images --digests
     REPOSITORY                     TAG       DIGEST                                                                    IMAGE ID        CREATED         VIRTUAL SIZE
     localhost:5000/test/busybox    <none>    sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   4986bf8c1536    9 weeks ago     2.43 MB
 
 To remove an image using its digest:
 
-    $ sudo docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+    $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
     Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
     Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125
     Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2
@@ -1725,11 +1925,15 @@
 
       -a, --attach=[]            Attach to STDIN, STDOUT or STDERR
       --add-host=[]              Add a custom host-to-IP mapping (host:ip)
+      --blkio-weight=0           Block IO weight (relative weight)
       -c, --cpu-shares=0         CPU shares (relative weight)
       --cap-add=[]               Add Linux capabilities
       --cap-drop=[]              Drop Linux capabilities
       --cidfile=""               Write the container ID to the file
       --cpuset-cpus=""           CPUs in which to allow execution (0-3, 0,1)
+      --cpuset-mems=""           Memory nodes (MEMs) in which to allow execution (0-3, 0,1)
+      --cpu-period=0             Limit the CPU CFS (Completely Fair Scheduler) period
+      --cpu-quota=0              Limit the CPU CFS (Completely Fair Scheduler) quota
       -d, --detach=false         Run container in background and print container ID
       --device=[]                Add a host device to the container
       --dns=[]                   Set custom DNS servers
@@ -1752,9 +1956,11 @@
       --memory-swap=""           Total memory (memory + swap), '-1' to disable swap
       --name=""                  Assign a name to the container
       --net="bridge"             Set the Network mode for the container
+      --oom-kill-disable=false   Whether to disable OOM Killer for the container or not
       -P, --publish-all=false    Publish all exposed ports to random ports
       -p, --publish=[]           Publish a container's port(s) to the host
       --pid=""                   PID namespace to use
+      --uts=""                   UTS namespace to use
       --privileged=false         Give extended privileges to this container
       --read-only=false          Mount the container's root filesystem as read only
       --restart="no"             Restart policy (no, on-failure[:max-retry], always)
@@ -1786,12 +1992,12 @@
 
 #### Examples
 
-    $ sudo docker run --name test -it debian
+    $ docker run --name test -it debian
     $$ exit 13
     exit
     $ echo $?
     13
-    $ sudo docker ps -a | grep test
+    $ docker ps -a | grep test
     275c44472aeb        debian:7            "/bin/bash"         26 seconds ago      Exited (13) 17 seconds ago                         test
 
 In this example, we are running `bash` interactively in the `debian:latest` image, and giving
@@ -1799,14 +2005,14 @@
 will have an exit code of `13`. This is then passed on to the caller of `docker run`, and
 is recorded in the `test` container metadata.
 
-    $ sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+    $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
 
 This will create a container and print `test` to the console. The `cidfile`
 flag makes Docker attempt to create a new file and write the container ID to it.
 If the file exists already, Docker will return an error. Docker will close this
 file when `docker run` exits.
 
-    $ sudo docker run -t -i --rm ubuntu bash
+    $ docker run -t -i --rm ubuntu bash
     root@bc338942ef20:/# mount -t tmpfs none /mnt
     mount: permission denied
 
@@ -1814,7 +2020,7 @@
 capabilities are dropped; including `cap_sys_admin` (which is required to mount
 filesystems). However, the `--privileged` flag will allow it to run:
 
-    $ sudo docker run --privileged ubuntu bash
+    $ docker run --privileged ubuntu bash
     root@50e3f57e16e6:/# mount -t tmpfs none /mnt
     root@50e3f57e16e6:/# df -h
     Filesystem      Size  Used Avail Use% Mounted on
@@ -1825,12 +2031,12 @@
 words, the container can then do almost everything that the host can do. This
 flag exists to allow special use-cases, like running Docker within Docker.
 
-    $ sudo docker  run -w /path/to/dir/ -i -t  ubuntu pwd
+    $ docker  run -w /path/to/dir/ -i -t  ubuntu pwd
 
 The `-w` lets the command being executed inside directory given, here
 `/path/to/dir/`. If the path does not exists it is created inside the container.
 
-    $ sudo docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
+    $ docker  run  -v `pwd`:`pwd` -w `pwd` -i -t  ubuntu pwd
 
 The `-v` flag mounts the current working directory into the container. The `-w`
 lets the command being executed inside the current working directory, by
@@ -1838,41 +2044,41 @@
 combination executes the command using the container, but inside the
 current working directory.
 
-    $ sudo docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
+    $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
 
 When the host directory of a bind-mounted volume doesn't exist, Docker
 will automatically create this directory on the host for you. In the
 example above, Docker will create the `/doesnt/exist`
 folder before starting your container.
 
-    $ sudo docker run --read-only -v /icanwrite busybox touch /icanwrite here
+    $ docker run --read-only -v /icanwrite busybox touch /icanwrite here
 
 Volumes can be used in combination with `--read-only` to control where
 a container writes files.  The `--read-only` flag mounts the container's root
 filesystem as read only prohibiting writes to locations other than the
 specified volumes for the container.
 
-    $ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh
+    $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh
 
 By bind-mounting the docker unix socket and statically linked docker
 binary (such as that provided by [https://get.docker.com](
 https://get.docker.com)), you give the container the full access to create and
 manipulate the host's Docker daemon.
 
-    $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash
+    $ docker run -p 127.0.0.1:80:8080 ubuntu bash
 
 This binds port `8080` of the container to port `80` on `127.0.0.1` of
 the host machine. The [Docker User Guide](/userguide/dockerlinks/)
 explains in detail how to manipulate ports in Docker.
 
-    $ sudo docker run --expose 80 ubuntu bash
+    $ docker run --expose 80 ubuntu bash
 
 This exposes port `80` of the container for use within a link without
 publishing the port to the host system's interfaces. The [Docker User
 Guide](/userguide/dockerlinks) explains in detail how to manipulate
 ports in Docker.
 
-    $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
+    $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
 
 This sets environmental variables in the container. For illustration all three
 flags are shown here. Where `-e`, `--env` take an environment variable and
@@ -1889,7 +2095,7 @@
 
     $ cat ./env.list
     TEST_FOO=BAR
-    $ sudo docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO
+    $ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO
     TEST_FOO=This is a test
 
 The `--env-file` flag takes a filename as an argument and expects each line
@@ -1916,11 +2122,11 @@
     TEST_APP_DEST_PORT=8888
     TEST_PASSTHROUGH=howdy
 
-    $ sudo docker run --name console -t -i ubuntu bash
+    $ docker run --name console -t -i ubuntu bash
 
 A label is a a `key=value` pair that applies metadata to a container. To label a container with two labels:
 
-    $ sudo docker run -l my-label --label com.example.foo=bar ubuntu bash
+    $ docker run -l my-label --label com.example.foo=bar ubuntu bash
 
 The `my-label` key doesn't specify a value so the label defaults to an empty
 string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
@@ -1933,10 +2139,10 @@
 label in the file with an EOL mark. The example below loads labels from a
 labels file in the current directory:
 
-    $ sudo docker run --label-file ./labels ubuntu bash
+    $ docker run --label-file ./labels ubuntu bash
 
 The label-file format is similar to the format for loading environment
-variables. (Unlike environment variables, labels are not visislbe to processes
+variables. (Unlike environment variables, labels are not visible to processes
 running inside a container.) The following example illustrates a label-file
 format:
 
@@ -1946,21 +2152,27 @@
     com.example.label2=another\ label
     com.example.label3
 
-You can load multiple label-files by supplying multiple  `--label-file` flags. 
+You can load multiple label-files by supplying multiple  `--label-file` flags.
 
 For additional information on working with labels, see [*Labels - custom
 metadata in Docker*](/userguide/labels-custom-metadata/) in the Docker User
 Guide.
 
-    $ sudo docker run --link /redis:redis --name console ubuntu bash
+    $ docker run --link /redis:redis --name console ubuntu bash
 
 The `--link` flag will link the container named `/redis` into the newly
 created container with the alias `redis`. The new container can access the
 network and environment of the `redis` container via environment variables.
+The `--link` flag will also just accept the form `<name or id>` in which case
+the alias will match the name. For instance, you could have written the previous
+example as:
+
+    $ docker run --link redis --name console ubuntu bash
+
 The `--name` flag will assign the name `console` to the newly created
 container.
 
-    $ sudo docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
+    $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
 
 The `--volumes-from` flag mounts all the defined volumes from the referenced
 containers. Containers can be specified by repetitions of the `--volumes-from`
@@ -1969,21 +2181,34 @@
 the volumes are mounted in the same mode (read write or read only) as
 the reference container.
 
+Labeling systems like SELinux require proper labels be placed on volume content
+mounted into a container, otherwise the security system might prevent the
+processes running inside the container from using the content. By default,
+volumes are not relabeled.
+
+Two suffixes :z or :Z can be added to the volume mount. These suffixes tell
+Docker to relabel file objects on the shared volumes. The 'z' option tells
+Docker that the volume content will be shared between containers. Docker will
+label the content with a shared content label. Shared volumes labels allow all
+containers to read/write content. The 'Z' option tells Docker to label the
+content with a private unshared label. Private volumes can only be used by the
+current container.
+
 The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` or
 `STDERR`. This makes it possible to manipulate the output and input as needed.
 
-    $ echo "test" | sudo docker run -i -a stdin ubuntu cat -
+    $ echo "test" | docker run -i -a stdin ubuntu cat -
 
 This pipes data into a container and prints the container's ID by attaching
 only to the container's `STDIN`.
 
-    $ sudo docker run -a stderr ubuntu echo test
+    $ docker run -a stderr ubuntu echo test
 
 This isn't going to print anything unless there's an error because we've
 only attached to the `STDERR` of the container. The container's logs
 still store what's been written to `STDERR` and `STDOUT`.
 
-    $ cat somefile | sudo docker run -i -a stdin mybuilder dobuild
+    $ cat somefile | docker run -i -a stdin mybuilder dobuild
 
 This is how piping a file into a container could be done for a build.
 The container's ID will be printed after the build is done and the build
@@ -1991,10 +2216,10 @@
 useful if you need to pipe a file or something else into a container and
 retrieve the container's ID once the container has finished running.
 
-   $ sudo docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo}
-   brw-rw---- 1 root disk 8, 2 Feb  9 16:05 /dev/xvdc
-   brw-rw---- 1 root disk 8, 3 Feb  9 16:05 /dev/sdd
-   crw-rw-rw- 1 root root 1, 5 Feb  9 16:05 /dev/nulo
+    $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo}
+    brw-rw---- 1 root disk 8, 2 Feb  9 16:05 /dev/xvdc
+    brw-rw---- 1 root disk 8, 3 Feb  9 16:05 /dev/sdd
+    crw-rw-rw- 1 root root 1, 5 Feb  9 16:05 /dev/nulo
 
 It is often necessary to directly expose devices to a container. The `--device`
 option enables that.  For example, a specific block storage device or loop
@@ -2007,19 +2232,19 @@
 
 
 ```
-	$ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
+	$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
 
 	Command (m for help): q
-	$ sudo docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk  /dev/xvdc
+	$ docker run --device=/dev/sda:/dev/xvdc:ro --rm -it ubuntu fdisk  /dev/xvdc
 	You will not be able to write the partition table.
 
 	Command (m for help): q
 
-	$ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
+	$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
 
 	Command (m for help): q
 
-	$ sudo docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk  /dev/xvdc
+	$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk  /dev/xvdc
 	fdisk: unable to open /dev/xvdc: Operation not permitted
 ```
 
@@ -2029,11 +2254,11 @@
 
 **A complete example:**
 
-    $ sudo docker run -d --name static static-web-files sh
-    $ sudo docker run -d --expose=8098 --name riak riakserver
-    $ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver
-    $ sudo docker run -d -p 1443:443 --dns=10.0.0.1 --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
-    $ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log
+    $ docker run -d --name static static-web-files sh
+    $ docker run -d --expose=8098 --name riak riakserver
+    $ docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver
+    $ docker run -d -p 1443:443 --dns=10.0.0.1 --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
+    $ docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log
 
 This example shows five containers that might be set up to test a web
 application change:
@@ -2058,9 +2283,9 @@
    `--rm` option means that when the container exits, the container's layer is
    removed.
 
-#### Restart Policies
+#### Restart policies
 
-Use Docker's `--restart` to specify a container's *restart policy*. A restart 
+Use Docker's `--restart` to specify a container's *restart policy*. A restart
 policy controls whether the Docker daemon restarts a container after exit.
 Docker supports the following restart policies:
 
@@ -2075,7 +2300,7 @@
     <tr>
       <td><strong>no</strong></td>
       <td>
-        Do not automatically restart the container when it exits. This is the 
+        Do not automatically restart the container when it exits. This is the
         default.
       </td>
     </tr>
@@ -2087,7 +2312,7 @@
       </td>
       <td>
         Restart only if the container exits with a non-zero exit status.
-        Optionally, limit the number of restart retries the Docker 
+        Optionally, limit the number of restart retries the Docker
         daemon attempts.
       </td>
     </tr>
@@ -2102,12 +2327,12 @@
   </tbody>
 </table>
 
-    $ sudo docker run --restart=always redis
+    $ docker run --restart=always redis
 
 This will run the `redis` container with a restart policy of **always**
 so that if the container exits, Docker will restart it.
 
-More detailed information on restart policies can be found in the 
+More detailed information on restart policies can be found in the
 [Restart Policies (--restart)](/reference/run/#restart-policies-restart) section
 of the Docker run reference page.
 
@@ -2159,6 +2384,8 @@
 > If you do not provide a `hard limit`, the `soft limit` will be used for both
 values. If no `ulimits` are set, they will be inherited from the default `ulimits`
 set on the daemon.
+> `as` option is disabled now. In other words, the following script is not supported:
+>   `$ docker run -it --ulimit as=1024 fedora /bin/bash`
 
 ## save
 
@@ -2174,18 +2401,18 @@
 
 It is used to create a backup that can then be used with `docker load`
 
-    $ sudo docker save busybox > busybox.tar
+    $ docker save busybox > busybox.tar
     $ ls -sh busybox.tar
     2.7M busybox.tar
-    $ sudo docker save --output busybox.tar busybox
+    $ docker save --output busybox.tar busybox
     $ ls -sh busybox.tar
     2.7M busybox.tar
-    $ sudo docker save -o fedora-all.tar fedora
-    $ sudo docker save -o fedora-latest.tar fedora:latest
+    $ docker save -o fedora-all.tar fedora
+    $ docker save -o fedora-latest.tar fedora:latest
 
 It is even useful to cherry-pick particular tags of an image repository
 
-   $ sudo docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
+    $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
 
 ## search
 
@@ -2222,13 +2449,14 @@
     Display a live stream of one or more containers' resource usage statistics
 
       --help=false       Print usage
+      --no-stream=false  Disable streaming stats and only pull the first result
 
 Running `docker stats` on multiple containers
 
-    $ sudo docker stats redis1 redis2
+    $ docker stats redis1 redis2
     CONTAINER           CPU %               MEM USAGE/LIMIT     MEM %               NET I/O
-    redis1              0.07%               796 KiB/64 MiB      1.21%               788 B/648 B
-    redis2              0.07%               2.746 MiB/64 MiB    4.29%               1.266 KiB/648 B
+    redis1              0.07%               796 KB/64 MB        1.21%               788 B/648 B
+    redis2              0.07%               2.746 MB/64 MB      4.29%               1.266 KB/648 B
 
 
 The `docker stats` command will only return a live stream of data for running
@@ -2289,7 +2517,7 @@
 Show the Docker version, API version, Git commit, Go version and OS/architecture
 of both Docker client and daemon. Example use:
 
-    $ sudo docker version
+    $ docker version
     Client version: 1.5.0
     Client API version: 1.17
     Go version (client): go1.4.1
diff --git a/docs/sources/reference/glossary.md b/docs/sources/reference/glossary.md
new file mode 100644
index 0000000..d33d015
--- /dev/null
+++ b/docs/sources/reference/glossary.md
@@ -0,0 +1,201 @@
+page_title: Docker Glossary
+page_description: Glossary of terms used around Docker
+page_keywords: glossary, docker, terms, definitions
+
+# Glossary
+
+A list of terms used around the Docker project.
+
+## aufs
+
+aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that
+Docker supports as a storage backend. It implements the
+[union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems.
+
+## boot2docker
+
+[boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made
+specifically to run Docker containers. It is a common choice for a [VM](#virtual-machine)
+to run Docker on Windows and Mac OS X.
+
+boot2docker can also refer to the boot2docker management tool on Windows and
+Mac OS X which manages the boot2docker VM.
+
+## btrfs
+
+btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker
+supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write)
+filesystem.
+
+## build
+
+build is the process of building Docker images using a [Dockerfile](#dockerfile).
+The build uses a Dockerfile and a "context". The context is the set of files in the
+directory in which the image is built.
+
+## cgroups
+
+cgroups is a Linux kernel feature that limits, accounts for, and isolates
+the resource usage (CPU, memory, disk I/O, network, etc.) of a collection
+of processes. Docker relies on cgroups to control and isolate resource limits.
+
+*Also known as : control groups*
+
+## Compose
+
+[Compose](https://github.com/docker/compose) is a tool for defining and
+running complex applications with Docker. With compose, you define a
+multi-container application in a single file, then spin your
+application up in a single command which does everything that needs to
+be done to get it running.
+
+*Also known as : docker-compose, fig*
+
+## container
+
+A container is a runtime instance of a [docker image](#image).
+
+A Docker container consists of
+
+- A Docker image
+- Execution environment
+- A standard set of instructions
+
+The concept is borrowed from Shipping Containers, which define a standard to ship
+goods globally. Docker defines a standard to ship software.
+
+## data volume
+
+A data volume is a specially-designated directory within one or more containers
+that bypasses the Union File System. Data volumes are designed to persist data,
+independent of the container's life cycle. Docker therefore never automatically
+delete volumes when you remove a container, nor will it "garbage collect"
+volumes that are no longer referenced by a container.
+
+
+## Docker
+
+The term Docker can refer to
+
+- The Docker project as a whole, which is a platform for developers and sysadmins to
+develop, ship, and run applications
+- The docker daemon process running on the host which manages images and containers
+
+
+## Docker Hub
+
+The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with
+Docker and its components. It provides the following services:
+
+- Docker image hosting
+- User authentication
+- Automated image builds and work-flow tools such as build triggers and web hooks
+- Integration with GitHub and BitBucket
+
+
+## Dockerfile
+
+A Dockerfile is a text document that contains all the commands you would
+normally execute manually in order to build a Docker image. Docker can
+build images automatically by reading the instructions from a Dockerfile.
+
+## filesystem
+
+A file system is the method an operating system uses to name files
+and assign them locations for efficient storage and retrieval.
+
+Examples :
+
+- Linux : ext4, aufs, btrfs, zfs
+- Windows : NTFS
+- OS X : HFS+
+
+## image
+
+Docker images are the basis of [containers](#container). An Image is an
+ordered collection of root filesystem changes and the corresponding
+execution parameters for use within a container runtime. An image typically
+contains a union of layered filesystems stacked on top of each other. An image
+does not have state and it never changes.
+
+## libcontainer
+
+libcontainer provides a native Go implementation for creating containers with
+namespaces, cgroups, capabilities, and filesystem access controls. It allows
+you to manage the lifecycle of the container performing additional operations
+after the container is created.
+
+## link
+
+links provide an interface to connect Docker containers running on the same host
+to each other without exposing the hosts' network ports. When you set up a link,
+you create a conduit between a source container and a recipient container.
+The recipient can then access select data about the source. To create a link,
+you can use the `--link` flag.
+
+## Machine
+
+[Machine](https://github.com/docker/machine) is a Docker tool which
+makes it really easy to create Docker hosts on  your computer, on
+cloud providers and inside your own data center. It creates servers,
+installs Docker on them, then configures the Docker client to talk to them.
+
+*Also known as : docker-machine*
+
+## overlay
+
+OverlayFS is a [filesystem](#filesystem) service for Linux which implements a
+[union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems.
+It is supported by the Docker daemon as a storage driver.
+
+## registry
+
+A Registry is a hosted service containing [repositories](#repository) of [images](#image)
+which responds to the Registry API.
+
+The default registry can be accessed using a browser at [Docker Hub](#docker-hub)
+or using the `docker search` command.
+
+## repository
+
+A repository is a set of Docker images. A repository can be shared by pushing it
+to a [registry](#registry) server. The different images in the repository can be
+labeled using [tags](#tag).
+
+Here is an example of the shared [nginx repository](https://registry.hub.docker.com/_/nginx/)
+and its [tags](https://registry.hub.docker.com/_/nginx/tags/manage/)
+
+## Swarm
+
+[Swarm](https://github.com/docker/swarm) is a native clustering tool for Docker.
+Swarm pools together several Docker hosts and exposes them as a single virtual
+Docker host. It serves the standard Docker API, so any tool that already works
+with Docker can now transparently scale up to multiple hosts.
+
+*Also known as : docker-swarm*
+
+## tag
+
+A tag is a label applied to a Docker image in a [repository](#repository).
+tags are how various images in a repository are distinguished from each other.
+
+*Note : This label is not related to the key=value labels set for docker daemon*
+
+## Union file system
+
+Union file systems, or UnionFS, are file systems that operate by creating layers, making them
+very lightweight and fast. Docker uses union file systems to provide the building
+blocks for containers.
+
+
+## Virtual Machine
+
+A Virtual Machine is a program that emulates a complete computer and imitates dedicated hardware.
+It shares physical hardware resources with other users but isolates the operating system. The
+end user has the same experience on a Virtual Machine as they would have on dedicated hardware.
+
+Compared to to containers, a Virtual Machine is heavier to run, provides more isolation,
+gets its own set of resources and does minimal sharing.
+
+*Also known as : VM*
+
diff --git a/docs/sources/reference/logging/journald.md b/docs/sources/reference/logging/journald.md
new file mode 100644
index 0000000..9c025bf
--- /dev/null
+++ b/docs/sources/reference/logging/journald.md
@@ -0,0 +1,66 @@
+# Journald logging driver
+
+The `journald` logging driver sends container logs to the [systemd
+journal](http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html).  Log entries can be retrieved using the `journalctl`
+command or through use of the journal API.
+
+In addition to the text of the log message itself, the `journald` log
+driver stores the following metadata in the journal with each message:
+
+| Field               | Description |
+----------------------|-------------|
+| `CONTAINER_ID`      | The container ID truncated to 12 characters. |
+| `CONTAINER_ID_FULL` | The full 64-character container ID. |
+| `CONTAINER_NAME`    | The container name at the time it was started. If you use `docker rename` to rename a container, the new name is not reflected in the journal entries. |
+
+## Usage
+
+You can configure the default logging driver by passing the
+`--log-driver` option to the Docker daemon:
+
+    docker --log-driver=journald
+
+You can set the logging driver for a specific container by using the
+`--log-driver` option to `docker run`:
+
+    docker run --log-driver=journald ...
+
+## Note regarding container names
+
+The value logged in the `CONTAINER_NAME` field is the container name
+that was set at startup.  If you use `docker rename` to rename a
+container, the new name will not be reflected in the journal entries.
+Journal entries will continue to use the original name.
+
+## Retrieving log messages with journalctl
+
+You can use the `journalctl` command to retrieve log messages.  You
+can apply filter expressions to limit the retrieved messages to a
+specific container.  For example, to retrieve all log messages from a
+container referenced by name:
+
+    # journalctl CONTAINER_NAME=webserver
+
+You can make use of additional filters to further limit the messages
+retrieved.  For example, to see just those messages generated since
+the system last booted:
+
+    # journalctl -b CONTAINER_NAME=webserver
+
+Or to retrieve log messages in JSON format with complete metadata:
+
+    # journalctl -o json CONTAINER_NAME=webserver
+
+## Retrieving log messages with the journal API
+
+This example uses the `systemd` Python module to retrieve container
+logs:
+
+    import systemd.journal
+
+    reader = systemd.journal.Reader()
+    reader.add_match('CONTAINER_NAME=web')
+
+    for msg in reader:
+      print '{CONTAINER_ID_FULL}: {MESSAGE}'.format(**msg)
+
diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md
index 5a46646..7c5113f 100644
--- a/docs/sources/reference/run.md
+++ b/docs/sources/reference/run.md
@@ -24,26 +24,30 @@
 
 The basic `docker run` command takes this form:
 
-    $ sudo docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
+    $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
 
 To learn how to interpret the types of `[OPTIONS]`,
 see [*Option types*](/reference/commandline/cli/#option-types).
 
-The list of `[OPTIONS]` breaks down into two groups:
+The `run` options control the image's runtime behavior in a container. These
+settings affect:
 
-1. Settings exclusive to operators, including:
-     * Detached or Foreground running,
-     * Container Identification,
-     * Network settings, and
-     * Runtime Constraints on CPU and Memory
-     * Privileges and LXC Configuration
-2. Settings shared between operators and developers, where operators can
-   override defaults developers set in images at build time.
+ * detached or foreground running
+ * container identification
+ * network settings
+ * runtime constraints on CPU and memory
+ * privileges and LXC configuration
+ 
+An image developer may set defaults for these same settings when they create the
+image using the `docker build` command. Operators, however, can override all
+defaults set by the developer using the `run` options.  And, operators can also
+override nearly all the defaults set by the Docker runtime itself.
 
-Together, the `docker run [OPTIONS]` give the operator complete control over runtime
-behavior, allowing them to override all defaults set by
-the developer during `docker build` and nearly all the defaults set by
-the Docker runtime itself.
+Finally, depending on your Docker system configuration, you may be required to
+preface each `docker` command with `sudo`. To avoid having to use `sudo` with
+the `docker` command, your system administrator can create a Unix group called
+`docker` and add users to it. For more information about this configuration,
+refer to the Docker installation documentation for your operating system.
 
 ## Operator exclusive options
 
@@ -99,13 +103,18 @@
 specify to which of the three standard streams (`STDIN`, `STDOUT`,
 `STDERR`) you'd like to connect instead, as in:
 
-    $ sudo docker run -a stdin -a stdout -i -t ubuntu /bin/bash
+    $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
 
 For interactive processes (like a shell), you must use `-i -t` together in
 order to allocate a tty for the container process. `-i -t` is often written `-it`
 as you'll see in later examples.  Specifying `-t` is forbidden when the client
 standard output is redirected or piped, such as in:
-`echo test | sudo docker run -i busybox cat`.
+`echo test | docker run -i busybox cat`.
+
+>**Note**: A process running as PID 1 inside a container is treated
+>specially by Linux: it ignores any signal with the default action.
+>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is
+>coded to do so.
 
 ## Container identification
 
@@ -147,7 +156,8 @@
 called a digest. As long as the input used to generate the image is unchanged,
 the digest value is predictable and referenceable.
 
-## PID Settings (--pid)
+## PID settings (--pid)
+
     --pid=""  : Set the PID (Process) Namespace mode for the container,
            'host': use the host's PID namespace inside the container
 
@@ -163,12 +173,29 @@
 like `strace` or `gdb`, but want to use these tools when debugging processes
 within the container.
 
-    $ sudo docker run --pid=host rhel7 strace -p 1234
+    $ docker run --pid=host rhel7 strace -p 1234
 
 This command would allow you to use `strace` inside the container on pid 1234 on
 the host.
 
-## IPC Settings (--ipc)
+## UTS settings (--uts)
+
+    --uts=""  : Set the UTS namespace mode for the container,
+           'host': use the host's UTS namespace inside the container
+
+The UTS namespace is for setting the hostname and the domain that is visible
+to running processes in that namespace.  By default, all containers, including
+those with `--net=host`, have their own UTS namespace.  The `host` setting will
+result in the container using the same UTS namespace as the host.
+
+You may wish to share the UTS namespace with the host if you would like the
+hostname of the container to change as the hostname of the host changes.  A
+more advanced use case would be changing the host's hostname from a container.
+
+> **Note**: `--uts="host"` gives the container full access to change the
+> hostname of the host and is therefore considered insecure.
+
+## IPC settings (--ipc)
 
     --ipc=""  : Set the IPC mode for the container,
                  'container:<name|id>': reuses another container's IPC namespace
@@ -207,9 +234,9 @@
 Your container will use the same DNS servers as the host by default, but
 you can override this with `--dns`.
 
-By default a random MAC is generated. You can set the container's MAC address
-explicitly by providing a MAC via the `--mac-address` parameter (format:
-`12:34:56:78:9a:bc`).
+By default, the MAC address is generated using the IP address allocated to the
+container. You can set the container's MAC address explicitly by providing a
+MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).
 
 Supported networking modes are:
 
@@ -273,7 +300,15 @@
 network stack and all interfaces from the host will be available to the
 container.  The container's hostname will match the hostname on the host
 system.  Publishing ports and linking to other containers will not work
-when sharing the host's network stack.
+when sharing the host's network stack. Note that `--add-host` `--hostname`
+`--dns` `--dns-search` and `--mac-address` is invalid in `host` netmode.
+
+Compared to the default `bridge` mode, the `host` mode gives *significantly*
+better networking performance since it uses the host's native networking stack
+whereas the bridge has to go through one level of virtualization through the
+docker daemon. It is recommended to run containers in this mode when their
+networking performance is critical, for example, a production Load Balancer
+or a High Performance Web Server.
 
 > **Note**: `--net="host"` gives the container full access to local system
 > services such as D-bus and is therefore considered insecure.
@@ -282,15 +317,17 @@
 
 With the networking mode set to `container` a container will share the
 network stack of another container.  The other container's name must be
-provided in the format of `--net container:<name|id>`.
+provided in the format of `--net container:<name|id>`. Note that `--add-host` 
+`--hostname` `--dns` `--dns-search` and `--mac-address` is invalid 
+in `container` netmode.
 
 Example running a Redis container with Redis binding to `localhost` then
 running the `redis-cli` command and connecting to the Redis server over the
 `localhost` interface.
 
-    $ sudo docker run -d --name redis example/redis --bind 127.0.0.1
+    $ docker run -d --name redis example/redis --bind 127.0.0.1
     $ # use the redis container's network stack to access localhost
-    $ sudo docker run --rm -it --net container:redis example/redis-cli -h 127.0.0.1
+    $ docker run --rm -it --net container:redis example/redis-cli -h 127.0.0.1
 
 ### Managing /etc/hosts
 
@@ -298,7 +335,7 @@
 container itself as well as `localhost` and a few other common things.  The
 `--add-host` flag can be used to add additional lines to `/etc/hosts`.  
 
-    $ sudo docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts
+    $ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts
     172.17.0.22     09d03f76bf2c
     fe00::0         ip6-localnet
     ff00::0         ip6-mcastprefix
@@ -364,7 +401,7 @@
 and so on until either the `on-failure` limit is hit, or when you `docker stop`
 or `docker rm -f` the container.
 
-If a container is succesfully restarted (the container is started and runs
+If a container is successfully restarted (the container is started and runs
 for at least 10 seconds), the delay is reset to its default value of 100 ms.
 
 You can specify the maximum amount of times Docker will try to restart the
@@ -374,7 +411,7 @@
 /reference/commandline/cli/#inspect). For example, to get the number of restarts
 for container "my-container";
 
-    $ sudo docker inspect -f "{{ .RestartCount }}" my-container
+    $ docker inspect -f "{{ .RestartCount }}" my-container
     # 2
 
 Or, to get the last time the container was (re)started;
@@ -388,12 +425,12 @@
 
 ###Examples
 
-    $ sudo docker run --restart=always redis
+    $ docker run --restart=always redis
 
 This will run the `redis` container with a restart policy of **always**
 so that if the container exits, Docker will restart it.
 
-    $ sudo docker run --restart=on-failure:10 redis
+    $ docker run --restart=on-failure:10 redis
 
 This will run the `redis` container with a restart policy of **on-failure** 
 and a maximum restart count of 10.  If the `redis` container exits with a
@@ -427,36 +464,49 @@
 requirement for MLS systems. Specifying the level in the following command
 allows you to share the same content between containers.
 
-    # docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash
+    $ docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash
 
 An MLS example might be:
 
-    # docker run --security-opt label:level:TopSecret -i -t rhel7 bash
+    $ docker run --security-opt label:level:TopSecret -i -t rhel7 bash
 
 To disable the security labeling for this container versus running with the
 `--permissive` flag, use the following command:
 
-    # docker run --security-opt label:disable -i -t fedora bash
+    $ docker run --security-opt label:disable -i -t fedora bash
 
 If you want a tighter security policy on the processes within a container,
 you can specify an alternate type for the container. You could run a container
 that is only allowed to listen on Apache ports by executing the following
 command:
 
-    # docker run --security-opt label:type:svirt_apache_t -i -t centos bash
+    $ docker run --security-opt label:type:svirt_apache_t -i -t centos bash
 
 Note:
 
 You would have to write policy defining a `svirt_apache_t` type.
 
-## Runtime constraints on CPU and memory
+## Specifying custom cgroups
+
+Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a
+container in. This allows you to create and manage cgroups on their own. You can
+define custom resources for those cgroups and put containers under a common
+parent group.
+
+## Runtime constraints on resources
 
 The operator can also adjust the performance parameters of the
 container:
 
-    -m="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
+    -m, --memory="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
     -memory-swap="": Total memory limit (memory + swap, format: <number><optional unit>, where unit = b, k, m or g)
-    -c, --cpu-shares=0         CPU shares (relative weight)
+    -c, --cpu-shares=0: CPU shares (relative weight)
+    --cpu-period=0: Limit the CPU CFS (Completely Fair Scheduler) period
+    --cpuset-cpus="": CPUs in which to allow execution (0-3, 0,1)
+    --cpuset-mems="": Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+    --cpu-quota=0: Limit the CPU CFS (Completely Fair Scheduler) quota
+    --blkio-weight=0: Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+    --oom-kill-disable=true|false: Whether to disable OOM Killer for the container or not.
 
 ### Memory constraints
 
@@ -508,6 +558,52 @@
   </tbody>
 </table>
 
+Examples:
+
+    $ docker run -ti ubuntu:14.04 /bin/bash
+
+We set nothing about memory, this means the processes in the container can use
+as much memory and swap memory as they need.
+
+    $ docker run -ti -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash
+
+We set memory limit and disabled swap memory limit, this means the processes in
+the container can use 300M memory and as much swap memory as they need (if the
+host supports swap memory).
+
+    $ docker run -ti -m 300M ubuntu:14.04 /bin/bash
+
+We set memory limit only, this means the processes in the container can use
+300M memory and 300M swap memory, by default, the total virtual memory size
+(--memory-swap) will be set as double of memory, in this case, memory + swap
+would be 2*300M, so processes can use 300M swap memory as well.
+
+    $ docker run -ti -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash
+
+We set both memory and swap memory, so the processes in the container can use
+300M memory and 700M swap memory.
+
+By default, Docker kills processes in a container if an out-of-memory (OOM)
+error occurs. To change this behaviour, use the `--oom-kill-disable` option.
+Only disable the OOM killer on containers where you have also set the
+`-m/--memory` option. If the `-m` flag is not set, this can result in the host
+running out of memory and require killing the host's system processes to free
+memory.
+
+Examples:
+
+The following example limits the memory to 100M and disables the OOM killer for
+this container:
+
+    $ docker run -ti -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash
+
+The following example, illustrates a dangerous way to use the flag:
+
+    $ docker run -ti --oom-kill-disable ubuntu:14.04 /bin/bash
+
+The container has unlimited memory which can cause the host to run out memory
+and require killing system processes to free memory.
+
 ### CPU share constraint
 
 By default, all containers get the same proportion of CPU cycles. This proportion
@@ -525,7 +621,7 @@
 For example, consider three containers, one has a cpu-share of 1024 and
 two others have a cpu-share setting of 512. When processes in all three
 containers attempt to use 100% of CPU, the first container would receive
-50% of the total CPU time. If you add a fouth container with a cpu-share
+50% of the total CPU time. If you add a fourth container with a cpu-share
 of 1024, the first container only gets 33% of the CPU. The remaining containers
 receive 16.5%, 16.5% and 33% of the CPU.
 
@@ -543,6 +639,82 @@
     101    {C1}		1	100% of CPU1
     102    {C1}		2	100% of CPU2
 
+### CPU period constraint
+
+The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use
+`--cpu-period` to set the period of CPUs to limit the container's CPU usage. 
+And usually `--cpu-period` should work with `--cpu-quota`.
+
+Examples:
+
+    $ docker run -ti --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash
+
+If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms.
+
+For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt).
+
+### Cpuset constraint
+
+We can set cpus in which to allow execution for containers.
+
+Examples:
+
+    $ docker run -ti --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash
+
+This means processes in container can be executed on cpu 1 and cpu 3.
+
+    $ docker run -ti --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash
+
+This means processes in container can be executed on cpu 0, cpu 1 and cpu 2.
+
+We can set mems in which to allow execution for containers. Only effective
+on NUMA systems.
+
+Examples:
+
+    $ docker run -ti --cpuset-mems="1,3" ubuntu:14.04 /bin/bash
+
+This example restricts the processes in the container to only use memory from
+memory nodes 1 and 3.
+
+    $ docker run -ti --cpuset-mems="0-2" ubuntu:14.04 /bin/bash
+
+This example restricts the processes in the container to only use memory from
+memory nodes 0, 1 and 2.
+
+### CPU quota constraint
+
+The `--cpu-quota` flag limits the container's CPU usage. The default 0 value
+allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair
+Scheduler) handles resource allocation for executing processes and is default
+Linux Scheduler used by the kernel. Set this value to 50000 to limit the container
+to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary.
+For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt).
+
+### Block IO bandwidth (Blkio) constraint
+
+By default, all containers get the same proportion of block IO bandwidth
+(blkio). This proportion is 500. To modify this proportion, change the
+container's blkio weight relative to the weighting of all other running
+containers using the `--blkio-weight` flag.
+
+The `--blkio-weight` flag can set the weighting to a value between 10 to 1000.
+For example, the commands below create two containers with different blkio
+weight:
+
+    $ docker run -ti --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash
+    $ docker run -ti --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash
+
+If you do block IO in the two containers at the same time, by, for example:
+
+    $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct
+
+You'll find that the proportion of time is the same as the proportion of blkio
+weights of the two containers.
+
+> **Note:** The blkio weight setting is only available for direct IO. Buffered IO
+> is not currently supported.
+
 ## Runtime privilege, Linux capabilities, and LXC configuration
 
     --cap-add: Add Linux capabilities
@@ -570,31 +742,75 @@
 the `--device` flag. It allows you to specify one or more devices that
 will be accessible within the container.
 
-    $ sudo docker run --device=/dev/snd:/dev/snd ...
+    $ docker run --device=/dev/snd:/dev/snd ...
 
 By default, the container will be able to `read`, `write`, and `mknod` these devices.
 This can be overridden using a third `:rwm` set of options to each `--device` flag:
 
-    $ sudo docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
+    $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk  /dev/xvdc
 
     Command (m for help): q
-    $ sudo docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk  /dev/xvdc
+    $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk  /dev/xvdc
     You will not be able to write the partition table.
 
     Command (m for help): q
 
-    $ sudo docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk  /dev/xvdc
+    $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk  /dev/xvdc
         crash....
 
-    $ sudo docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk  /dev/xvdc
+    $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk  /dev/xvdc
     fdisk: unable to open /dev/xvdc: Operation not permitted
 
 In addition to `--privileged`, the operator can have fine grain control over the
 capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default
-list of capabilities that are kept. Both flags support the value `all`, so if the
+list of capabilities that are kept. The following table lists the Linux capability options which can be added or dropped.
+
+| Capability Key | Capability Description |
+| :----------------- | :---------------| :-------------------- |
+| SETPCAP | Modify process capabilities. |
+| SYS_MODULE| Load and unload kernel modules. |
+| SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). |
+| SYS_PACCT | Use acct(2), switch process accounting on or off. |
+| SYS_ADMIN | Perform a range of system administration operations. |
+| SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. |
+| SYS_RESOURCE | Override resource Limits. |
+| SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. |
+| SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. |
+| MKNOD | Create special files using mknod(2). |
+| AUDIT_WRITE | Write records to kernel auditing log. |
+| AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. |
+| MAC_OVERRIDE | Allow MAC configuration or state changes. Implemented for the Smack LSM. |
+| MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). |
+| NET_ADMIN | Perform various network-related operations. |
+| SYSLOG | Perform privileged syslog(2) operations.  |
+| CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). |
+| NET_RAW | Use RAW and PACKET sockets. |
+| DAC_OVERRIDE | Bypass file read, write, and execute permission checks. |
+| FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. |
+| DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. |
+| FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. |
+| KILL | Bypass permission checks for sending signals. |
+| SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. |
+| SETUID | Make arbitrary manipulations of process UIDs. |
+| LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. |
+| NET_BIND_SERVICE  | Bind a socket to internet domain privileged ports (port numbers less than 1024). |
+| NET_BROADCAST |  Make socket broadcasts, and listen to multicasts. |
+| IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). |
+| IPC_OWNER | Bypass permission checks for operations on System V IPC objects. |
+| SYS_CHROOT | Use chroot(2), change root directory. |
+| SYS_PTRACE | Trace arbitrary processes using ptrace(2). |
+| SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. |
+| LEASE | Establish leases on arbitrary files (see fcntl(2)). |
+| SETFCAP | Set file capabilities.|
+| WAKE_ALARM | Trigger something that will wake up the system. |
+| BLOCK_SUSPEND | Employ features that can block system suspend. |
+
+Further reference information is available on the [capabilities(7) - Linux man page](http://linux.die.net/man/7/capabilities)
+
+Both flags support the value `all`, so if the
 operator wants to have all capabilities but `MKNOD` they could use:
 
-    $ sudo docker run --cap-add=ALL --cap-drop=MKNOD ...
+    $ docker run --cap-add=ALL --cap-drop=MKNOD ...
 
 For interacting with the network stack, instead of using `--privileged` they
 should use `--cap-add=NET_ADMIN` to modify the network interfaces.
@@ -647,21 +863,29 @@
 
 You can specify a different logging driver for the container than for the daemon.
 
-### Logging driver: none
+#### Logging driver: none
 
 Disables any logging for the container. `docker logs` won't be available with
 this driver.
 
-### Log driver: json-file
+#### Logging driver: json-file
 
 Default logging driver for Docker. Writes JSON messages to file. `docker logs`
 command is available only for this logging driver
 
-## Logging driver: syslog
+#### Logging driver: syslog
 
 Syslog logging driver for Docker. Writes log messages to syslog. `docker logs`
 command is not available for this logging driver
 
+#### Logging driver: journald
+
+Journald logging driver for Docker. Writes log messages to journald; the container id will be stored in the journal's `CONTAINER_ID` field. `docker logs` command is not available for this logging driver.  For detailed information on working with this logging driver, see [the journald logging driver](reference/logging/journald) reference documentation.
+
+#### Log Opts : 
+
+Logging options for configuring a log driver. The following log options are supported: [none]
+
 ## Overriding Dockerfile image defaults
 
 When a developer builds an image from a [*Dockerfile*](/reference/builder)
@@ -687,7 +911,7 @@
 Recall the optional `COMMAND` in the Docker
 commandline:
 
-    $ sudo docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
+    $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...]
 
 This command is optional because the person who created the `IMAGE` may
 have already provided a default `COMMAND` using the Dockerfile `CMD`
@@ -714,12 +938,12 @@
 example of how to run a shell in a container that has been set up to
 automatically run something else (like `/usr/bin/redis-server`):
 
-    $ sudo docker run -i -t --entrypoint /bin/bash example/redis
+    $ docker run -i -t --entrypoint /bin/bash example/redis
 
 or two examples of how to pass more parameters to that ENTRYPOINT:
 
-    $ sudo docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
-    $ sudo docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help
+    $ docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
+    $ docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help
 
 ## EXPOSE (incoming ports)
 
@@ -736,7 +960,7 @@
                    Both hostPort and containerPort can be specified as a range of ports. 
                    When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`)
                    (use 'docker port' to see the actual mapping)
-    --link=""  : Add link to another container (<name or id>:alias)
+    --link=""  : Add link to another container (<name or id>:alias or <name or id>)
 
 As mentioned previously, `EXPOSE` (and `--expose`) makes ports available
 **in** a container for incoming connections. The port number on the
@@ -806,7 +1030,7 @@
 container by using one or more `-e` flags, even overriding those mentioned 
 above, or already defined by the developer with a Dockerfile `ENV`:
 
-    $ sudo docker run -e "deep=purple" --rm ubuntu /bin/bash -c export
+    $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export
     declare -x HOME="/"
     declare -x HOSTNAME="85bc26a0e200"
     declare -x OLDPWD
@@ -824,23 +1048,23 @@
 container running Redis:
 
     # Start the service container, named redis-name
-    $ sudo docker run -d --name redis-name dockerfiles/redis
+    $ docker run -d --name redis-name dockerfiles/redis
     4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3
 
     # The redis-name container exposed port 6379
-    $ sudo docker ps
+    $ docker ps
     CONTAINER ID        IMAGE                        COMMAND                CREATED             STATUS              PORTS               NAMES
     4241164edf6f        $ dockerfiles/redis:latest   /redis-stable/src/re   5 seconds ago       Up 4 seconds        6379/tcp            redis-name
 
     # Note that there are no public ports exposed since we didn᾿t use -p or -P
-    $ sudo docker port 4241164edf6f 6379
+    $ docker port 4241164edf6f 6379
     2014/01/25 00:55:38 Error: No public port '6379' published for 4241164edf6f
 
 Yet we can get information about the Redis container's exposed ports
 with `--link`. Choose an alias that will form a
 valid environment variable!
 
-    $ sudo docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export
+    $ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export
     declare -x HOME="/"
     declare -x HOSTNAME="acda7f7b1cdc"
     declare -x OLDPWD
@@ -857,15 +1081,15 @@
 
 And we can use that information to connect from another container as a client:
 
-    $ sudo docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
+    $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
     172.17.0.32:6379>
 
 Docker will also map the private IP address to the alias of a linked
 container by inserting an entry into `/etc/hosts`.  You can use this
 mechanism to communicate with a linked container by its alias:
 
-    $ sudo docker run -d --name servicename busybox sleep 30
-    $ sudo docker run -i -t --link servicename:servicealias busybox ping -c 1 servicealias
+    $ docker run -d --name servicename busybox sleep 30
+    $ docker run -i -t --link servicename:servicealias busybox ping -c 1 servicealias
 
 If you restart the source container (`servicename` in this case), the recipient
 container's `/etc/hosts` entry will be automatically updated.
diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md
index fe79d88..f01d783 100644
--- a/docs/sources/release-notes.md
+++ b/docs/sources/release-notes.md
@@ -1,75 +1,136 @@
-page_title: Docker 1.x Series Release Notes
-page_description: Release Notes for Docker 1.x.
+page_title: Docker 1.x series release notes
+page_description: Release notes for Docker 1.x.
 page_keywords: docker, documentation, about, technology, understanding, release
 
-# Release Notes
+# Release notes version 1.6.0
+(2015-04-16)
 
 You can view release notes for earlier version of Docker by selecting the
-desired version from the drop-down list at the top right of this page.
+desired version from the drop-down list at the top right of this page. For the
+formal release announcement, see [the Docker
+blog](https://blog.docker.com/2015/04/docker-release-1-6/).
 
-## Version 1.5.0
-(2015-02-03)
 
-For a complete list of patches, fixes, and other improvements, see the
-[merge PR on GitHub](https://github.com/docker/docker/pull/10286).
 
-*New Features*
+## Docker Engine 1.6.0 features
 
-* [1.6] The Docker daemon will no longer ignore unknown commands
-  while processing a `Dockerfile`. Instead it will generate an error and halt
-  processing.
-* The Docker daemon has now supports for IPv6 networking between containers
-  and on the `docker0` bridge. For more information see the
-  [IPv6 networking reference](/articles/networking/#ipv6).
-* Docker container filesystems can now be set to`--read-only`, restricting your
-  container to writing to volumes [PR# 10093](https://github.com/docker/docker/pull/10093).
-* A new `docker stats CONTAINERID` command has been added to allow users to view a
-  continuously updating stream of container resource usage statistics. See the
-  [`stats` command line reference](/reference/commandline/cli/#stats) and the
-  [container `stats` API reference](/reference/api/docker_remote_api_v1.17/#get-container-stats-based-on-resource-usage).
-  **Note**: this feature is only enabled for the `libcontainer` exec-driver at this point.
-* Users can now specify the file to use as the `Dockerfile` by running
-  `docker build -f alternate.dockerfile .`. This will allow the definition of multiple
-  `Dockerfile`s for a single project. See the [`docker build` command reference](
-/reference/commandline/cli/#build) for more information.
-* The v1 Open Image specification has been created to document the current Docker image
-  format and metadata. Please see [the Open Image specification document](
-https://github.com/docker/docker/blob/master/image/spec/v1.md) for more details.
-* This release also includes a number of significant performance improvements in
-  build and image management ([PR #9720](https://github.com/docker/docker/pull/9720),
-  [PR #8827](https://github.com/docker/docker/pull/8827))
-* The `docker inspect` command now lists ExecIDs generated for each `docker exec` process.
-  See [PR #9800](https://github.com/docker/docker/pull/9800)) for more details.
-* The `docker inspect` command now shows the number of container restarts when there
-  is a restart policy ([PR #9621](https://github.com/docker/docker/pull/9621))
-* This version of Docker is built using Go 1.4
+For a complete list of engine patches, fixes, and other improvements, see the
+[merge PR on GitHub](https://github.com/docker/docker/pull/11635). You'll also
+find [a changelog in the project
+repository](https://github.com/docker/docker/blob/master/CHANGELOG.md).
 
-> **Note:**
-> Development history prior to version 1.0 can be found by
-> searching in the [Docker GitHub repo](https://github.com/docker/docker).
 
-## Known Issues
+| Feature                      | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                        |
+|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Container and Image Labels   | Labels allow you to attach user-defined metadata to containers and images that can be used by your tools. For additional information on using labels, see [Apply custom metadata](http://docs.docker.com/userguide/labels-custom-metadata/#add-labels-to-images-the-label-instruction) in the documentation.                                                                                                                                                    |
+| Windows Client preview       | The Windows Client can be used just like the Mac OS X client is today with a remote host. Our testing infrastructure was scaled out to accommodate Windows Client testing on every PR to the Engine. See the Azure blog for [details on using this new client](http://azure.microsoft.com/blog/2015/04/16/docker-client-for-windows-is-now-available).                                                                                                           |
+| Logging drivers              | The new logging driver follows the exec driver and storage driver concepts already available in Engine today. There is a new option `--log-driver` to `docker run` command. See the `run` reference for a [description on how to use this option](http://docs.docker.com/reference/run/#logging-drivers-log-driver).                                                                                                                                            |
+| Image digests                | When you pull, build, or run images, you specify them in the form `namespace/repository:tag`, or even just `repository`. In this release, you are now able to pull, run, build and refer to images by a new content addressable identifier called a “digest” with the syntax `namespace/repo@digest`. See the the command line reference for [examples of using the digest](http://docs.docker.com/reference/commandline/cli/#listing-image-digests).           |
+| Custom cgroups               | Containers are made from a combination of namespaces, capabilities, and cgroups. Docker already supports custom namespaces and capabilities. Additionally, in this release we’ve added support for custom cgroups. Using the `--cgroup-parent` flag, you can pass a specific `cgroup` to run a container in. See [the command line reference for more information](http://docs.docker.com/reference/commandline/cli/#create).                                   |
+| Ulimits                      | You can now specify the default `ulimit` settings for all containers when configuring the daemon. For example:`docker -d --default-ulimit nproc=1024:2048` See [Default Ulimits](http://docs.docker.com/reference/commandline/cli/#default-ulimits) in this documentation.                                                                                                                                                                                   |
+| Commit and import Dockerfile | You can now make changes to images on the fly without having to re-build the entire image. The feature `commit --change` and `import --change` allows you to apply standard changes to a new image. These are expressed in the Dockerfile syntax and used to modify the image. For details on how to use these, see the [commit](http://docs.docker.com/reference/commandline/cli/#commit) and [import](http://docs.docker.com/reference/commandline/cli/#import). |
 
-This section lists significant known issues present in Docker as of release
-date. It is not exhaustive; it lists only issues with potentially significant
-impact on users. This list will be updated as issues are resolved.
+### Known issues in Engine
 
-* **Unexpected File Permissions in Containers**
-An idiosyncrasy in AUFS prevents permissions from propagating predictably
-between upper and lower layers. This can cause issues with accessing private
-keys, database instances, etc.
+This section lists significant known issues present in Docker as of release date.
+For an exhaustive list of issues, see [the issues list on the project
+repository](https://github.com/docker/docker/issues/).
 
-For systems that have recent aufs version (i.e., `dirperm1` mount option can
-be set), docker will attempt to fix the issue automatically by mounting
-the layers with `dirperm1` option. More details on `dirperm1` option can be
-found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html)
-
-For complete information and workarounds see
+* *Unexpected File Permissions in Containers*
+An idiosyncrasy in AUFS prevented permissions from propagating predictably
+between upper and lower layers. This caused issues with accessing private
+keys, database instances, etc.  This issue was closed in this release:
 [Github Issue 783](https://github.com/docker/docker/issues/783).
 
-* **Docker Hub incompatible with Safari 8**
-Docker Hub has multiple issues displaying on Safari 8, the default browser
-for OS X 10.10 (Yosemite). Users should access the hub using a different
-browser. Most notably, changes in the way Safari handles cookies means that the
-user is repeatedly logged out. For more information, see the [Docker
-forum post](https://forums.docker.com/t/new-safari-in-yosemite-issue/300).
+
+* *Docker Hub incompatible with Safari 8*
+Docker Hub had multiple issues displaying on Safari 8, the default browser for
+OS X 10.10 (Yosemite).  Most notably, changes in the way Safari handled cookies
+means that the user was repeatedly logged out.
+Recently, Safari fixed the bug that was causing all the issues. If you upgrade
+to Safari 8.0.5 which was just released last week and see if that fixes your
+issues. You might have to flush your cookies if it doesn't work right away.
+For more information, see the [Docker forum
+post](https://forums.docker.com/t/new-safari-in-yosemite-issue/300).
+
+## Docker Registry 2.0 features
+
+This release includes Registry 2.0. The Docker Registry is a central server for
+pushing and pulling images. In this release, it was completely rewritten in Go
+around a new set of distribution APIs
+
+- **Webhook notifications**: You can now configure the Registry to send Webhooks
+when images are pushed. Spin off a CI build, send a notification to IRC –
+whatever you want! Included in the documentation is a detailed [notification
+specification](http://docs.docker.com/registry/notifications/).
+
+- **Native TLS support**: This release makes it easier to secure a registry with
+TLS.  This documentation includes [expanded examples of secure
+deployments](http://docs.docker.com/registry/deploying/).
+
+- **New Distribution APIs**: This release includes an expanded set of new
+distribution APIs. You can read the [detailed specification
+here](http://docs.docker.com/registry/spec/api/).
+
+
+## Docker Compose 1.2
+
+For a complete list of compose patches, fixes, and other improvements, see the
+[changelog in the project
+repository](https://github.com/docker/compose/blob/master/CHANGES.md). The
+project also makes a [set of release
+notes](https://github.com/docker/compose/releases/tag/1.2.0) on the project.
+
+- **extends**:  You can use `extends` to share configuration between services
+with the keyword “extends”. With extends, you can refer to a service defined
+elsewhere and include its configuration in a locally-defined service, while also
+adding or overriding configuration as necessary. The documentation describes
+[how to use extends in your
+configuration](http://docs.docker.com/compose/extends/#extending-services-in-
+compose).
+
+- **Relative directory handling may cause breaking change**: Compose now treats
+directories passed to build, filenames passed to `env_file` and volume host
+paths passed to volumes as relative to the configuration file's directory.
+Previously, they were treated as relative to the directory where you were
+running `docker-compose`. In the majority of cases, the location of the
+configuration file and where you ran `docker-compose` were the same directory.
+Now, you can use the `-f|--file` argument to specify a configuration file in
+another directory. 
+
+
+## Docker Swarm 0.2
+
+You'll find the [release for download on
+GitHub](https://github.com/docker/swarm/releases/tag/v0.2.0) and [the
+documentation here](http://docs.docker.com/swarm/).  This release includes the
+following features:
+
+- **Spread strategy**: A new strategy for scheduling containers on your cluster
+which evenly spreads them over available nodes.
+- **More Docker commands supported**: More progress has been made towards
+supporting the complete Docker API, such as pulling and inspecting images.
+- **Clustering drivers**: There are not any third-party drivers yet, but the
+first steps have been made towards making a pluggable driver interface that will
+make it possible to use Swarm with clustering systems such as Mesos.
+
+
+## Docker Machine 0.2 Pre-release
+
+You'll find the [release for download on
+GitHub](https://github.com/docker/machine/releases) and [the documentation
+here](http://docs.docker.com/machine/).  For a complete list of machine changes
+see [the changelog in the project
+repository](https://github.com/docker/machine/blob/master/CHANGES.md#020-2015-03
+-22).
+
+- **Cleaner driver interface**: It is now much easier to write drivers for providers.
+- **More reliable and consistent provisioning**: Provisioning servers is now
+handled centrally by Machine instead of letting each driver individually do it.
+- **Regenerate TLS certificates**: A new command has been added to regenerate a
+host’s TLS certificates for good security practice and for if a host’s IP
+address changes. 
+
+## Docker Hub Enterprise & Commercially Supported Docker Engine
+
+See the [DHE and CS Docker Engine release notes](docker-hub-enterprise/release-notes.md).
diff --git a/docs/sources/terms/container.md b/docs/sources/terms/container.md
index 8b42868..d0c31c2 100644
--- a/docs/sources/terms/container.md
+++ b/docs/sources/terms/container.md
@@ -17,7 +17,7 @@
 and some additional information like its unique id, networking
 configuration, and resource limits is called a **container**.
 
-## Container State
+## Container state
 
 Containers can change, and so they have state. A container may be
 **running** or **exited**.
diff --git a/docs/sources/terms/filesystem.md b/docs/sources/terms/filesystem.md
index 5587e3c..814246d 100644
--- a/docs/sources/terms/filesystem.md
+++ b/docs/sources/terms/filesystem.md
@@ -1,8 +1,8 @@
-page_title: File Systems
+page_title: File system
 page_description: How Linux organizes its persistent storage
 page_keywords: containers, files, linux
 
-# File System
+# File system
 
 ## Introduction
 
diff --git a/docs/sources/terms/image.md b/docs/sources/terms/image.md
index e42a6cf..0a11d91 100644
--- a/docs/sources/terms/image.md
+++ b/docs/sources/terms/image.md
@@ -1,4 +1,4 @@
-page_title: Images
+page_title: Image
 page_description: Definition of an image
 page_keywords: containers, lxc, concepts, explanation, image, container
 
@@ -19,7 +19,7 @@
 
 ![](/terms/images/docker-filesystems-debianrw.png)
 
-## Parent Image
+## Parent image
 
 ![](/terms/images/docker-filesystems-multilayer.png)
 
@@ -27,7 +27,7 @@
 it. We sometimes say that the lower image is the **parent** of the upper
 image.
 
-## Base Image
+## Base image
 
 An image that has no parent is a **base image**.
 
diff --git a/docs/sources/terms/registry.md b/docs/sources/terms/registry.md
index 8a7e623..ad5a81d 100644
--- a/docs/sources/terms/registry.md
+++ b/docs/sources/terms/registry.md
@@ -12,9 +12,9 @@
 
 The default registry can be accessed using a browser at
 [Docker Hub](https://hub.docker.com) or using the
-`sudo docker search` command.
+`docker search` command.
 
-## Further Reading
+## Further reading
 
 For more information see [*Working with
 Repositories*](/userguide/dockerrepos/#working-with-the-repository)
diff --git a/docs/sources/terms/repository.md b/docs/sources/terms/repository.md
index c4d1d43..4b85799 100644
--- a/docs/sources/terms/repository.md
+++ b/docs/sources/terms/repository.md
@@ -13,11 +13,11 @@
 Images can be associated with a repository (or multiple) by giving them
 an image name using one of three different commands:
 
-1. At build time (e.g., `sudo docker build -t IMAGENAME`),
+1. At build time (e.g., `docker build -t IMAGENAME`),
 2. When committing a container (e.g.,
-   `sudo docker commit CONTAINERID IMAGENAME`) or
+   `docker commit CONTAINERID IMAGENAME`) or
 3. When tagging an image id with an image name (e.g.,
-   `sudo docker tag IMAGEID IMAGENAME`).
+   `docker tag IMAGEID IMAGENAME`).
 
 A Fully Qualified Image Name (FQIN) can be made up of 3 parts:
 
@@ -29,7 +29,7 @@
 
 If you create a new repository which you want to share, you will need to
 set at least the `user_name`, as the `default` blank `user_name` prefix is
-reserved for official Docker images.
+reserved for [Official Repositories](/docker-hub/official_repos).
 
 For more information see [*Working with
 Repositories*](/userguide/dockerrepos/#working-with-the-repository)
diff --git a/docs/sources/userguide/dockerhub.md b/docs/sources/userguide/dockerhub.md
index 62438b9..2f7170d 100644
--- a/docs/sources/userguide/dockerhub.md
+++ b/docs/sources/userguide/dockerhub.md
@@ -2,7 +2,7 @@
 page_description: Introductory guide to getting an account on Docker Hub
 page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, central service, services, how to, container, containers, automation, collaboration, collaborators, registry, repo, repository, technology, github webhooks, trusted builds
 
-# Getting Started with Docker Hub
+# Getting started with Docker Hub
 
 
 This section provides a quick introduction to the [Docker Hub](https://hub.docker.com),
@@ -10,7 +10,7 @@
 
 The [Docker Hub](https://hub.docker.com) is a centralized resource for working with
 Docker and its components. Docker Hub helps you collaborate with colleagues and get the
-most out of Docker.To do this, it provides services such as:
+most out of Docker. To do this, it provides services such as:
 
 * Docker image hosting.
 * User authentication.
@@ -21,7 +21,7 @@
 In order to use Docker Hub, you will first need to register and create an account. Don't
 worry, creating an account is simple and free.
 
-## Creating a Docker Hub Account
+## Creating a Docker Hub account
 
 There are two ways for you to register and create an account:
 
@@ -42,7 +42,7 @@
 You can also create a Docker Hub account via the command line with the
 `docker login` command.
 
-    $ sudo docker login
+    $ docker login
 
 ### Confirm your email
 
@@ -58,7 +58,7 @@
 
 Or via the command line with the `docker login` command:
 
-    $ sudo docker login
+    $ docker login
 
 Your Docker Hub account is now active and ready to use.
 
diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md
index 6224479..d230515 100644
--- a/docs/sources/userguide/dockerimages.md
+++ b/docs/sources/userguide/dockerimages.md
@@ -1,8 +1,8 @@
-page_title: Working with Docker Images
+page_title: Working with Docker images
 page_description: How to work with Docker images.
 page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration
 
-# Working with Docker Images
+# Working with Docker images
 
 In the [introduction](/introduction/understanding-docker/) we've discovered that Docker
 images are the basis of containers. In the
@@ -27,7 +27,7 @@
 Let's start with listing the images we have locally on our host. You can
 do this using the `docker images` command like so:
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY       TAG      IMAGE ID      CREATED      VIRTUAL SIZE
     training/webapp  latest   fc77f57ad303  3 weeks ago  280.5 MB
     ubuntu           13.10    5e019ab7bf6d  4 weeks ago  180 MB
@@ -54,6 +54,13 @@
 * The tags for each image, for example `14.04`.
 * The image ID of each image.
 
+> **Note:**
+> Previously, the `docker images` command supported the `--tree` and `--dot`
+> arguments, which displayed different visualizations of the image data. Docker
+> core removed this functionality in the 1.7 version. If you liked this
+> functionality, you can still find it in
+> [the third-party dockviz tool](https://github.com/justone/dockviz).
+
 A repository potentially holds multiple variants of an image. In the case of
 our `ubuntu` image we can see multiple variants covering Ubuntu 10.04, 12.04,
 12.10, 13.04, 13.10 and 14.04. Each variant is identified by a tag and you can
@@ -63,11 +70,11 @@
 
 So when we run a container we refer to a tagged image like so:
 
-    $ sudo docker run -t -i ubuntu:14.04 /bin/bash
+    $ docker run -t -i ubuntu:14.04 /bin/bash
 
 If instead we wanted to run an Ubuntu 12.04 image we'd use:
 
-    $ sudo docker run -t -i ubuntu:12.04 /bin/bash
+    $ docker run -t -i ubuntu:12.04 /bin/bash
 
 If you don't specify a variant, for example you just use `ubuntu`, then Docker
 will default to using the `ubuntu:latest` image.
@@ -85,7 +92,7 @@
 can download it using the `docker pull` command. Let's say we'd like to
 download the `centos` image.
 
-    $ sudo docker pull centos
+    $ docker pull centos
     Pulling repository centos
     b7de3133ff98: Pulling dependent layers
     5cc9e91966f7: Pulling fs layer
@@ -99,7 +106,7 @@
 can run a container from this image and we won't have to wait to
 download the image.
 
-    $ sudo docker run -t -i centos /bin/bash
+    $ docker run -t -i centos /bin/bash
     bash-4.1#
 
 ## Finding images
@@ -117,7 +124,7 @@
 by using the `docker search` command to find all the images that contain the
 term `sinatra`.
 
-    $ sudo docker search sinatra
+    $ docker search sinatra
     NAME                                   DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
     training/sinatra                       Sinatra training image                          0                    [OK]
     marceldegraaf/sinatra                  Sinatra test app                                0
@@ -131,11 +138,11 @@
 We can see we've returned a lot of images that use the term `sinatra`. We've
 returned a list of image names, descriptions, Stars (which measure the social
 popularity of images - if a user likes an image then they can "star" it), and
-the Official and Automated build statuses. Official repositories are built and
-maintained by the [Stackbrew](https://github.com/docker/stackbrew) project,
-and Automated repositories are [Automated Builds](
-/userguide/dockerrepos/#automated-builds) that allow you to validate the source
-and content of an image.
+the Official and Automated build statuses.
+[Official Repositories](/docker-hub/official_repos) are a carefully curated set
+of Docker repositories supported by Docker, Inc.  Automated repositories are
+[Automated Builds](/userguide/dockerrepos/#automated-builds) that allow you to
+validate the source and content of an image.
 
 We've reviewed the images available to use and we decided to use the
 `training/sinatra` image. So far we've seen two types of images repositories,
@@ -152,11 +159,11 @@
 
 We've identified a suitable image, `training/sinatra`, and now we can download it using the `docker pull` command.
 
-    $ sudo docker pull training/sinatra
+    $ docker pull training/sinatra
 
 The team can now use this image by running their own containers.
 
-    $ sudo docker run -t -i training/sinatra /bin/bash
+    $ docker run -t -i training/sinatra /bin/bash
     root@a8cb6ce02d85:/#
 
 ## Creating our own images
@@ -174,7 +181,7 @@
 To update an image we first need to create a container from the image
 we'd like to update.
 
-    $ sudo docker run -t -i training/sinatra /bin/bash
+    $ docker run -t -i training/sinatra /bin/bash
     root@0b2616b0e5a8:/#
 
 > **Note:** 
@@ -192,7 +199,7 @@
 commit a copy of this container to an image using the `docker commit`
 command.
 
-    $ sudo docker commit -m "Added json gem" -a "Kate Smith" \
+    $ docker commit -m "Added json gem" -a "Kate Smith" \
     0b2616b0e5a8 ouruser/sinatra:v2
     4f177bd27a9ff0f6dc2a830403925b5360bfe0b93d476f7fc3231110e7f71b1c
 
@@ -215,7 +222,7 @@
 We can then look at our new `ouruser/sinatra` image using the `docker images`
 command.
 
-    $ sudo docker images
+    $ docker images
     REPOSITORY          TAG     IMAGE ID       CREATED       VIRTUAL SIZE
     training/sinatra    latest  5bc342fa0b91   10 hours ago  446.7 MB
     ouruser/sinatra     v2      3c59e02ddd1a   10 hours ago  446.7 MB
@@ -223,7 +230,7 @@
 
 To use our new image to create a container we can then:
 
-    $ sudo docker run -t -i ouruser/sinatra:v2 /bin/bash
+    $ docker run -t -i ouruser/sinatra:v2 /bin/bash
     root@78e82f680994:/#
 
 ### Building an image from a `Dockerfile`
@@ -242,6 +249,9 @@
     $ cd sinatra
     $ touch Dockerfile
 
+If you are using Boot2Docker on Windows, you may access your host
+directory by `cd` to `/c/Users/your_user_name`.
+
 Each instruction creates a new layer of the image. Let's look at a simple
 example now for building our own Sinatra image for our development team.
 
@@ -273,7 +283,7 @@
 
 Now let's take our `Dockerfile` and use the `docker build` command to build an image.
 
-    $ sudo docker build -t ouruser/sinatra:v2 .
+    $ docker build -t ouruser/sinatra:v2 .
     Sending build context to Docker daemon 2.048 kB
     Sending build context to Docker daemon 
     Step 0 : FROM ubuntu:14.04
@@ -467,7 +477,7 @@
 
 We can then create a container from our new image.
 
-    $ sudo docker run -t -i ouruser/sinatra:v2 /bin/bash
+    $ docker run -t -i ouruser/sinatra:v2 /bin/bash
     root@8196968dac35:/#
 
 > **Note:** 
@@ -489,19 +499,38 @@
 can do this using the `docker tag` command. Let's add a new tag to our
 `ouruser/sinatra` image.
 
-    $ sudo docker tag 5db5f8471261 ouruser/sinatra:devel
+    $ docker tag 5db5f8471261 ouruser/sinatra:devel
 
 The `docker tag` command takes the ID of the image, here `5db5f8471261`, and our
 user name, the repository name and the new tag.
 
 Let's see our new tag using the `docker images` command.
 
-    $ sudo docker images ouruser/sinatra
+    $ docker images ouruser/sinatra
     REPOSITORY          TAG     IMAGE ID      CREATED        VIRTUAL SIZE
     ouruser/sinatra     latest  5db5f8471261  11 hours ago   446.7 MB
     ouruser/sinatra     devel   5db5f8471261  11 hours ago   446.7 MB
     ouruser/sinatra     v2      5db5f8471261  11 hours ago   446.7 MB
 
+## Image Digests
+
+Images that use the v2 or later format have a content-addressable identifier
+called a `digest`. As long as the input used to generate the image is
+unchanged, the digest value is predictable. To list image digest values, use
+the `--digests` flag:
+
+    $ docker images --digests | head
+    REPOSITORY                         TAG                 DIGEST                                                                     IMAGE ID            CREATED             VIRTUAL SIZE
+    ouruser/sinatra                    latest              sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf    5db5f8471261        11 hours ago        446.7 MB
+
+When pushing or pulling to a 2.0 registry, the `push` or `pull` command
+output includes the image digest. You can `pull` using a digest value.
+
+    $ docker pull ouruser/sinatra@cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
+
+You can also reference by digest in `create`, `run`, and `rmi` commands, as well as the
+`FROM` image reference in a Dockerfile.
+
 ## Push an image to Docker Hub
 
 Once you've built or created a new image you can push it to [Docker
@@ -509,7 +538,7 @@
 allows you to share it with others, either publicly, or push it into [a
 private repository](https://registry.hub.docker.com/plans/).
 
-    $ sudo docker push ouruser/sinatra
+    $ docker push ouruser/sinatra
     The push refers to a repository [ouruser/sinatra] (len: 1)
     Sending image list
     Pushing repository ouruser/sinatra (3 tags)
@@ -523,7 +552,7 @@
 
 Let's delete the `training/sinatra` image as we don't need it anymore.
 
-    $ sudo docker rmi training/sinatra
+    $ docker rmi training/sinatra
     Untagged: training/sinatra:latest
     Deleted: 5bc342fa0b91cabf65246837015197eecfa24b2213ed6a51a8974ae250fedd8d
     Deleted: ed0fffdcdae5eb2c3a55549857a8be7fc8bc4241fb19ad714364cbfd7a56b22f
diff --git a/docs/sources/userguide/dockerizing.md b/docs/sources/userguide/dockerizing.md
index cc7bc8e..7124ba6 100644
--- a/docs/sources/userguide/dockerizing.md
+++ b/docs/sources/userguide/dockerizing.md
@@ -1,8 +1,8 @@
-page_title: Dockerizing Applications: A "Hello world"
+page_title: Dockerizing applications: A "Hello world"
 page_description: A simple "Hello world" exercise that introduced you to Docker.
 page_keywords: docker guide, docker, docker platform, virtualization framework, how to, dockerize, dockerizing apps, dockerizing applications, container, containers
 
-# Dockerizing Applications: A "Hello world"
+# Dockerizing applications: A "Hello world"
 
 *So what's this Docker thing all about?*
 
@@ -15,7 +15,7 @@
 
 Let's try it now.
 
-    $ sudo docker run ubuntu:14.04 /bin/echo 'Hello world'
+    $ docker run ubuntu:14.04 /bin/echo 'Hello world'
     Hello world
 
 And you just launched your first container!
@@ -48,12 +48,12 @@
 only run as long as the command you specify is active. Here, as soon as
 `Hello world` was echoed, the container stopped.
 
-## An Interactive Container
+## An interactive container
 
 Let's try the `docker run` command again, this time specifying a new
 command to run in our container.
 
-    $ sudo docker run -t -i ubuntu:14.04 /bin/bash
+    $ docker run -t -i ubuntu:14.04 /bin/bash
     root@af8bae53bdd3:/#
 
 Here we've again specified the `docker run` command and launched an
@@ -90,7 +90,7 @@
 As with our previous container, once the Bash shell process has
 finished, the container is stopped.
 
-## A Daemonized Hello world
+## A daemonized Hello world
 
 Now a container that runs a command and then exits has some uses but
 it's not overly helpful. Let's create a container that runs as a daemon,
@@ -98,7 +98,7 @@
 
 Again we can do this with the `docker run` command:
 
-    $ sudo docker run -d ubuntu:14.04 /bin/sh -c "while true; do echo hello world; sleep 1; done"
+    $ docker run -d ubuntu:14.04 /bin/sh -c "while true; do echo hello world; sleep 1; done"
     1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147
 
 Wait, what? Where's our "hello world" output? Let's look at what we've run here.
@@ -135,7 +135,7 @@
 the Docker daemon for information about all the containers it knows
 about.
 
-    $ sudo docker ps
+    $ docker ps
     CONTAINER ID  IMAGE         COMMAND               CREATED        STATUS       PORTS NAMES
     1e5535038e28  ubuntu:14.04  /bin/sh -c 'while tr  2 minutes ago  Up 1 minute        insane_babbage
 
@@ -155,7 +155,7 @@
 we're going to look inside the container using the `docker logs`
 command. Let's use the container name Docker assigned.
 
-    $ sudo docker logs insane_babbage
+    $ docker logs insane_babbage
     hello world
     hello world
     hello world
@@ -171,7 +171,7 @@
 after ourselves and stop our daemonized container. To do this we use the
 `docker stop` command.
 
-    $ sudo docker stop insane_babbage
+    $ docker stop insane_babbage
     insane_babbage
 
 The `docker stop` command tells Docker to politely stop the running
@@ -180,7 +180,7 @@
 
 Let's check it worked with the `docker ps` command.
 
-    $ sudo docker ps
+    $ docker ps
     CONTAINER ID  IMAGE         COMMAND               CREATED        STATUS       PORTS NAMES
 
 Excellent. Our container has been stopped.
diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md
index 79ba179..2b7d30c 100644
--- a/docs/sources/userguide/dockerlinks.md
+++ b/docs/sources/userguide/dockerlinks.md
@@ -1,8 +1,8 @@
-page_title: Linking Containers Together
+page_title: Linking containers together
 page_description: Learn how to connect Docker containers together.
 page_keywords: Examples, Usage, user guide, links, linking, docker, documentation, examples, names, name, container naming, port, map, network port, network
 
-# Linking Containers Together
+# Linking containers together
 
 In [the Using Docker section](/userguide/usingdocker), you saw how you can
 connect to a service running inside a Docker container via a network
@@ -11,12 +11,12 @@
 connecting via a network port and then we'll introduce you to another method of access:
 container linking.
 
-## Connect using Network port mapping
+## Connect using network port mapping
 
 In [the Using Docker section](/userguide/usingdocker), you created a
 container that ran a Python Flask application:
 
-    $ sudo docker run -d -P training/webapp python app.py
+    $ docker run -d -P training/webapp python app.py
 
 > **Note:** 
 > Containers have an internal network and an IP address
@@ -30,14 +30,15 @@
 range* on your Docker host. Next, when `docker ps` was run, you saw that port
 5000 in the container was bound to port 49155 on the host.
 
-    $ sudo docker ps nostalgic_morse
+    $ docker ps nostalgic_morse
     CONTAINER ID  IMAGE                   COMMAND       CREATED        STATUS        PORTS                    NAMES
     bc533791f3f5  training/webapp:latest  python app.py 5 seconds ago  Up 2 seconds  0.0.0.0:49155->5000/tcp  nostalgic_morse
 
 You also saw how you can bind a container's ports to a specific port using
-the `-p` flag:
+the `-p` flag. Here port 80 of the host is mapped to port 5000 of the 
+container:
 
-    $ sudo docker run -d -p 5000:5000 training/webapp python app.py
+    $ docker run -d -p 80:5000 training/webapp python app.py
 
 And you saw why this isn't such a great idea because it constrains you to
 only one container on that specific port.
@@ -47,26 +48,26 @@
 the host machine. But you can also specify a binding to a specific
 interface, for example only to the `localhost`.
 
-    $ sudo docker run -d -p 127.0.0.1:5000:5000 training/webapp python app.py
+    $ docker run -d -p 127.0.0.1:80:5000 training/webapp python app.py
 
-This would bind port 5000 inside the container to port 5000 on the
+This would bind port 5000 inside the container to port 80 on the
 `localhost` or `127.0.0.1` interface on the host machine.
 
 Or, to bind port 5000 of the container to a dynamic port but only on the
 `localhost`, you could use:
 
-    $ sudo docker run -d -p 127.0.0.1::5000 training/webapp python app.py
+    $ docker run -d -p 127.0.0.1::5000 training/webapp python app.py
 
 You can also bind UDP ports by adding a trailing `/udp`. For example:
 
-    $ sudo docker run -d -p 127.0.0.1:5000:5000/udp training/webapp python app.py
+    $ docker run -d -p 127.0.0.1:80:5000/udp training/webapp python app.py
 
 You also learned about the useful `docker port` shortcut which showed us the
 current port bindings. This is also useful for showing you specific port
 configurations. For example, if you've bound the container port to the
 `localhost` on the host machine, then the `docker port` output will reflect that.
 
-    $ sudo docker port nostalgic_morse 5000
+    $ docker port nostalgic_morse 5000
     127.0.0.1:49155
 
 > **Note:** 
@@ -98,22 +99,22 @@
 
 You can name your container by using the `--name` flag, for example:
 
-    $ sudo docker run -d -P --name web training/webapp python app.py
+    $ docker run -d -P --name web training/webapp python app.py
 
 This launches a new container and uses the `--name` flag to
 name the container `web`. You can see the container's name using the
 `docker ps` command.
 
-    $ sudo docker ps -l
+    $ docker ps -l
     CONTAINER ID  IMAGE                  COMMAND        CREATED       STATUS       PORTS                    NAMES
     aed84ee21bde  training/webapp:latest python app.py  12 hours ago  Up 2 seconds 0.0.0.0:49154->5000/tcp  web
 
 You can also use `docker inspect` to return the container's name.
 
-    $ sudo docker inspect -f "{{ .Name }}" aed84ee21bde
+    $ docker inspect -f "{{ .Name }}" aed84ee21bde
     /web
 
-> **Note:** 
+> **Note:**
 > Container names have to be unique. That means you can only call
 > one container `web`. If you want to re-use a container name you must delete
 > the old container (with `docker rm`) before you can create a new
@@ -129,7 +130,7 @@
 about the source. To create a link, you use the `--link` flag. First, create a new
 container, this time one containing a database.
 
-    $ sudo docker run -d --name db training/postgres
+    $ docker run -d --name db training/postgres
 
 This creates a new container called `db` from the `training/postgres`
 image, which contains a PostgreSQL database.
@@ -137,11 +138,11 @@
 Now, you need to delete the `web` container you created previously so you can replace it
 with a linked one:
 
-    $ sudo docker rm -f web
+    $ docker rm -f web
 
 Now, create a new `web` container and link it with your `db` container.
 
-    $ sudo docker run -d -P --name web --link db:db training/webapp python app.py
+    $ docker run -d -P --name web --link db:db training/webapp python app.py
 
 This will link the new `web` container with the `db` container you created
 earlier. The `--link` flag takes the form:
@@ -150,10 +151,18 @@
 
 Where `name` is the name of the container we're linking to and `alias` is an
 alias for the link name. You'll see how that alias gets used shortly.
+The `--link` flag also takes the form:
+
+	--link <name or id>
+
+In which case the alias will match the name. You could have written the previous
+example as:
+
+    $ docker run -d -P --name web --link db training/webapp python app.py
 
 Next, inspect your linked containers with `docker inspect`:
 
-    $ sudo docker inspect -f "{{ .HostConfig.Links }}" web
+    $ docker inspect -f "{{ .HostConfig.Links }}" web
     [/db:/web/db]
 
 You can see that the `web` container is now linked to the `db` container
@@ -174,7 +183,7 @@
 * Environment variables,
 * Updating the `/etc/hosts` file.
 
-### Environment Variables
+### Environment variables
 
 Docker creates several environment variables when you link containers. Docker
 automatically creates environment variables in the target container based on
@@ -239,7 +248,7 @@
 command to list the specified container's environment variables.
 
 ```
-    $ sudo docker run --rm --name web2 --link db:db training/webapp env
+    $ docker run --rm --name web2 --link db:db training/webapp env
     . . .
     DB_NAME=/web2/db
     DB_PORT=tcp://172.17.0.5:5432
@@ -276,7 +285,7 @@
 source container to the `/etc/hosts` file. Here's an entry for the `web`
 container:
 
-    $ sudo docker run -t -i --rm --link db:webdb training/webapp /bin/bash
+    $ docker run -t -i --rm --link db:webdb training/webapp /bin/bash
     root@aed84ee21bde:/opt/webapp# cat /etc/hosts
     172.17.0.7  aed84ee21bde
     . . .
@@ -314,9 +323,9 @@
 will be automatically updated with the source container's new IP address,
 allowing linked communication to continue.
 
-    $ sudo docker restart db
+    $ docker restart db
     db
-    $ sudo docker run -t -i --rm --link db:db training/webapp /bin/bash
+    $ docker run -t -i --rm --link db:db training/webapp /bin/bash
     root@aed84ee21bde:/opt/webapp# cat /etc/hosts
     172.17.0.7  aed84ee21bde
     . . .
diff --git a/docs/sources/userguide/dockerrepos.md b/docs/sources/userguide/dockerrepos.md
index d8dc44e..8fc2ba6 100644
--- a/docs/sources/userguide/dockerrepos.md
+++ b/docs/sources/userguide/dockerrepos.md
@@ -27,7 +27,7 @@
 already) and logging in. You can create your account directly on
 [Docker Hub](https://hub.docker.com/account/signup/), or by running:
 
-    $ sudo docker login
+    $ docker login
 
 This will prompt you for a user name, which will become the public namespace for your
 public repositories.
@@ -45,22 +45,22 @@
 interface or by using the command line interface. Searching can find images by image
 name, user name, or description:
 
-    $ sudo docker search centos
+    $ docker search centos
     NAME           DESCRIPTION                                     STARS     OFFICIAL   TRUSTED
     centos         Official CentOS 6 Image as of 12 April 2014     88
     tianon/centos  CentOS 5 and 6, created using rinse instea...   21
     ...
 
-There you can see two example results: `centos` and
-`tianon/centos`. The second result shows that it comes from
-the public repository of a user, named `tianon/`, while the first result,
-`centos`, doesn't explicitly list a repository which means that it comes from the
-trusted top-level namespace. The `/` character separates a user's
-repository from the image name.
+There you can see two example results: `centos` and `tianon/centos`. The second
+result shows that it comes from the public repository of a user, named
+`tianon/`, while the first result, `centos`, doesn't explicitly list a
+repository which means that it comes from the trusted top-level namespace for
+[Official Repositories](/docker-hub/official_repos). The `/` character separates
+a user's repository from the image name.
 
 Once you've found the image you want, you can download it with `docker pull <imagename>`:
 
-    $ sudo docker pull centos
+    $ docker pull centos
     Pulling repository centos
     0b443ba03958: Download complete
     539c0211cd76: Download complete
@@ -86,7 +86,7 @@
 
 Now you can push this repository to the registry designated by its name or tag.
 
-    $ sudo docker push yourname/newimage
+    $ docker push yourname/newimage
 
 The image will then be uploaded and available for use by your team-mates and/or the
 community.
@@ -101,7 +101,7 @@
 * Automated Builds
 * Webhooks
 
-### Private Repositories
+### Private repositories
 
 Sometimes you have images you don't want to make public and share with
 everyone. So Docker Hub allows you to have private repositories. You can
@@ -150,7 +150,7 @@
 You can create multiple Automated Builds per repository and configure them
 to point to specific `Dockerfile`'s or Git branches.
 
-#### Build Triggers
+#### Build triggers
 
 Automated Builds can also be triggered via a URL on Docker Hub. This
 allows you to rebuild an Automated build image on demand.
diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md
index d533224..c7126d7 100644
--- a/docs/sources/userguide/dockervolumes.md
+++ b/docs/sources/userguide/dockervolumes.md
@@ -1,8 +1,8 @@
-page_title: Managing Data in Containers
+page_title: Managing data in containers
 page_description: How to manage data inside your Docker containers.
 page_keywords: Examples, Usage, volume, docker, documentation, user guide, data, volumes
 
-# Managing Data in Containers
+# Managing data in containers
 
 So far we've been introduced to some [basic Docker
 concepts](/userguide/usingdocker/), seen how to work with [Docker
@@ -25,8 +25,8 @@
 useful features for persistent or shared data:
 
 - Volumes are initialized when a container is created. If the container's
-  base image contains data at the specified mount point, that data is 
-  copied into the new volume.
+  base image contains data at the specified mount point, that existing data is 
+  copied into the new volume upon volume initialization.
 - Data volumes can be shared and reused among containers.
 - Changes to a data volume are made directly.
 - Changes to a data volume will not be included when you update an image.
@@ -44,7 +44,7 @@
 to mount multiple data volumes. Let's mount a single volume now in our web
 application container.
 
-    $ sudo docker run -d -P --name web -v /webapp training/webapp python app.py
+    $ docker run -d -P --name web -v /webapp training/webapp python app.py
 
 This will create a new volume inside a container at `/webapp`.
 
@@ -52,7 +52,28 @@
 > You can also use the `VOLUME` instruction in a `Dockerfile` to add one or
 > more new volumes to any container created from that image.
 
-### Mount a Host Directory as a Data Volume
+### Locating a volume
+
+You can locate the volume on the host by utilizing the 'docker inspect' command.
+
+    $ docker inspect web
+
+The output will provide details on the container configurations including the
+volumes. The output should look something similar to the following:
+
+    ...
+    "Volumes": {
+        "/webapp": "/var/lib/docker/volumes/fac362...80535"
+    },
+    "VolumesRW": {
+        "/webapp": true
+    }
+    ...
+
+You will notice in the above 'Volumes' is specifying the location on the host and 
+'VolumesRW' is specifying that the volume is read/write.
+
+### Mount a host directory as a data volume
 
 In addition to creating a volume using the `-v` flag you can also mount a
 directory from your Docker daemon's host into a container.
@@ -65,7 +86,7 @@
 > `docker run -v /c/Users/<path>:/<container path ...` (Windows). All other paths
 > come from the Boot2Docker virtual machine's filesystem.
 
-    $ sudo docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py
+    $ docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py
 
 This will mount the host directory, `/src/webapp`, into the container at
 `/opt/webapp`.
@@ -90,17 +111,17 @@
 Docker defaults to a read-write volume but we can also mount a directory
 read-only.
 
-    $ sudo docker run -d -P --name web -v /src/webapp:/opt/webapp:ro training/webapp python app.py
+    $ docker run -d -P --name web -v /src/webapp:/opt/webapp:ro training/webapp python app.py
 
 Here we've mounted the same `/src/webapp` directory but we've added the `ro`
 option to specify that the mount should be read-only.
 
-### Mount a Host File as a Data Volume
+### Mount a host file as a data volume
 
 The `-v` flag can also be used to mount a single file  - instead of *just* 
 directories - from the host machine.
 
-    $ sudo docker run --rm -it -v ~/.bash_history:/.bash_history ubuntu /bin/bash
+    $ docker run --rm -it -v ~/.bash_history:/.bash_history ubuntu /bin/bash
 
 This will drop you into a bash shell in a new container, you will have your bash 
 history from the host and when you exit the container, the host will have the 
@@ -113,7 +134,7 @@
 > you want to edit the mounted file, it is often easiest to instead mount the 
 > parent directory.
 
-## Creating and mounting a Data Volume Container
+## Creating and mounting a data volume container
 
 If you have some persistent data that you want to share between
 containers, or want to use from non-persistent containers, it's best to
@@ -124,15 +145,15 @@
 While this container doesn't run an application, it reuses the `training/postgres`
 image so that all containers are using layers in common, saving disk space.
 
-    $ sudo docker create -v /dbdata --name dbdata training/postgres /bin/true
+    $ docker create -v /dbdata --name dbdata training/postgres /bin/true
 
 You can then use the `--volumes-from` flag to mount the `/dbdata` volume in another container.
 
-    $ sudo docker run -d --volumes-from dbdata --name db1 training/postgres
+    $ docker run -d --volumes-from dbdata --name db1 training/postgres
 
 And another:
 
-    $ sudo docker run -d --volumes-from dbdata --name db2 training/postgres
+    $ docker run -d --volumes-from dbdata --name db2 training/postgres
 
 In this case, if the `postgres` image contained a directory called `/dbdata`
 then mounting the volumes from the `dbdata` container hides the
@@ -145,7 +166,7 @@
 You can also extend the chain by mounting the volume that came from the
 `dbdata` container in yet another container via the `db1` or `db2` containers.
 
-    $ sudo docker run -d --name db3 --volumes-from db1 training/postgres
+    $ docker run -d --name db3 --volumes-from db1 training/postgres
 
 If you remove containers that mount volumes, including the initial `dbdata`
 container, or the subsequent containers `db1` and `db2`, the volumes will not
@@ -168,7 +189,7 @@
 `--volumes-from` flag to create a new container that mounts that volume,
 like so:
 
-    $ sudo docker run --volumes-from dbdata -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata
+    $ docker run --volumes-from dbdata -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata
 
 Here we've launched a new container and mounted the volume from the
 `dbdata` container. We've then mounted a local host directory as
@@ -180,11 +201,11 @@
 You could then restore it to the same container, or another that you've made
 elsewhere. Create a new container.
 
-    $ sudo docker run -v /dbdata --name dbdata2 ubuntu /bin/bash
+    $ docker run -v /dbdata --name dbdata2 ubuntu /bin/bash
 
 Then un-tar the backup file in the new container's data volume.
 
-    $ sudo docker run --volumes-from dbdata2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
+    $ docker run --volumes-from dbdata2 -v $(pwd):/backup ubuntu cd /dbdata && tar xvf /backup/backup.tar
 
 You can use the techniques above to automate backup, migration and
 restore testing using your preferred tools.
diff --git a/docs/sources/userguide/index.md b/docs/sources/userguide/index.md
index d0dbdb8..9cc1c6d 100644
--- a/docs/sources/userguide/index.md
+++ b/docs/sources/userguide/index.md
@@ -1,8 +1,8 @@
-page_title: The Docker User Guide
-page_description: The Docker User Guide home page
+page_title: The Docker user guide
+page_description: The Docker user guide home page
 page_keywords: docker, introduction, documentation, about, technology, docker.io, user, guide, user's, manual, platform, framework, virtualization, home, intro
 
-# Welcome to the Docker User Guide
+# Welcome to the Docker user guide
 
 In the [Introduction](/) you got a taste of what Docker is and how it
 works. In this guide we're going to take you through the fundamentals of
@@ -19,7 +19,7 @@
 We've broken this guide into major sections that take you through
 the Docker life cycle:
 
-## Getting Started with Docker Hub
+## Getting started with Docker Hub
 
 *How do I use Docker Hub?*
 
@@ -29,7 +29,7 @@
 
 Go to [Using Docker Hub](/userguide/dockerhub).
 
-## Dockerizing Applications: A "Hello world"
+## Dockerizing applications: A "Hello world"
 
 *How do I run applications inside containers?*
 
@@ -38,7 +38,7 @@
 
 Go to [Dockerizing Applications](/userguide/dockerizing).
 
-## Working with Containers
+## Working with containers
 
 *How do I manage my containers?*
 
@@ -48,7 +48,7 @@
 
 Go to [Working With Containers](/userguide/usingdocker).
 
-## Working with Docker Images
+## Working with Docker images
 
 *How can I access, share and build my own images?*
 
@@ -57,7 +57,7 @@
 
 Go to [Working with Docker Images](/userguide/dockerimages).
 
-## Linking Containers Together
+## Linking containers together
 
 Until now we've seen how to build individual applications inside Docker
 containers. Now learn how to build whole application stacks with Docker
@@ -65,7 +65,7 @@
 
 Go to [Linking Containers Together](/userguide/dockerlinks).
 
-## Managing Data in Containers
+## Managing data in containers
 
 Now we know how to link Docker containers together the next step is
 learning how to manage data, volumes and mounts inside our containers.
diff --git a/docs/sources/userguide/labels-custom-metadata.md b/docs/sources/userguide/labels-custom-metadata.md
index 7cf25c0..79ac42e 100644
--- a/docs/sources/userguide/labels-custom-metadata.md
+++ b/docs/sources/userguide/labels-custom-metadata.md
@@ -129,10 +129,14 @@
     }
     ...
 
-    $ docker inspect -f "{{json .Labels }}" 4fa6e0f0c678
+    # Inspect labels on container
+    $ docker inspect -f "{{json .Config.Labels }}" 4fa6e0f0c678
 
     {"Vendor":"ACME Incorporated","com.example.is-beta":"","com.example.version":"0.0.1-beta","com.example.release-date":"2015-02-12"}
 
+    # Inspect labels on images
+    $ docker inspect -f "{{json .ContainerConfig.Labels }}" myimage
+
 
 ## Query labels
 
@@ -171,6 +175,7 @@
      Backing Filesystem: extfs
      Dirs: 697
     Execution Driver: native-0.2
+    Logging Driver: json-file
     Kernel Version: 3.13.0-32-generic
     Operating System: Ubuntu 14.04.1 LTS
     CPUs: 1
@@ -179,7 +184,7 @@
     ID: RC3P:JTCT:32YS:XYSB:YUBG:VFED:AAJZ:W3YW:76XO:D7NN:TEVU:UCRW
     Debug mode (server): false
     Debug mode (client): true
-    Fds: 11
+    File Descriptors: 11
     Goroutines: 14
     EventsListeners: 0
     Init Path: /usr/bin/docker
diff --git a/docs/sources/userguide/level1.md b/docs/sources/userguide/level1.md
index cca77dc..320fbfe 100644
--- a/docs/sources/userguide/level1.md
+++ b/docs/sources/userguide/level1.md
@@ -1,10 +1,10 @@
-page_title: Docker Images Test
+page_title: Docker images test
 page_description: How to work with Docker images.
 page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration
 
 <a title="back" class="dockerfile back" href="/userguide/dockerimages/#creating-our-own-images">Back</a>
 
-# Dockerfile Tutorial
+# Dockerfile tutorial
 
 ## Test your Dockerfile knowledge - Level 1
 
diff --git a/docs/sources/userguide/level2.md b/docs/sources/userguide/level2.md
index fe6654e..96e91a1 100644
--- a/docs/sources/userguide/level2.md
+++ b/docs/sources/userguide/level2.md
@@ -1,10 +1,10 @@
-page_title: Docker Images Test
+page_title: Docker images test
 page_description: How to work with Docker images.
 page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration
 
 <a title="back" class="dockerfile back" href="/userguide/dockerimages/#creating-our-own-images">Back</a>
 
-#Dockerfile Tutorial
+#Dockerfile tutorial
 
 ## Test your Dockerfile knowledge - Level 2
 
diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md
index 8d57def..2c73929 100644
--- a/docs/sources/userguide/usingdocker.md
+++ b/docs/sources/userguide/usingdocker.md
@@ -1,8 +1,8 @@
-page_title: Working with Containers
+page_title: Working with containers
 page_description: Learn how to manage and operate Docker containers.
 page_keywords: docker, the docker guide, documentation, docker.io, monitoring containers, docker top, docker inspect, docker port, ports, docker logs, log, Logs
 
-# Working with Containers
+# Working with containers
 
 In the [last section of the Docker User Guide](/userguide/dockerizing)
 we launched our first containers. We launched two containers using the
@@ -27,12 +27,12 @@
 
     # Usage:  [sudo] docker [command] [flags] [arguments] ..
     # Example:
-    $ sudo docker run -i -t ubuntu /bin/bash
+    $ docker run -i -t ubuntu /bin/bash
 
 Let's see this in action by using the `docker version` command to return
 version information on the currently installed Docker client and daemon.
 
-    $ sudo docker version
+    $ docker version
 
 This command will not only provide you the version of Docker client and
 daemon you are using, but also the version of Go (the programming
@@ -49,49 +49,31 @@
 
     Last stable version: 0.8.0
 
-### Seeing what the Docker client can do
+## Get Docker command help
 
-We can see all of the commands available to us with the Docker client by
-running the `docker` binary without any options.
+You can display the help for specific Docker commands. The help details the
+options and their usage. To see a list of all the possible commands, use the
+following:
 
-    $ sudo docker
+    $ docker --help
 
-You will see a list of all currently available commands.
+To see usage for a specific command, specify the command with the `--help` flag:
 
-    Commands:
-         attach    Attach to a running container
-         build     Build an image from a Dockerfile
-         commit    Create a new image from a container's changes
-    . . .
-
-### Seeing Docker command usage
-
-You can also zoom in and review the usage for specific Docker commands.
-
-Try typing Docker followed with a `[command]` to see the usage for that
-command:
-
-    $ sudo docker attach
-    Help output . . .
-
-Or you can also pass the `--help` flag to the `docker` binary.
-
-    $ sudo docker attach --help
-
-This will display the help text and all available flags:
+    $ docker attach --help
 
     Usage: docker attach [OPTIONS] CONTAINER
 
     Attach to a running container
 
-      --no-stdin=false: Do not attach stdin
-      --sig-proxy=true: Proxify all received signal to the process (non-TTY mode only)
+      --help=false        Print usage
+      --no-stdin=false    Do not attach stdin
+      --sig-proxy=true    Proxy all received signals to the process
 
 > **Note:** 
-> You can see a full list of Docker's commands
-> [here](/reference/commandline/cli/).
+> For further details and examples of each command, see the
+> [command reference](/reference/commandline/cli/) in this guide.
 
-## Running a Web Application in Docker
+## Running a web application in Docker
 
 So now we've learnt a bit more about the `docker` client let's move onto
 the important stuff: running more containers. So far none of the
@@ -102,7 +84,7 @@
 For our web application we're going to run a Python Flask application.
 Let's start with a `docker run` command.
 
-    $ sudo docker run -d -P training/webapp python app.py
+    $ docker run -d -P training/webapp python app.py
 
 Let's review what our command did. We've specified two flags: `-d` and
 `-P`. We've already seen the `-d` flag which tells Docker to run the
@@ -121,11 +103,11 @@
 > reference](/reference/commandline/cli/#run) and the [Docker Run
 > Reference](/reference/run/).
 
-## Viewing our Web Application Container
+## Viewing our web application container
 
 Now let's see our running container using the `docker ps` command.
 
-    $ sudo docker ps -l
+    $ docker ps -l
     CONTAINER ID  IMAGE                   COMMAND       CREATED        STATUS        PORTS                    NAMES
     bc533791f3f5  training/webapp:latest  python app.py 5 seconds ago  Up 2 seconds  0.0.0.0:49155->5000/tcp  nostalgic_morse
 
@@ -160,9 +142,9 @@
 to 61000) on the local Docker host. We can also bind Docker containers to
 specific ports using the `-p` flag, for example:
 
-    $ sudo docker run -d -p 5000:5000 training/webapp python app.py
+    $ docker run -d -p 80:5000 training/webapp python app.py
 
-This would map port 5000 inside our container to port 5000 on our local
+This would map port 5000 inside our container to port 80 on our local
 host. You might be asking about now: why wouldn't we just want to always
 use 1:1 port mappings in Docker containers rather than mapping to high
 ports? Well 1:1 mappings have the constraint of only being able to map
@@ -179,35 +161,35 @@
 Our Python application is live!
 
 > **Note:**
-> If you have used the boot2docker virtual machine on OS X, Windows or Linux,
+> If you have used the `boot2docker` virtual machine on OS X, Windows or Linux,
 > you'll need to get the IP of the virtual host instead of using localhost.
-> You can do this by running the following in
-> the boot2docker shell.
+> You can do this by running the following outside of the `boot2docker` shell
+> (i.e., from your comment line or terminal application).
 > 
 >     $ boot2docker ip
 >     The VM's Host only interface IP address is: 192.168.59.103
 > 
 > In this case you'd browse to http://192.168.59.103:49155 for the above example.
 
-## A Network Port Shortcut
+## A network port shortcut
 
 Using the `docker ps` command to return the mapped port is a bit clumsy so
 Docker has a useful shortcut we can use: `docker port`. To use `docker port` we
 specify the ID or name of our container and then the port for which we need the
 corresponding public-facing port.
 
-    $ sudo docker port nostalgic_morse 5000
+    $ docker port nostalgic_morse 5000
     0.0.0.0:49155
 
 In this case we've looked up what port is mapped externally to port 5000 inside
 the container.
 
-## Viewing the Web Application's Logs
+## Viewing the web application's logs
 
 Let's also find out a bit more about what's happening with our application and
 use another of the commands we've learnt, `docker logs`.
 
-    $ sudo docker logs -f nostalgic_morse
+    $ docker logs -f nostalgic_morse
     * Running on http://0.0.0.0:5000/
     10.0.2.2 - - [23/May/2014 20:16:31] "GET / HTTP/1.1" 200 -
     10.0.2.2 - - [23/May/2014 20:16:31] "GET /favicon.ico HTTP/1.1" 404 -
@@ -217,25 +199,25 @@
 container's standard out. We can see here the logs from Flask showing
 the application running on port 5000 and the access log entries for it.
 
-## Looking at our Web Application Container's processes
+## Looking at our web application container's processes
 
 In addition to the container's logs we can also examine the processes
 running inside it using the `docker top` command.
 
-    $ sudo docker top nostalgic_morse
+    $ docker top nostalgic_morse
     PID                 USER                COMMAND
     854                 root                python app.py
 
 Here we can see our `python app.py` command is the only process running inside
 the container.
 
-## Inspecting our Web Application Container
+## Inspecting our web application container
 
 Lastly, we can take a low-level dive into our Docker container using the
 `docker inspect` command. It returns a JSON hash of useful configuration
 and status information about Docker containers.
 
-    $ sudo docker inspect nostalgic_morse
+    $ docker inspect nostalgic_morse
 
 Let's see a sample of that JSON output.
 
@@ -255,30 +237,30 @@
 We can also narrow down the information we want to return by requesting a
 specific element, for example to return the container's IP address we would:
 
-    $ sudo docker inspect -f '{{ .NetworkSettings.IPAddress }}' nostalgic_morse
+    $ docker inspect -f '{{ .NetworkSettings.IPAddress }}' nostalgic_morse
     172.17.0.5
 
-## Stopping our Web Application Container
+## Stopping our web application container
 
 Okay we've seen web application working. Now let's stop it using the
 `docker stop` command and the name of our container: `nostalgic_morse`.
 
-    $ sudo docker stop nostalgic_morse
+    $ docker stop nostalgic_morse
     nostalgic_morse
 
 We can now use the `docker ps` command to check if the container has
 been stopped.
 
-    $ sudo docker ps -l
+    $ docker ps -l
 
-## Restarting our Web Application Container
+## Restarting our web application container
 
 Oops! Just after you stopped the container you get a call to say another
 developer needs the container back. From here you have two choices: you
 can create a new container or restart the old one. Let's look at
 starting our previous container back up.
 
-    $ sudo docker start nostalgic_morse
+    $ docker start nostalgic_morse
     nostalgic_morse
 
 Now quickly run `docker ps -l` again to see the running container is
@@ -289,22 +271,22 @@
 > Also available is the `docker restart` command that runs a stop and
 > then start on the container.
 
-## Removing our Web Application Container
+## Removing our web application container
 
 Your colleague has let you know that they've now finished with the container
 and won't need it again. So let's remove it using the `docker rm` command.
 
-    $ sudo docker rm nostalgic_morse
+    $ docker rm nostalgic_morse
     Error: Impossible to remove a running container, please stop it first or use -f
     2014/05/24 08:12:56 Error: failed to remove one or more containers
 
-What's happened? We can't actually remove a running container. This protects
+What happened? We can't actually remove a running container. This protects
 you from accidentally removing a running container you might need. Let's try
 this again by stopping the container first.
 
-    $ sudo docker stop nostalgic_morse
+    $ docker stop nostalgic_morse
     nostalgic_morse
-    $ sudo docker rm nostalgic_morse
+    $ docker rm nostalgic_morse
     nostalgic_morse
 
 And now our container is stopped and deleted.
diff --git a/engine/engine.go b/engine/engine.go
deleted file mode 100644
index 6053234..0000000
--- a/engine/engine.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package engine
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/docker/docker/pkg/common"
-	"github.com/docker/docker/pkg/ioutils"
-)
-
-// Installer is a standard interface for objects which can "install" themselves
-// on an engine by registering handlers.
-// This can be used as an entrypoint for external plugins etc.
-type Installer interface {
-	Install(*Engine) error
-}
-
-type Handler func(*Job) Status
-
-var globalHandlers map[string]Handler
-
-func init() {
-	globalHandlers = make(map[string]Handler)
-}
-
-func Register(name string, handler Handler) error {
-	_, exists := globalHandlers[name]
-	if exists {
-		return fmt.Errorf("Can't overwrite global handler for command %s", name)
-	}
-	globalHandlers[name] = handler
-	return nil
-}
-
-func unregister(name string) {
-	delete(globalHandlers, name)
-}
-
-// The Engine is the core of Docker.
-// It acts as a store for *containers*, and allows manipulation of these
-// containers by executing *jobs*.
-type Engine struct {
-	handlers     map[string]Handler
-	catchall     Handler
-	hack         Hack // data for temporary hackery (see hack.go)
-	id           string
-	Stdout       io.Writer
-	Stderr       io.Writer
-	Stdin        io.Reader
-	Logging      bool
-	tasks        sync.WaitGroup
-	l            sync.RWMutex // lock for shutdown
-	shutdownWait sync.WaitGroup
-	shutdown     bool
-	onShutdown   []func() // shutdown handlers
-}
-
-func (eng *Engine) Register(name string, handler Handler) error {
-	_, exists := eng.handlers[name]
-	if exists {
-		return fmt.Errorf("Can't overwrite handler for command %s", name)
-	}
-	eng.handlers[name] = handler
-	return nil
-}
-
-func (eng *Engine) RegisterCatchall(catchall Handler) {
-	eng.catchall = catchall
-}
-
-// New initializes a new engine.
-func New() *Engine {
-	eng := &Engine{
-		handlers: make(map[string]Handler),
-		id:       common.RandomString(),
-		Stdout:   os.Stdout,
-		Stderr:   os.Stderr,
-		Stdin:    os.Stdin,
-		Logging:  true,
-	}
-	eng.Register("commands", func(job *Job) Status {
-		for _, name := range eng.commands() {
-			job.Printf("%s\n", name)
-		}
-		return StatusOK
-	})
-	// Copy existing global handlers
-	for k, v := range globalHandlers {
-		eng.handlers[k] = v
-	}
-	return eng
-}
-
-func (eng *Engine) String() string {
-	return fmt.Sprintf("%s", eng.id[:8])
-}
-
-// Commands returns a list of all currently registered commands,
-// sorted alphabetically.
-func (eng *Engine) commands() []string {
-	names := make([]string, 0, len(eng.handlers))
-	for name := range eng.handlers {
-		names = append(names, name)
-	}
-	sort.Strings(names)
-	return names
-}
-
-// Job creates a new job which can later be executed.
-// This function mimics `Command` from the standard os/exec package.
-func (eng *Engine) Job(name string, args ...string) *Job {
-	job := &Job{
-		Eng:     eng,
-		Name:    name,
-		Args:    args,
-		Stdin:   NewInput(),
-		Stdout:  NewOutput(),
-		Stderr:  NewOutput(),
-		env:     &Env{},
-		closeIO: true,
-
-		cancelled: make(chan struct{}),
-	}
-	if eng.Logging {
-		job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr))
-	}
-
-	// Catchall is shadowed by specific Register.
-	if handler, exists := eng.handlers[name]; exists {
-		job.handler = handler
-	} else if eng.catchall != nil && name != "" {
-		// empty job names are illegal, catchall or not.
-		job.handler = eng.catchall
-	}
-	return job
-}
-
-// OnShutdown registers a new callback to be called by Shutdown.
-// This is typically used by services to perform cleanup.
-func (eng *Engine) OnShutdown(h func()) {
-	eng.l.Lock()
-	eng.onShutdown = append(eng.onShutdown, h)
-	eng.shutdownWait.Add(1)
-	eng.l.Unlock()
-}
-
-// Shutdown permanently shuts down eng as follows:
-// - It refuses all new jobs, permanently.
-// - It waits for all active jobs to complete (with no timeout)
-// - It calls all shutdown handlers concurrently (if any)
-// - It returns when all handlers complete, or after 15 seconds,
-//	whichever happens first.
-func (eng *Engine) Shutdown() {
-	eng.l.Lock()
-	if eng.shutdown {
-		eng.l.Unlock()
-		eng.shutdownWait.Wait()
-		return
-	}
-	eng.shutdown = true
-	eng.l.Unlock()
-	// We don't need to protect the rest with a lock, to allow
-	// for other calls to immediately fail with "shutdown" instead
-	// of hanging for 15 seconds.
-	// This requires all concurrent calls to check for shutdown, otherwise
-	// it might cause a race.
-
-	// Wait for all jobs to complete.
-	// Timeout after 5 seconds.
-	tasksDone := make(chan struct{})
-	go func() {
-		eng.tasks.Wait()
-		close(tasksDone)
-	}()
-	select {
-	case <-time.After(time.Second * 5):
-	case <-tasksDone:
-	}
-
-	// Call shutdown handlers, if any.
-	// Timeout after 10 seconds.
-	for _, h := range eng.onShutdown {
-		go func(h func()) {
-			h()
-			eng.shutdownWait.Done()
-		}(h)
-	}
-	done := make(chan struct{})
-	go func() {
-		eng.shutdownWait.Wait()
-		close(done)
-	}()
-	select {
-	case <-time.After(time.Second * 10):
-	case <-done:
-	}
-	return
-}
-
-// IsShutdown returns true if the engine is in the process
-// of shutting down, or already shut down.
-// Otherwise it returns false.
-func (eng *Engine) IsShutdown() bool {
-	eng.l.RLock()
-	defer eng.l.RUnlock()
-	return eng.shutdown
-}
-
-// ParseJob creates a new job from a text description using a shell-like syntax.
-//
-// The following syntax is used to parse `input`:
-//
-// * Words are separated using standard whitespaces as separators.
-// * Quotes and backslashes are not interpreted.
-// * Words of the form 'KEY=[VALUE]' are added to the job environment.
-// * All other words are added to the job arguments.
-//
-// For example:
-//
-// job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world")
-//
-// The resulting job will have:
-//	job.Args={"echo", "hello", "world"}
-//	job.Env={"VERBOSE":"1", "TEST":"true"}
-//
-func (eng *Engine) ParseJob(input string) (*Job, error) {
-	// FIXME: use a full-featured command parser
-	scanner := bufio.NewScanner(strings.NewReader(input))
-	scanner.Split(bufio.ScanWords)
-	var (
-		cmd []string
-		env Env
-	)
-	for scanner.Scan() {
-		word := scanner.Text()
-		kv := strings.SplitN(word, "=", 2)
-		if len(kv) == 2 {
-			env.Set(kv[0], kv[1])
-		} else {
-			cmd = append(cmd, word)
-		}
-	}
-	if len(cmd) == 0 {
-		return nil, fmt.Errorf("empty command: '%s'", input)
-	}
-	job := eng.Job(cmd[0], cmd[1:]...)
-	job.Env().Init(&env)
-	return job, nil
-}
diff --git a/engine/engine_test.go b/engine/engine_test.go
deleted file mode 100644
index 96c3f0d..0000000
--- a/engine/engine_test.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-
-	"github.com/docker/docker/pkg/ioutils"
-)
-
-func TestRegister(t *testing.T) {
-	if err := Register("dummy1", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := Register("dummy1", nil); err == nil {
-		t.Fatalf("Expecting error, got none")
-	}
-	// Register is global so let's cleanup to avoid conflicts
-	defer unregister("dummy1")
-
-	eng := New()
-
-	//Should fail because global handlers are copied
-	//at the engine creation
-	if err := eng.Register("dummy1", nil); err == nil {
-		t.Fatalf("Expecting error, got none")
-	}
-
-	if err := eng.Register("dummy2", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Register("dummy2", nil); err == nil {
-		t.Fatalf("Expecting error, got none")
-	}
-	defer unregister("dummy2")
-}
-
-func TestJob(t *testing.T) {
-	eng := New()
-	job1 := eng.Job("dummy1", "--level=awesome")
-
-	if job1.handler != nil {
-		t.Fatalf("job1.handler should be empty")
-	}
-
-	h := func(j *Job) Status {
-		j.Printf("%s\n", j.Name)
-		return 42
-	}
-
-	eng.Register("dummy2", h)
-	defer unregister("dummy2")
-	job2 := eng.Job("dummy2", "--level=awesome")
-
-	if job2.handler == nil {
-		t.Fatalf("job2.handler shouldn't be nil")
-	}
-
-	if job2.handler(job2) != 42 {
-		t.Fatalf("handler dummy2 was not found in job2")
-	}
-}
-
-func TestEngineShutdown(t *testing.T) {
-	eng := New()
-	if eng.IsShutdown() {
-		t.Fatalf("Engine should not show as shutdown")
-	}
-	eng.Shutdown()
-	if !eng.IsShutdown() {
-		t.Fatalf("Engine should show as shutdown")
-	}
-}
-
-func TestEngineCommands(t *testing.T) {
-	eng := New()
-	handler := func(job *Job) Status { return StatusOK }
-	eng.Register("foo", handler)
-	eng.Register("bar", handler)
-	eng.Register("echo", handler)
-	eng.Register("die", handler)
-	var output bytes.Buffer
-	commands := eng.Job("commands")
-	commands.Stdout.Add(&output)
-	commands.Run()
-	expected := "bar\ncommands\ndie\necho\nfoo\n"
-	if result := output.String(); result != expected {
-		t.Fatalf("Unexpected output:\nExpected = %v\nResult   = %v\n", expected, result)
-	}
-}
-
-func TestEngineString(t *testing.T) {
-	eng1 := New()
-	eng2 := New()
-	s1 := eng1.String()
-	s2 := eng2.String()
-	if eng1 == eng2 {
-		t.Fatalf("Different engines should have different names (%v == %v)", s1, s2)
-	}
-}
-
-func TestParseJob(t *testing.T) {
-	eng := New()
-	// Verify that the resulting job calls to the right place
-	var called bool
-	eng.Register("echo", func(job *Job) Status {
-		called = true
-		return StatusOK
-	})
-	input := "echo DEBUG=1 hello world VERBOSITY=42"
-	job, err := eng.ParseJob(input)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if job.Name != "echo" {
-		t.Fatalf("Invalid job name: %v", job.Name)
-	}
-	if strings.Join(job.Args, ":::") != "hello:::world" {
-		t.Fatalf("Invalid job args: %v", job.Args)
-	}
-	if job.Env().Get("DEBUG") != "1" {
-		t.Fatalf("Invalid job env: %v", job.Env)
-	}
-	if job.Env().Get("VERBOSITY") != "42" {
-		t.Fatalf("Invalid job env: %v", job.Env)
-	}
-	if len(job.Env().Map()) != 2 {
-		t.Fatalf("Invalid job env: %v", job.Env)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	if !called {
-		t.Fatalf("Job was not called")
-	}
-}
-
-func TestCatchallEmptyName(t *testing.T) {
-	eng := New()
-	var called bool
-	eng.RegisterCatchall(func(job *Job) Status {
-		called = true
-		return StatusOK
-	})
-	err := eng.Job("").Run()
-	if err == nil {
-		t.Fatalf("Engine.Job(\"\").Run() should return an error")
-	}
-	if called {
-		t.Fatalf("Engine.Job(\"\").Run() should return an error")
-	}
-}
-
-// Ensure that a job within a job both using the same underlying standard
-// output writer does not close the output of the outer job when the inner
-// job's stdout is wrapped with a NopCloser. When not wrapped, it should
-// close the outer job's output.
-func TestNestedJobSharedOutput(t *testing.T) {
-	var (
-		outerHandler Handler
-		innerHandler Handler
-		wrapOutput   bool
-	)
-
-	outerHandler = func(job *Job) Status {
-		job.Stdout.Write([]byte("outer1"))
-
-		innerJob := job.Eng.Job("innerJob")
-
-		if wrapOutput {
-			innerJob.Stdout.Add(ioutils.NopWriteCloser(job.Stdout))
-		} else {
-			innerJob.Stdout.Add(job.Stdout)
-		}
-
-		if err := innerJob.Run(); err != nil {
-			t.Fatal(err)
-		}
-
-		// If wrapOutput was *false* this write will do nothing.
-		// FIXME (jlhawn): It should cause an error to write to
-		// closed output.
-		job.Stdout.Write([]byte(" outer2"))
-
-		return StatusOK
-	}
-
-	innerHandler = func(job *Job) Status {
-		job.Stdout.Write([]byte(" inner"))
-
-		return StatusOK
-	}
-
-	eng := New()
-	eng.Register("outerJob", outerHandler)
-	eng.Register("innerJob", innerHandler)
-
-	// wrapOutput starts *false* so the expected
-	// output of running the outer job will be:
-	//
-	//     "outer1 inner"
-	//
-	outBuf := new(bytes.Buffer)
-	outerJob := eng.Job("outerJob")
-	outerJob.Stdout.Add(outBuf)
-
-	if err := outerJob.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	expectedOutput := "outer1 inner"
-	if outBuf.String() != expectedOutput {
-		t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String())
-	}
-
-	// Set wrapOutput to true so that the expected
-	// output of running the outer job will be:
-	//
-	//     "outer1 inner outer2"
-	//
-	wrapOutput = true
-	outBuf.Reset()
-	outerJob = eng.Job("outerJob")
-	outerJob.Stdout.Add(outBuf)
-
-	if err := outerJob.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	expectedOutput = "outer1 inner outer2"
-	if outBuf.String() != expectedOutput {
-		t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String())
-	}
-}
diff --git a/engine/env.go b/engine/env.go
deleted file mode 100644
index a671f13..0000000
--- a/engine/env.go
+++ /dev/null
@@ -1,310 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/utils"
-)
-
-type Env []string
-
-// Get returns the last value associated with the given key. If there are no
-// values associated with the key, Get returns the empty string.
-func (env *Env) Get(key string) (value string) {
-	// not using Map() because of the extra allocations https://github.com/docker/docker/pull/7488#issuecomment-51638315
-	for _, kv := range *env {
-		if strings.Index(kv, "=") == -1 {
-			continue
-		}
-		parts := strings.SplitN(kv, "=", 2)
-		if parts[0] != key {
-			continue
-		}
-		if len(parts) < 2 {
-			value = ""
-		} else {
-			value = parts[1]
-		}
-	}
-	return
-}
-
-func (env *Env) Exists(key string) bool {
-	_, exists := env.Map()[key]
-	return exists
-}
-
-// Len returns the number of keys in the environment.
-// Note that len(env) might be different from env.Len(),
-// because the same key might be set multiple times.
-func (env *Env) Len() int {
-	return len(env.Map())
-}
-
-func (env *Env) Init(src *Env) {
-	(*env) = make([]string, 0, len(*src))
-	for _, val := range *src {
-		(*env) = append((*env), val)
-	}
-}
-
-func (env *Env) GetBool(key string) (value bool) {
-	s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
-	if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
-		return false
-	}
-	return true
-}
-
-func (env *Env) SetBool(key string, value bool) {
-	if value {
-		env.Set(key, "1")
-	} else {
-		env.Set(key, "0")
-	}
-}
-
-func (env *Env) GetTime(key string) (time.Time, error) {
-	t, err := time.Parse(time.RFC3339Nano, env.Get(key))
-	return t, err
-}
-
-func (env *Env) SetTime(key string, t time.Time) {
-	env.Set(key, t.Format(time.RFC3339Nano))
-}
-
-func (env *Env) GetInt(key string) int {
-	return int(env.GetInt64(key))
-}
-
-func (env *Env) GetInt64(key string) int64 {
-	s := strings.Trim(env.Get(key), " \t")
-	val, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		return 0
-	}
-	return val
-}
-
-func (env *Env) SetInt(key string, value int) {
-	env.Set(key, fmt.Sprintf("%d", value))
-}
-
-func (env *Env) SetInt64(key string, value int64) {
-	env.Set(key, fmt.Sprintf("%d", value))
-}
-
-// Returns nil if key not found
-func (env *Env) GetList(key string) []string {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	l := make([]string, 0, 1)
-	if err := json.Unmarshal([]byte(sval), &l); err != nil {
-		l = append(l, sval)
-	}
-	return l
-}
-
-func (env *Env) GetSubEnv(key string) *Env {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	buf := bytes.NewBufferString(sval)
-	var sub Env
-	if err := sub.Decode(buf); err != nil {
-		return nil
-	}
-	return &sub
-}
-
-func (env *Env) SetSubEnv(key string, sub *Env) error {
-	var buf bytes.Buffer
-	if err := sub.Encode(&buf); err != nil {
-		return err
-	}
-	env.Set(key, string(buf.Bytes()))
-	return nil
-}
-
-func (env *Env) GetJson(key string, iface interface{}) error {
-	sval := env.Get(key)
-	if sval == "" {
-		return nil
-	}
-	return json.Unmarshal([]byte(sval), iface)
-}
-
-func (env *Env) SetJson(key string, value interface{}) error {
-	sval, err := json.Marshal(value)
-	if err != nil {
-		return err
-	}
-	env.Set(key, string(sval))
-	return nil
-}
-
-func (env *Env) SetList(key string, value []string) error {
-	return env.SetJson(key, value)
-}
-
-func (env *Env) Set(key, value string) {
-	*env = append(*env, key+"="+value)
-}
-
-func NewDecoder(src io.Reader) *Decoder {
-	return &Decoder{
-		json.NewDecoder(src),
-	}
-}
-
-type Decoder struct {
-	*json.Decoder
-}
-
-func (decoder *Decoder) Decode() (*Env, error) {
-	m := make(map[string]interface{})
-	if err := decoder.Decoder.Decode(&m); err != nil {
-		return nil, err
-	}
-	env := &Env{}
-	for key, value := range m {
-		env.SetAuto(key, value)
-	}
-	return env, nil
-}
-
-// DecodeEnv decodes `src` as a json dictionary, and adds
-// each decoded key-value pair to the environment.
-//
-// If `src` cannot be decoded as a json dictionary, an error
-// is returned.
-func (env *Env) Decode(src io.Reader) error {
-	m := make(map[string]interface{})
-	if err := json.NewDecoder(src).Decode(&m); err != nil {
-		return err
-	}
-	for k, v := range m {
-		env.SetAuto(k, v)
-	}
-	return nil
-}
-
-func (env *Env) SetAuto(k string, v interface{}) {
-	// Issue 7941 - if the value in the incoming JSON is null then treat it
-	// as if they never specified the property at all.
-	if v == nil {
-		return
-	}
-
-	// FIXME: we fix-convert float values to int, because
-	// encoding/json decodes integers to float64, but cannot encode them back.
-	// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
-	if fval, ok := v.(float64); ok {
-		env.SetInt64(k, int64(fval))
-	} else if sval, ok := v.(string); ok {
-		env.Set(k, sval)
-	} else if val, err := json.Marshal(v); err == nil {
-		env.Set(k, string(val))
-	} else {
-		env.Set(k, fmt.Sprintf("%v", v))
-	}
-}
-
-func changeFloats(v interface{}) interface{} {
-	switch v := v.(type) {
-	case float64:
-		return int(v)
-	case map[string]interface{}:
-		for key, val := range v {
-			v[key] = changeFloats(val)
-		}
-	case []interface{}:
-		for idx, val := range v {
-			v[idx] = changeFloats(val)
-		}
-	}
-	return v
-}
-
-func (env *Env) Encode(dst io.Writer) error {
-	m := make(map[string]interface{})
-	for k, v := range env.Map() {
-		var val interface{}
-		if err := json.Unmarshal([]byte(v), &val); err == nil {
-			// FIXME: we fix-convert float values to int, because
-			// encoding/json decodes integers to float64, but cannot encode them back.
-			// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
-			m[k] = changeFloats(val)
-		} else {
-			m[k] = v
-		}
-	}
-	if err := json.NewEncoder(dst).Encode(&m); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (env *Env) WriteTo(dst io.Writer) (int64, error) {
-	wc := utils.NewWriteCounter(dst)
-	err := env.Encode(wc)
-	return wc.Count, err
-}
-
-func (env *Env) Import(src interface{}) (err error) {
-	defer func() {
-		if err != nil {
-			err = fmt.Errorf("ImportEnv: %s", err)
-		}
-	}()
-	var buf bytes.Buffer
-	if err := json.NewEncoder(&buf).Encode(src); err != nil {
-		return err
-	}
-	if err := env.Decode(&buf); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (env *Env) Map() map[string]string {
-	m := make(map[string]string)
-	for _, kv := range *env {
-		parts := strings.SplitN(kv, "=", 2)
-		m[parts[0]] = parts[1]
-	}
-	return m
-}
-
-// MultiMap returns a representation of env as a
-// map of string arrays, keyed by string.
-// This is the same structure as http headers for example,
-// which allow each key to have multiple values.
-func (env *Env) MultiMap() map[string][]string {
-	m := make(map[string][]string)
-	for _, kv := range *env {
-		parts := strings.SplitN(kv, "=", 2)
-		m[parts[0]] = append(m[parts[0]], parts[1])
-	}
-	return m
-}
-
-// InitMultiMap removes all values in env, then initializes
-// new values from the contents of m.
-func (env *Env) InitMultiMap(m map[string][]string) {
-	(*env) = make([]string, 0, len(m))
-	for k, vals := range m {
-		for _, v := range vals {
-			env.Set(k, v)
-		}
-	}
-}
diff --git a/engine/env_test.go b/engine/env_test.go
deleted file mode 100644
index 2ed99d0..0000000
--- a/engine/env_test.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"encoding/json"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/pkg/testutils"
-)
-
-func TestEnvLenZero(t *testing.T) {
-	env := &Env{}
-	if env.Len() != 0 {
-		t.Fatalf("%d", env.Len())
-	}
-}
-
-func TestEnvLenNotZero(t *testing.T) {
-	env := &Env{}
-	env.Set("foo", "bar")
-	env.Set("ga", "bu")
-	if env.Len() != 2 {
-		t.Fatalf("%d", env.Len())
-	}
-}
-
-func TestEnvLenDup(t *testing.T) {
-	env := &Env{
-		"foo=bar",
-		"foo=baz",
-		"a=b",
-	}
-	// len(env) != env.Len()
-	if env.Len() != 2 {
-		t.Fatalf("%d", env.Len())
-	}
-}
-
-func TestEnvGetDup(t *testing.T) {
-	env := &Env{
-		"foo=bar",
-		"foo=baz",
-		"foo=bif",
-	}
-	expected := "bif"
-	if v := env.Get("foo"); v != expected {
-		t.Fatalf("expect %q, got %q", expected, v)
-	}
-}
-
-func TestNewJob(t *testing.T) {
-	job := mkJob(t, "dummy", "--level=awesome")
-	if job.Name != "dummy" {
-		t.Fatalf("Wrong job name: %s", job.Name)
-	}
-	if len(job.Args) != 1 {
-		t.Fatalf("Wrong number of job arguments: %d", len(job.Args))
-	}
-	if job.Args[0] != "--level=awesome" {
-		t.Fatalf("Wrong job arguments: %s", job.Args[0])
-	}
-}
-
-func TestSetenv(t *testing.T) {
-	job := mkJob(t, "dummy")
-	job.Setenv("foo", "bar")
-	if val := job.Getenv("foo"); val != "bar" {
-		t.Fatalf("Getenv returns incorrect value: %s", val)
-	}
-
-	job.Setenv("bar", "")
-	if val := job.Getenv("bar"); val != "" {
-		t.Fatalf("Getenv returns incorrect value: %s", val)
-	}
-	if val := job.Getenv("nonexistent"); val != "" {
-		t.Fatalf("Getenv returns incorrect value: %s", val)
-	}
-}
-
-func TestSetenvBool(t *testing.T) {
-	job := mkJob(t, "dummy")
-	job.SetenvBool("foo", true)
-	if val := job.GetenvBool("foo"); !val {
-		t.Fatalf("GetenvBool returns incorrect value: %t", val)
-	}
-
-	job.SetenvBool("bar", false)
-	if val := job.GetenvBool("bar"); val {
-		t.Fatalf("GetenvBool returns incorrect value: %t", val)
-	}
-
-	if val := job.GetenvBool("nonexistent"); val {
-		t.Fatalf("GetenvBool returns incorrect value: %t", val)
-	}
-}
-
-func TestSetenvTime(t *testing.T) {
-	job := mkJob(t, "dummy")
-
-	now := time.Now()
-	job.SetenvTime("foo", now)
-	if val, err := job.GetenvTime("foo"); err != nil {
-		t.Fatalf("GetenvTime failed to parse: %v", err)
-	} else {
-		nowStr := now.Format(time.RFC3339)
-		valStr := val.Format(time.RFC3339)
-		if nowStr != valStr {
-			t.Fatalf("GetenvTime returns incorrect value: %s, Expected: %s", valStr, nowStr)
-		}
-	}
-
-	job.Setenv("bar", "Obviously I'm not a date")
-	if val, err := job.GetenvTime("bar"); err == nil {
-		t.Fatalf("GetenvTime was supposed to fail, instead returned: %s", val)
-	}
-}
-
-func TestSetenvInt(t *testing.T) {
-	job := mkJob(t, "dummy")
-
-	job.SetenvInt("foo", -42)
-	if val := job.GetenvInt("foo"); val != -42 {
-		t.Fatalf("GetenvInt returns incorrect value: %d", val)
-	}
-
-	job.SetenvInt("bar", 42)
-	if val := job.GetenvInt("bar"); val != 42 {
-		t.Fatalf("GetenvInt returns incorrect value: %d", val)
-	}
-	if val := job.GetenvInt("nonexistent"); val != 0 {
-		t.Fatalf("GetenvInt returns incorrect value: %d", val)
-	}
-}
-
-func TestSetenvList(t *testing.T) {
-	job := mkJob(t, "dummy")
-
-	job.SetenvList("foo", []string{"bar"})
-	if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" {
-		t.Fatalf("GetenvList returns incorrect value: %v", val)
-	}
-
-	job.SetenvList("bar", nil)
-	if val := job.GetenvList("bar"); val != nil {
-		t.Fatalf("GetenvList returns incorrect value: %v", val)
-	}
-	if val := job.GetenvList("nonexistent"); val != nil {
-		t.Fatalf("GetenvList returns incorrect value: %v", val)
-	}
-}
-
-func TestEnviron(t *testing.T) {
-	job := mkJob(t, "dummy")
-	job.Setenv("foo", "bar")
-	val, exists := job.Environ()["foo"]
-	if !exists {
-		t.Fatalf("foo not found in the environ")
-	}
-	if val != "bar" {
-		t.Fatalf("bar not found in the environ")
-	}
-}
-
-func TestMultiMap(t *testing.T) {
-	e := &Env{}
-	e.Set("foo", "bar")
-	e.Set("bar", "baz")
-	e.Set("hello", "world")
-	m := e.MultiMap()
-	e2 := &Env{}
-	e2.Set("old_key", "something something something")
-	e2.InitMultiMap(m)
-	if v := e2.Get("old_key"); v != "" {
-		t.Fatalf("%#v", v)
-	}
-	if v := e2.Get("bar"); v != "baz" {
-		t.Fatalf("%#v", v)
-	}
-	if v := e2.Get("hello"); v != "world" {
-		t.Fatalf("%#v", v)
-	}
-}
-
-func testMap(l int) [][2]string {
-	res := make([][2]string, l)
-	for i := 0; i < l; i++ {
-		t := [2]string{testutils.RandomString(5), testutils.RandomString(20)}
-		res[i] = t
-	}
-	return res
-}
-
-func BenchmarkSet(b *testing.B) {
-	fix := testMap(100)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		env := &Env{}
-		for _, kv := range fix {
-			env.Set(kv[0], kv[1])
-		}
-	}
-}
-
-func BenchmarkSetJson(b *testing.B) {
-	fix := testMap(100)
-	type X struct {
-		f string
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		env := &Env{}
-		for _, kv := range fix {
-			if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
-				b.Fatal(err)
-			}
-		}
-	}
-}
-
-func BenchmarkGet(b *testing.B) {
-	fix := testMap(100)
-	env := &Env{}
-	for _, kv := range fix {
-		env.Set(kv[0], kv[1])
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		for _, kv := range fix {
-			env.Get(kv[0])
-		}
-	}
-}
-
-func BenchmarkGetJson(b *testing.B) {
-	fix := testMap(100)
-	env := &Env{}
-	type X struct {
-		f string
-	}
-	for _, kv := range fix {
-		env.SetJson(kv[0], X{kv[1]})
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		for _, kv := range fix {
-			if err := env.GetJson(kv[0], &X{}); err != nil {
-				b.Fatal(err)
-			}
-		}
-	}
-}
-
-func BenchmarkEncode(b *testing.B) {
-	fix := testMap(100)
-	env := &Env{}
-	type X struct {
-		f string
-	}
-	// half a json
-	for i, kv := range fix {
-		if i%2 != 0 {
-			if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
-				b.Fatal(err)
-			}
-			continue
-		}
-		env.Set(kv[0], kv[1])
-	}
-	var writer bytes.Buffer
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		env.Encode(&writer)
-		writer.Reset()
-	}
-}
-
-func BenchmarkDecode(b *testing.B) {
-	fix := testMap(100)
-	env := &Env{}
-	type X struct {
-		f string
-	}
-	// half a json
-	for i, kv := range fix {
-		if i%2 != 0 {
-			if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
-				b.Fatal(err)
-			}
-			continue
-		}
-		env.Set(kv[0], kv[1])
-	}
-	var writer bytes.Buffer
-	env.Encode(&writer)
-	denv := &Env{}
-	reader := bytes.NewReader(writer.Bytes())
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		err := denv.Decode(reader)
-		if err != nil {
-			b.Fatal(err)
-		}
-		reader.Seek(0, 0)
-	}
-}
-
-func TestLongNumbers(t *testing.T) {
-	type T struct {
-		TestNum int64
-	}
-	v := T{67108864}
-	var buf bytes.Buffer
-	e := &Env{}
-	e.SetJson("Test", v)
-	if err := e.Encode(&buf); err != nil {
-		t.Fatal(err)
-	}
-	res := make(map[string]T)
-	if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
-		t.Fatal(err)
-	}
-	if res["Test"].TestNum != v.TestNum {
-		t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum)
-	}
-}
-
-func TestLongNumbersArray(t *testing.T) {
-	type T struct {
-		TestNum []int64
-	}
-	v := T{[]int64{67108864}}
-	var buf bytes.Buffer
-	e := &Env{}
-	e.SetJson("Test", v)
-	if err := e.Encode(&buf); err != nil {
-		t.Fatal(err)
-	}
-	res := make(map[string]T)
-	if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
-		t.Fatal(err)
-	}
-	if res["Test"].TestNum[0] != v.TestNum[0] {
-		t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum)
-	}
-}
diff --git a/engine/hack.go b/engine/hack.go
deleted file mode 100644
index be4fadb..0000000
--- a/engine/hack.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package engine
-
-type Hack map[string]interface{}
-
-func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
-	if eng.hack == nil {
-		return nil
-	}
-	val, exists := eng.hack[key]
-	if !exists {
-		return nil
-	}
-	return val
-}
-
-func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
-	if eng.hack == nil {
-		eng.hack = make(Hack)
-	}
-	eng.hack[key] = val
-}
diff --git a/engine/helpers_test.go b/engine/helpers_test.go
deleted file mode 100644
index cfa11da..0000000
--- a/engine/helpers_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package engine
-
-import (
-	"testing"
-)
-
-var globalTestID string
-
-func mkJob(t *testing.T, name string, args ...string) *Job {
-	return New().Job(name, args...)
-}
diff --git a/engine/http.go b/engine/http.go
deleted file mode 100644
index 7e4dcd7..0000000
--- a/engine/http.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package engine
-
-import (
-	"net/http"
-	"path"
-)
-
-// ServeHTTP executes a job as specified by the http request `r`, and sends the
-// result as an http response.
-// This method allows an Engine instance to be passed as a standard http.Handler interface.
-//
-// Note that the protocol used in this method is a convenience wrapper and is not the canonical
-// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing,
-// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response
-// once data has been written to the body, which makes it inconvenient to return metadata such
-// as the exit status.
-//
-func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	var (
-		jobName         = path.Base(r.URL.Path)
-		jobArgs, exists = r.URL.Query()["a"]
-	)
-	if !exists {
-		jobArgs = []string{}
-	}
-	w.Header().Set("Job-Name", jobName)
-	for _, arg := range jobArgs {
-		w.Header().Add("Job-Args", arg)
-	}
-	job := eng.Job(jobName, jobArgs...)
-	job.Stdout.Add(w)
-	job.Stderr.Add(w)
-	// FIXME: distinguish job status from engine error in Run()
-	// The former should be passed as a special header, the former
-	// should cause a 500 status
-	w.WriteHeader(http.StatusOK)
-	// The exit status cannot be sent reliably with HTTP1, because headers
-	// can only be sent before the body.
-	// (we could possibly use http footers via chunked encoding, but I couldn't find
-	// how to use them in net/http)
-	job.Run()
-}
diff --git a/engine/job.go b/engine/job.go
deleted file mode 100644
index ecb68c3..0000000
--- a/engine/job.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"strings"
-	"sync"
-	"time"
-
-	log "github.com/Sirupsen/logrus"
-)
-
-// A job is the fundamental unit of work in the docker engine.
-// Everything docker can do should eventually be exposed as a job.
-// For example: execute a process in a container, create a new container,
-// download an archive from the internet, serve the http api, etc.
-//
-// The job API is designed after unix processes: a job has a name, arguments,
-// environment variables, standard streams for input, output and error, and
-// an exit status which can indicate success (0) or error (anything else).
-//
-// For status, 0 indicates success, and any other integers indicates an error.
-// This allows for richer error reporting.
-//
-type Job struct {
-	Eng     *Engine
-	Name    string
-	Args    []string
-	env     *Env
-	Stdout  *Output
-	Stderr  *Output
-	Stdin   *Input
-	handler Handler
-	status  Status
-	end     time.Time
-	closeIO bool
-
-	// When closed, the job has been cancelled.
-	// Note: not all jobs implement cancellation.
-	// See Job.Cancel() and Job.WaitCancelled()
-	cancelled  chan struct{}
-	cancelOnce sync.Once
-}
-
-type Status int
-
-const (
-	StatusOK       Status = 0
-	StatusErr      Status = 1
-	StatusNotFound Status = 127
-)
-
-// Run executes the job and blocks until the job completes.
-// If the job returns a failure status, an error is returned
-// which includes the status.
-func (job *Job) Run() error {
-	if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
-		return fmt.Errorf("engine is shutdown")
-	}
-	// FIXME: this is a temporary workaround to avoid Engine.Shutdown
-	// waiting 5 seconds for server/api.ServeApi to complete (which it never will)
-	// everytime the daemon is cleanly restarted.
-	// The permanent fix is to implement Job.Stop and Job.OnStop so that
-	// ServeApi can cooperate and terminate cleanly.
-	if job.Name != "serveapi" {
-		job.Eng.l.Lock()
-		job.Eng.tasks.Add(1)
-		job.Eng.l.Unlock()
-		defer job.Eng.tasks.Done()
-	}
-	// FIXME: make this thread-safe
-	// FIXME: implement wait
-	if !job.end.IsZero() {
-		return fmt.Errorf("%s: job has already completed", job.Name)
-	}
-	// Log beginning and end of the job
-	if job.Eng.Logging {
-		log.Infof("+job %s", job.CallString())
-		defer func() {
-			log.Infof("-job %s%s", job.CallString(), job.StatusString())
-		}()
-	}
-	var errorMessage = bytes.NewBuffer(nil)
-	job.Stderr.Add(errorMessage)
-	if job.handler == nil {
-		job.Errorf("%s: command not found", job.Name)
-		job.status = 127
-	} else {
-		job.status = job.handler(job)
-		job.end = time.Now()
-	}
-	if job.closeIO {
-		// Wait for all background tasks to complete
-		if err := job.Stdout.Close(); err != nil {
-			return err
-		}
-		if err := job.Stderr.Close(); err != nil {
-			return err
-		}
-		if err := job.Stdin.Close(); err != nil {
-			return err
-		}
-	}
-	if job.status != 0 {
-		return fmt.Errorf("%s", Tail(errorMessage, 1))
-	}
-
-	return nil
-}
-
-func (job *Job) CallString() string {
-	return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
-}
-
-func (job *Job) StatusString() string {
-	// If the job hasn't completed, status string is empty
-	if job.end.IsZero() {
-		return ""
-	}
-	var okerr string
-	if job.status == StatusOK {
-		okerr = "OK"
-	} else {
-		okerr = "ERR"
-	}
-	return fmt.Sprintf(" = %s (%d)", okerr, job.status)
-}
-
-// String returns a human-readable description of `job`
-func (job *Job) String() string {
-	return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
-}
-
-func (job *Job) Env() *Env {
-	return job.env
-}
-
-func (job *Job) EnvExists(key string) (value bool) {
-	return job.env.Exists(key)
-}
-
-func (job *Job) Getenv(key string) (value string) {
-	return job.env.Get(key)
-}
-
-func (job *Job) GetenvBool(key string) (value bool) {
-	return job.env.GetBool(key)
-}
-
-func (job *Job) SetenvBool(key string, value bool) {
-	job.env.SetBool(key, value)
-}
-
-func (job *Job) GetenvTime(key string) (value time.Time, err error) {
-	return job.env.GetTime(key)
-}
-
-func (job *Job) SetenvTime(key string, value time.Time) {
-	job.env.SetTime(key, value)
-}
-
-func (job *Job) GetenvSubEnv(key string) *Env {
-	return job.env.GetSubEnv(key)
-}
-
-func (job *Job) SetenvSubEnv(key string, value *Env) error {
-	return job.env.SetSubEnv(key, value)
-}
-
-func (job *Job) GetenvInt64(key string) int64 {
-	return job.env.GetInt64(key)
-}
-
-func (job *Job) GetenvInt(key string) int {
-	return job.env.GetInt(key)
-}
-
-func (job *Job) SetenvInt64(key string, value int64) {
-	job.env.SetInt64(key, value)
-}
-
-func (job *Job) SetenvInt(key string, value int) {
-	job.env.SetInt(key, value)
-}
-
-// Returns nil if key not found
-func (job *Job) GetenvList(key string) []string {
-	return job.env.GetList(key)
-}
-
-func (job *Job) GetenvJson(key string, iface interface{}) error {
-	return job.env.GetJson(key, iface)
-}
-
-func (job *Job) SetenvJson(key string, value interface{}) error {
-	return job.env.SetJson(key, value)
-}
-
-func (job *Job) SetenvList(key string, value []string) error {
-	return job.env.SetJson(key, value)
-}
-
-func (job *Job) Setenv(key, value string) {
-	job.env.Set(key, value)
-}
-
-// DecodeEnv decodes `src` as a json dictionary, and adds
-// each decoded key-value pair to the environment.
-//
-// If `src` cannot be decoded as a json dictionary, an error
-// is returned.
-func (job *Job) DecodeEnv(src io.Reader) error {
-	return job.env.Decode(src)
-}
-
-func (job *Job) EncodeEnv(dst io.Writer) error {
-	return job.env.Encode(dst)
-}
-
-func (job *Job) ImportEnv(src interface{}) (err error) {
-	return job.env.Import(src)
-}
-
-func (job *Job) Environ() map[string]string {
-	return job.env.Map()
-}
-
-func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
-	prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n"))
-	return fmt.Fprintf(job.Stderr, prefixedFormat, args...)
-}
-
-func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
-	return fmt.Fprintf(job.Stdout, format, args...)
-}
-
-func (job *Job) Errorf(format string, args ...interface{}) Status {
-	if format[len(format)-1] != '\n' {
-		format = format + "\n"
-	}
-	fmt.Fprintf(job.Stderr, format, args...)
-	return StatusErr
-}
-
-func (job *Job) Error(err error) Status {
-	fmt.Fprintf(job.Stderr, "%s\n", err)
-	return StatusErr
-}
-
-func (job *Job) StatusCode() int {
-	return int(job.status)
-}
-
-func (job *Job) SetCloseIO(val bool) {
-	job.closeIO = val
-}
-
-// When called, causes the Job.WaitCancelled channel to unblock.
-func (job *Job) Cancel() {
-	job.cancelOnce.Do(func() {
-		close(job.cancelled)
-	})
-}
-
-// Returns a channel which is closed ("never blocks") when the job is cancelled.
-func (job *Job) WaitCancelled() <-chan struct{} {
-	return job.cancelled
-}
diff --git a/engine/job_test.go b/engine/job_test.go
deleted file mode 100644
index 9f8c760..0000000
--- a/engine/job_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"fmt"
-	"testing"
-)
-
-func TestJobStatusOK(t *testing.T) {
-	eng := New()
-	eng.Register("return_ok", func(job *Job) Status { return StatusOK })
-	err := eng.Job("return_ok").Run()
-	if err != nil {
-		t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
-	}
-}
-
-func TestJobStatusErr(t *testing.T) {
-	eng := New()
-	eng.Register("return_err", func(job *Job) Status { return StatusErr })
-	err := eng.Job("return_err").Run()
-	if err == nil {
-		t.Fatalf("When a job returns StatusErr, Run() should return an error")
-	}
-}
-
-func TestJobStatusNotFound(t *testing.T) {
-	eng := New()
-	eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound })
-	err := eng.Job("return_not_found").Run()
-	if err == nil {
-		t.Fatalf("When a job returns StatusNotFound, Run() should return an error")
-	}
-}
-
-func TestJobStdoutString(t *testing.T) {
-	eng := New()
-	// FIXME: test multiple combinations of output and status
-	eng.Register("say_something_in_stdout", func(job *Job) Status {
-		job.Printf("Hello world\n")
-		return StatusOK
-	})
-
-	job := eng.Job("say_something_in_stdout")
-	var outputBuffer = bytes.NewBuffer(nil)
-	job.Stdout.Add(outputBuffer)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	fmt.Println(outputBuffer)
-	var output = Tail(outputBuffer, 1)
-	if expectedOutput := "Hello world"; output != expectedOutput {
-		t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
-	}
-}
-
-func TestJobStderrString(t *testing.T) {
-	eng := New()
-	// FIXME: test multiple combinations of output and status
-	eng.Register("say_something_in_stderr", func(job *Job) Status {
-		job.Errorf("Something might happen\nHere it comes!\nOh no...\nSomething happened\n")
-		return StatusOK
-	})
-
-	job := eng.Job("say_something_in_stderr")
-	var outputBuffer = bytes.NewBuffer(nil)
-	job.Stderr.Add(outputBuffer)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	var output = Tail(outputBuffer, 1)
-	if expectedOutput := "Something happened"; output != expectedOutput {
-		t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
-	}
-}
diff --git a/engine/shutdown_test.go b/engine/shutdown_test.go
deleted file mode 100644
index 13d8049..0000000
--- a/engine/shutdown_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package engine
-
-import (
-	"testing"
-	"time"
-)
-
-func TestShutdownEmpty(t *testing.T) {
-	eng := New()
-	if eng.IsShutdown() {
-		t.Fatalf("IsShutdown should be false")
-	}
-	eng.Shutdown()
-	if !eng.IsShutdown() {
-		t.Fatalf("IsShutdown should be true")
-	}
-}
-
-func TestShutdownAfterRun(t *testing.T) {
-	eng := New()
-	var called bool
-	eng.Register("foo", func(job *Job) Status {
-		called = true
-		return StatusOK
-	})
-	if err := eng.Job("foo").Run(); err != nil {
-		t.Fatal(err)
-	}
-	eng.Shutdown()
-	if err := eng.Job("foo").Run(); err == nil {
-		t.Fatalf("%#v", *eng)
-	}
-}
-
-// An approximate and racy, but better-than-nothing test that
-//
-func TestShutdownDuringRun(t *testing.T) {
-	var (
-		jobDelay     time.Duration = 500 * time.Millisecond
-		jobDelayLow  time.Duration = 100 * time.Millisecond
-		jobDelayHigh time.Duration = 700 * time.Millisecond
-	)
-	eng := New()
-	var completed bool
-	eng.Register("foo", func(job *Job) Status {
-		time.Sleep(jobDelay)
-		completed = true
-		return StatusOK
-	})
-	go eng.Job("foo").Run()
-	time.Sleep(50 * time.Millisecond)
-	done := make(chan struct{})
-	var startShutdown time.Time
-	go func() {
-		startShutdown = time.Now()
-		eng.Shutdown()
-		close(done)
-	}()
-	time.Sleep(50 * time.Millisecond)
-	if err := eng.Job("foo").Run(); err == nil {
-		t.Fatalf("run on shutdown should fail: %#v", *eng)
-	}
-	<-done
-	// Verify that Shutdown() blocks for roughly 500ms, instead
-	// of returning almost instantly.
-	//
-	// We use >100ms to leave ample margin for race conditions between
-	// goroutines. It's possible (but unlikely in reasonable testing
-	// conditions), that this test will cause a false positive or false
-	// negative. But it's probably better than not having any test
-	// for the 99.999% of time where testing conditions are reasonable.
-	if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() {
-		t.Fatalf("shutdown did not block long enough: %v", d)
-	} else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() {
-		t.Fatalf("shutdown blocked too long: %v", d)
-	}
-	if !completed {
-		t.Fatalf("job did not complete")
-	}
-}
diff --git a/engine/streams.go b/engine/streams.go
deleted file mode 100644
index 216fb89..0000000
--- a/engine/streams.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"strings"
-	"sync"
-	"unicode"
-)
-
-type Output struct {
-	sync.Mutex
-	dests []io.Writer
-	tasks sync.WaitGroup
-	used  bool
-}
-
-// Tail returns the n last lines of a buffer
-// stripped out of trailing white spaces, if any.
-//
-// if n <= 0, returns an empty string
-func Tail(buffer *bytes.Buffer, n int) string {
-	if n <= 0 {
-		return ""
-	}
-	s := strings.TrimRightFunc(buffer.String(), unicode.IsSpace)
-	i := len(s) - 1
-	for ; i >= 0 && n > 0; i-- {
-		if s[i] == '\n' {
-			n--
-			if n == 0 {
-				break
-			}
-		}
-	}
-	// when i == -1, return the whole string which is s[0:]
-	return s[i+1:]
-}
-
-// NewOutput returns a new Output object with no destinations attached.
-// Writing to an empty Output will cause the written data to be discarded.
-func NewOutput() *Output {
-	return &Output{}
-}
-
-// Return true if something was written on this output
-func (o *Output) Used() bool {
-	o.Lock()
-	defer o.Unlock()
-	return o.used
-}
-
-// Add attaches a new destination to the Output. Any data subsequently written
-// to the output will be written to the new destination in addition to all the others.
-// This method is thread-safe.
-func (o *Output) Add(dst io.Writer) {
-	o.Lock()
-	defer o.Unlock()
-	o.dests = append(o.dests, dst)
-}
-
-// Set closes and remove existing destination and then attaches a new destination to
-// the Output. Any data subsequently written to the output will be written to the new
-// destination in addition to all the others. This method is thread-safe.
-func (o *Output) Set(dst io.Writer) {
-	o.Close()
-	o.Lock()
-	defer o.Unlock()
-	o.dests = []io.Writer{dst}
-}
-
-// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
-// and returns its reading end for consumption by the caller.
-// This is a rough equivalent similar to Cmd.StdoutPipe() in the standard os/exec package.
-// This method is thread-safe.
-func (o *Output) AddPipe() (io.Reader, error) {
-	r, w := io.Pipe()
-	o.Add(w)
-	return r, nil
-}
-
-// Write writes the same data to all registered destinations.
-// This method is thread-safe.
-func (o *Output) Write(p []byte) (n int, err error) {
-	o.Lock()
-	defer o.Unlock()
-	o.used = true
-	var firstErr error
-	for _, dst := range o.dests {
-		_, err := dst.Write(p)
-		if err != nil && firstErr == nil {
-			firstErr = err
-		}
-	}
-	return len(p), firstErr
-}
-
-// Close unregisters all destinations and waits for all background
-// AddTail and AddString tasks to complete.
-// The Close method of each destination is called if it exists.
-func (o *Output) Close() error {
-	o.Lock()
-	defer o.Unlock()
-	var firstErr error
-	for _, dst := range o.dests {
-		if closer, ok := dst.(io.Closer); ok {
-			err := closer.Close()
-			if err != nil && firstErr == nil {
-				firstErr = err
-			}
-		}
-	}
-	o.tasks.Wait()
-	o.dests = nil
-	return firstErr
-}
-
-type Input struct {
-	src io.Reader
-	sync.Mutex
-}
-
-// NewInput returns a new Input object with no source attached.
-// Reading to an empty Input will return io.EOF.
-func NewInput() *Input {
-	return &Input{}
-}
-
-// Read reads from the input in a thread-safe way.
-func (i *Input) Read(p []byte) (n int, err error) {
-	i.Mutex.Lock()
-	defer i.Mutex.Unlock()
-	if i.src == nil {
-		return 0, io.EOF
-	}
-	return i.src.Read(p)
-}
-
-// Closes the src
-// Not thread safe on purpose
-func (i *Input) Close() error {
-	if i.src != nil {
-		if closer, ok := i.src.(io.Closer); ok {
-			return closer.Close()
-		}
-	}
-	return nil
-}
-
-// Add attaches a new source to the input.
-// Add can only be called once per input. Subsequent calls will
-// return an error.
-func (i *Input) Add(src io.Reader) error {
-	i.Mutex.Lock()
-	defer i.Mutex.Unlock()
-	if i.src != nil {
-		return fmt.Errorf("Maximum number of sources reached: 1")
-	}
-	i.src = src
-	return nil
-}
-
-// AddEnv starts a new goroutine which will decode all subsequent data
-// as a stream of json-encoded objects, and point `dst` to the last
-// decoded object.
-// The result `env` can be queried using the type-neutral Env interface.
-// It is not safe to query `env` until the Output is closed.
-func (o *Output) AddEnv() (dst *Env, err error) {
-	src, err := o.AddPipe()
-	if err != nil {
-		return nil, err
-	}
-	dst = &Env{}
-	o.tasks.Add(1)
-	go func() {
-		defer o.tasks.Done()
-		decoder := NewDecoder(src)
-		for {
-			env, err := decoder.Decode()
-			if err != nil {
-				return
-			}
-			*dst = *env
-		}
-	}()
-	return dst, nil
-}
-
-func (o *Output) AddListTable() (dst *Table, err error) {
-	src, err := o.AddPipe()
-	if err != nil {
-		return nil, err
-	}
-	dst = NewTable("", 0)
-	o.tasks.Add(1)
-	go func() {
-		defer o.tasks.Done()
-		content, err := ioutil.ReadAll(src)
-		if err != nil {
-			return
-		}
-		if _, err := dst.ReadListFrom(content); err != nil {
-			return
-		}
-	}()
-	return dst, nil
-}
-
-func (o *Output) AddTable() (dst *Table, err error) {
-	src, err := o.AddPipe()
-	if err != nil {
-		return nil, err
-	}
-	dst = NewTable("", 0)
-	o.tasks.Add(1)
-	go func() {
-		defer o.tasks.Done()
-		if _, err := dst.ReadFrom(src); err != nil {
-			return
-		}
-	}()
-	return dst, nil
-}
diff --git a/engine/streams_test.go b/engine/streams_test.go
deleted file mode 100644
index 476a721..0000000
--- a/engine/streams_test.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package engine
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"strings"
-	"testing"
-)
-
-type sentinelWriteCloser struct {
-	calledWrite bool
-	calledClose bool
-}
-
-func (w *sentinelWriteCloser) Write(p []byte) (int, error) {
-	w.calledWrite = true
-	return len(p), nil
-}
-
-func (w *sentinelWriteCloser) Close() error {
-	w.calledClose = true
-	return nil
-}
-
-func TestOutputAddEnv(t *testing.T) {
-	input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}"
-	o := NewOutput()
-	result, err := o.AddEnv()
-	if err != nil {
-		t.Fatal(err)
-	}
-	o.Write([]byte(input))
-	o.Close()
-	if v := result.Get("foo"); v != "bar" {
-		t.Errorf("Expected %v, got %v", "bar", v)
-	}
-	if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 {
-		t.Errorf("Expected %v, got %v", 42, v)
-	}
-	if v := result.Get("this-value-doesnt-exist"); v != "" {
-		t.Errorf("Expected %v, got %v", "", v)
-	}
-}
-
-func TestOutputAddClose(t *testing.T) {
-	o := NewOutput()
-	var s sentinelWriteCloser
-	o.Add(&s)
-	if err := o.Close(); err != nil {
-		t.Fatal(err)
-	}
-	// Write data after the output is closed.
-	// Write should succeed, but no destination should receive it.
-	if _, err := o.Write([]byte("foo bar")); err != nil {
-		t.Fatal(err)
-	}
-	if !s.calledClose {
-		t.Fatal("Output.Close() didn't close the destination")
-	}
-}
-
-func TestOutputAddPipe(t *testing.T) {
-	var testInputs = []string{
-		"hello, world!",
-		"One\nTwo\nThree",
-		"",
-		"A line\nThen another nl-terminated line\n",
-		"A line followed by an empty line\n\n",
-	}
-	for _, input := range testInputs {
-		expectedOutput := input
-		o := NewOutput()
-		r, err := o.AddPipe()
-		if err != nil {
-			t.Fatal(err)
-		}
-		go func(o *Output) {
-			if n, err := o.Write([]byte(input)); err != nil {
-				t.Error(err)
-			} else if n != len(input) {
-				t.Errorf("Expected %d, got %d", len(input), n)
-			}
-			if err := o.Close(); err != nil {
-				t.Error(err)
-			}
-		}(o)
-		output, err := ioutil.ReadAll(r)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if string(output) != expectedOutput {
-			t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot:       '%s'", expectedOutput, output)
-		}
-	}
-}
-
-func TestTail(t *testing.T) {
-	var tests = make(map[string][]string)
-	tests["hello, world!"] = []string{
-		"",
-		"hello, world!",
-		"hello, world!",
-		"hello, world!",
-	}
-	tests["One\nTwo\nThree"] = []string{
-		"",
-		"Three",
-		"Two\nThree",
-		"One\nTwo\nThree",
-	}
-	tests["One\nTwo\n\n\n"] = []string{
-		"",
-		"Two",
-		"One\nTwo",
-	}
-	for input, outputs := range tests {
-		for n, expectedOutput := range outputs {
-			output := Tail(bytes.NewBufferString(input), n)
-			if output != expectedOutput {
-				t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot     : '%s'", n, expectedOutput, output)
-			}
-		}
-	}
-}
-
-func lastLine(txt string) string {
-	scanner := bufio.NewScanner(strings.NewReader(txt))
-	var lastLine string
-	for scanner.Scan() {
-		lastLine = scanner.Text()
-	}
-	return lastLine
-}
-
-func TestOutputAdd(t *testing.T) {
-	o := NewOutput()
-	b := &bytes.Buffer{}
-	o.Add(b)
-	input := "hello, world!"
-	if n, err := o.Write([]byte(input)); err != nil {
-		t.Fatal(err)
-	} else if n != len(input) {
-		t.Fatalf("Expected %d, got %d", len(input), n)
-	}
-	if output := b.String(); output != input {
-		t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot:     '%s'", input, output)
-	}
-}
-
-func TestOutputWriteError(t *testing.T) {
-	o := NewOutput()
-	buf := &bytes.Buffer{}
-	o.Add(buf)
-	r, w := io.Pipe()
-	input := "Hello there"
-	expectedErr := fmt.Errorf("This is an error")
-	r.CloseWithError(expectedErr)
-	o.Add(w)
-	n, err := o.Write([]byte(input))
-	if err != expectedErr {
-		t.Fatalf("Output.Write() should return the first error encountered, if any")
-	}
-	if buf.String() != input {
-		t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
-	}
-	if n != len(input) {
-		t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
-	}
-}
-
-func TestInputAddEmpty(t *testing.T) {
-	i := NewInput()
-	var b bytes.Buffer
-	if err := i.Add(&b); err != nil {
-		t.Fatal(err)
-	}
-	data, err := ioutil.ReadAll(i)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(data) > 0 {
-		t.Fatalf("Read from empty input shoul yield no data")
-	}
-}
-
-func TestInputAddTwo(t *testing.T) {
-	i := NewInput()
-	var b1 bytes.Buffer
-	// First add should succeed
-	if err := i.Add(&b1); err != nil {
-		t.Fatal(err)
-	}
-	var b2 bytes.Buffer
-	// Second add should fail
-	if err := i.Add(&b2); err == nil {
-		t.Fatalf("Adding a second source should return an error")
-	}
-}
-
-func TestInputAddNotEmpty(t *testing.T) {
-	i := NewInput()
-	b := bytes.NewBufferString("hello world\nabc")
-	expectedResult := b.String()
-	i.Add(b)
-	result, err := ioutil.ReadAll(i)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(result) != expectedResult {
-		t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result)
-	}
-}
diff --git a/engine/table.go b/engine/table.go
deleted file mode 100644
index 4498bdf..0000000
--- a/engine/table.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"encoding/json"
-	"io"
-	"sort"
-	"strconv"
-)
-
-type Table struct {
-	Data    []*Env
-	sortKey string
-	Chan    chan *Env
-}
-
-func NewTable(sortKey string, sizeHint int) *Table {
-	return &Table{
-		make([]*Env, 0, sizeHint),
-		sortKey,
-		make(chan *Env),
-	}
-}
-
-func (t *Table) SetKey(sortKey string) {
-	t.sortKey = sortKey
-}
-
-func (t *Table) Add(env *Env) {
-	t.Data = append(t.Data, env)
-}
-
-func (t *Table) Len() int {
-	return len(t.Data)
-}
-
-func (t *Table) Less(a, b int) bool {
-	return t.lessBy(a, b, t.sortKey)
-}
-
-func (t *Table) lessBy(a, b int, by string) bool {
-	keyA := t.Data[a].Get(by)
-	keyB := t.Data[b].Get(by)
-	intA, errA := strconv.ParseInt(keyA, 10, 64)
-	intB, errB := strconv.ParseInt(keyB, 10, 64)
-	if errA == nil && errB == nil {
-		return intA < intB
-	}
-	return keyA < keyB
-}
-
-func (t *Table) Swap(a, b int) {
-	tmp := t.Data[a]
-	t.Data[a] = t.Data[b]
-	t.Data[b] = tmp
-}
-
-func (t *Table) Sort() {
-	sort.Sort(t)
-}
-
-func (t *Table) ReverseSort() {
-	sort.Sort(sort.Reverse(t))
-}
-
-func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) {
-	if _, err := dst.Write([]byte{'['}); err != nil {
-		return -1, err
-	}
-	n = 1
-	for i, env := range t.Data {
-		bytes, err := env.WriteTo(dst)
-		if err != nil {
-			return -1, err
-		}
-		n += bytes
-		if i != len(t.Data)-1 {
-			if _, err := dst.Write([]byte{','}); err != nil {
-				return -1, err
-			}
-			n++
-		}
-	}
-	if _, err := dst.Write([]byte{']'}); err != nil {
-		return -1, err
-	}
-	return n + 1, nil
-}
-
-func (t *Table) ToListString() (string, error) {
-	buffer := bytes.NewBuffer(nil)
-	if _, err := t.WriteListTo(buffer); err != nil {
-		return "", err
-	}
-	return buffer.String(), nil
-}
-
-func (t *Table) WriteTo(dst io.Writer) (n int64, err error) {
-	for _, env := range t.Data {
-		bytes, err := env.WriteTo(dst)
-		if err != nil {
-			return -1, err
-		}
-		n += bytes
-	}
-	return n, nil
-}
-
-func (t *Table) ReadListFrom(src []byte) (n int64, err error) {
-	var array []interface{}
-
-	if err := json.Unmarshal(src, &array); err != nil {
-		return -1, err
-	}
-
-	for _, item := range array {
-		if m, ok := item.(map[string]interface{}); ok {
-			env := &Env{}
-			for key, value := range m {
-				env.SetAuto(key, value)
-			}
-			t.Add(env)
-		}
-	}
-
-	return int64(len(src)), nil
-}
-
-func (t *Table) ReadFrom(src io.Reader) (n int64, err error) {
-	decoder := NewDecoder(src)
-	for {
-		env, err := decoder.Decode()
-		if err == io.EOF {
-			return 0, nil
-		} else if err != nil {
-			return -1, err
-		}
-		t.Add(env)
-	}
-}
diff --git a/engine/table_test.go b/engine/table_test.go
deleted file mode 100644
index 9a32ac9..0000000
--- a/engine/table_test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package engine
-
-import (
-	"bytes"
-	"encoding/json"
-	"testing"
-)
-
-func TestTableWriteTo(t *testing.T) {
-	table := NewTable("", 0)
-	e := &Env{}
-	e.Set("foo", "bar")
-	table.Add(e)
-	var buf bytes.Buffer
-	if _, err := table.WriteTo(&buf); err != nil {
-		t.Fatal(err)
-	}
-	output := make(map[string]string)
-	if err := json.Unmarshal(buf.Bytes(), &output); err != nil {
-		t.Fatal(err)
-	}
-	if len(output) != 1 {
-		t.Fatalf("Incorrect output: %v", output)
-	}
-	if val, exists := output["foo"]; !exists || val != "bar" {
-		t.Fatalf("Inccorect output: %v", output)
-	}
-}
-
-func TestTableSortStringValue(t *testing.T) {
-	table := NewTable("Key", 0)
-
-	e := &Env{}
-	e.Set("Key", "A")
-	table.Add(e)
-
-	e = &Env{}
-	e.Set("Key", "D")
-	table.Add(e)
-
-	e = &Env{}
-	e.Set("Key", "B")
-	table.Add(e)
-
-	e = &Env{}
-	e.Set("Key", "C")
-	table.Add(e)
-
-	table.Sort()
-
-	if len := table.Len(); len != 4 {
-		t.Fatalf("Expected 4, got %d", len)
-	}
-
-	if value := table.Data[0].Get("Key"); value != "A" {
-		t.Fatalf("Expected A, got %s", value)
-	}
-
-	if value := table.Data[1].Get("Key"); value != "B" {
-		t.Fatalf("Expected B, got %s", value)
-	}
-
-	if value := table.Data[2].Get("Key"); value != "C" {
-		t.Fatalf("Expected C, got %s", value)
-	}
-
-	if value := table.Data[3].Get("Key"); value != "D" {
-		t.Fatalf("Expected D, got %s", value)
-	}
-}
-
-func TestTableReverseSortStringValue(t *testing.T) {
-	table := NewTable("Key", 0)
-
-	e := &Env{}
-	e.Set("Key", "A")
-	table.Add(e)
-
-	e = &Env{}
-	e.Set("Key", "D")
-	table.Add(e)
-
-	e = &Env{}
-	e.Set("Key", "B")
-	table.Add(e)
-
-	e = &Env{}
-	e.Set("Key", "C")
-	table.Add(e)
-
-	table.ReverseSort()
-
-	if len := table.Len(); len != 4 {
-		t.Fatalf("Expected 4, got %d", len)
-	}
-
-	if value := table.Data[0].Get("Key"); value != "D" {
-		t.Fatalf("Expected D, got %s", value)
-	}
-
-	if value := table.Data[1].Get("Key"); value != "C" {
-		t.Fatalf("Expected B, got %s", value)
-	}
-
-	if value := table.Data[2].Get("Key"); value != "B" {
-		t.Fatalf("Expected C, got %s", value)
-	}
-
-	if value := table.Data[3].Get("Key"); value != "A" {
-		t.Fatalf("Expected A, got %s", value)
-	}
-}
diff --git a/events/events.go b/events/events.go
deleted file mode 100644
index 559bf68..0000000
--- a/events/events.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package events
-
-import (
-	"bytes"
-	"encoding/json"
-	"io"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/pkg/parsers/filters"
-	"github.com/docker/docker/utils"
-)
-
-const eventsLimit = 64
-
-type listener chan<- *utils.JSONMessage
-
-type Events struct {
-	mu          sync.RWMutex
-	events      []*utils.JSONMessage
-	subscribers []listener
-}
-
-func New() *Events {
-	return &Events{
-		events: make([]*utils.JSONMessage, 0, eventsLimit),
-	}
-}
-
-// Install installs events public api in docker engine
-func (e *Events) Install(eng *engine.Engine) error {
-	// Here you should describe public interface
-	jobs := map[string]engine.Handler{
-		"events":            e.Get,
-		"log":               e.Log,
-		"subscribers_count": e.SubscribersCount,
-	}
-	for name, job := range jobs {
-		if err := eng.Register(name, job); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (e *Events) Get(job *engine.Job) engine.Status {
-	var (
-		since   = job.GetenvInt64("since")
-		until   = job.GetenvInt64("until")
-		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
-	)
-
-	eventFilters, err := filters.FromParam(job.Getenv("filters"))
-	if err != nil {
-		return job.Error(err)
-	}
-
-	// If no until, disable timeout
-	if until == 0 {
-		timeout.Stop()
-	}
-
-	listener := make(chan *utils.JSONMessage)
-	e.subscribe(listener)
-	defer e.unsubscribe(listener)
-
-	job.Stdout.Write(nil)
-
-	// Resend every event in the [since, until] time interval.
-	if since != 0 {
-		if err := e.writeCurrent(job, since, until, eventFilters); err != nil {
-			return job.Error(err)
-		}
-	}
-
-	for {
-		select {
-		case event, ok := <-listener:
-			if !ok {
-				return engine.StatusOK
-			}
-			if err := writeEvent(job, event, eventFilters); err != nil {
-				return job.Error(err)
-			}
-		case <-timeout.C:
-			return engine.StatusOK
-		}
-	}
-}
-
-func (e *Events) Log(job *engine.Job) engine.Status {
-	if len(job.Args) != 3 {
-		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
-	}
-	// not waiting for receivers
-	go e.log(job.Args[0], job.Args[1], job.Args[2])
-	return engine.StatusOK
-}
-
-func (e *Events) SubscribersCount(job *engine.Job) engine.Status {
-	ret := &engine.Env{}
-	ret.SetInt("count", e.subscribersCount())
-	ret.WriteTo(job.Stdout)
-	return engine.StatusOK
-}
-
-func writeEvent(job *engine.Job, event *utils.JSONMessage, eventFilters filters.Args) error {
-	isFiltered := func(field string, filter []string) bool {
-		if len(filter) == 0 {
-			return false
-		}
-		for _, v := range filter {
-			if v == field {
-				return false
-			}
-			if strings.Contains(field, ":") {
-				image := strings.Split(field, ":")
-				if image[0] == v {
-					return false
-				}
-			}
-		}
-		return true
-	}
-
-	//incoming container filter can be name,id or partial id, convert and replace as a full container id
-	for i, cn := range eventFilters["container"] {
-		eventFilters["container"][i] = GetContainerId(job.Eng, cn)
-	}
-
-	if isFiltered(event.Status, eventFilters["event"]) || isFiltered(event.From, eventFilters["image"]) ||
-		isFiltered(event.ID, eventFilters["container"]) {
-		return nil
-	}
-
-	// When sending an event JSON serialization errors are ignored, but all
-	// other errors lead to the eviction of the listener.
-	if b, err := json.Marshal(event); err == nil {
-		if _, err = job.Stdout.Write(b); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (e *Events) writeCurrent(job *engine.Job, since, until int64, eventFilters filters.Args) error {
-	e.mu.RLock()
-	for _, event := range e.events {
-		if event.Time >= since && (event.Time <= until || until == 0) {
-			if err := writeEvent(job, event, eventFilters); err != nil {
-				e.mu.RUnlock()
-				return err
-			}
-		}
-	}
-	e.mu.RUnlock()
-	return nil
-}
-
-func (e *Events) subscribersCount() int {
-	e.mu.RLock()
-	c := len(e.subscribers)
-	e.mu.RUnlock()
-	return c
-}
-
-func (e *Events) log(action, id, from string) {
-	e.mu.Lock()
-	now := time.Now().UTC().Unix()
-	jm := &utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
-	if len(e.events) == cap(e.events) {
-		// discard oldest event
-		copy(e.events, e.events[1:])
-		e.events[len(e.events)-1] = jm
-	} else {
-		e.events = append(e.events, jm)
-	}
-	for _, s := range e.subscribers {
-		// We give each subscriber a 100ms time window to receive the event,
-		// after which we move to the next.
-		select {
-		case s <- jm:
-		case <-time.After(100 * time.Millisecond):
-		}
-	}
-	e.mu.Unlock()
-}
-
-func (e *Events) subscribe(l listener) {
-	e.mu.Lock()
-	e.subscribers = append(e.subscribers, l)
-	e.mu.Unlock()
-}
-
-// unsubscribe closes and removes the specified listener from the list of
-// previously registed ones.
-// It returns a boolean value indicating if the listener was successfully
-// found, closed and unregistered.
-func (e *Events) unsubscribe(l listener) bool {
-	e.mu.Lock()
-	for i, subscriber := range e.subscribers {
-		if subscriber == l {
-			close(l)
-			e.subscribers = append(e.subscribers[:i], e.subscribers[i+1:]...)
-			e.mu.Unlock()
-			return true
-		}
-	}
-	e.mu.Unlock()
-	return false
-}
-
-func GetContainerId(eng *engine.Engine, name string) string {
-	var buf bytes.Buffer
-	job := eng.Job("container_inspect", name)
-
-	var outStream io.Writer
-
-	outStream = &buf
-	job.Stdout.Set(outStream)
-
-	if err := job.Run(); err != nil {
-		return ""
-	}
-	var out struct{ ID string }
-	json.NewDecoder(&buf).Decode(&out)
-	return out.ID
-}
diff --git a/events/events_test.go b/events/events_test.go
deleted file mode 100644
index d4fc664..0000000
--- a/events/events_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package events
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/utils"
-)
-
-func TestEventsPublish(t *testing.T) {
-	e := New()
-	l1 := make(chan *utils.JSONMessage)
-	l2 := make(chan *utils.JSONMessage)
-	e.subscribe(l1)
-	e.subscribe(l2)
-	count := e.subscribersCount()
-	if count != 2 {
-		t.Fatalf("Must be 2 subscribers, got %d", count)
-	}
-	go e.log("test", "cont", "image")
-	select {
-	case msg := <-l1:
-		if len(e.events) != 1 {
-			t.Fatalf("Must be only one event, got %d", len(e.events))
-		}
-		if msg.Status != "test" {
-			t.Fatalf("Status should be test, got %s", msg.Status)
-		}
-		if msg.ID != "cont" {
-			t.Fatalf("ID should be cont, got %s", msg.ID)
-		}
-		if msg.From != "image" {
-			t.Fatalf("From should be image, got %s", msg.From)
-		}
-	case <-time.After(1 * time.Second):
-		t.Fatal("Timeout waiting for broadcasted message")
-	}
-	select {
-	case msg := <-l2:
-		if len(e.events) != 1 {
-			t.Fatalf("Must be only one event, got %d", len(e.events))
-		}
-		if msg.Status != "test" {
-			t.Fatalf("Status should be test, got %s", msg.Status)
-		}
-		if msg.ID != "cont" {
-			t.Fatalf("ID should be cont, got %s", msg.ID)
-		}
-		if msg.From != "image" {
-			t.Fatalf("From should be image, got %s", msg.From)
-		}
-	case <-time.After(1 * time.Second):
-		t.Fatal("Timeout waiting for broadcasted message")
-	}
-}
-
-func TestEventsPublishTimeout(t *testing.T) {
-	e := New()
-	l := make(chan *utils.JSONMessage)
-	e.subscribe(l)
-
-	c := make(chan struct{})
-	go func() {
-		e.log("test", "cont", "image")
-		close(c)
-	}()
-
-	select {
-	case <-c:
-	case <-time.After(time.Second):
-		t.Fatal("Timeout publishing message")
-	}
-}
-
-func TestLogEvents(t *testing.T) {
-	e := New()
-	eng := engine.New()
-	if err := e.Install(eng); err != nil {
-		t.Fatal(err)
-	}
-
-	for i := 0; i < eventsLimit+16; i++ {
-		action := fmt.Sprintf("action_%d", i)
-		id := fmt.Sprintf("cont_%d", i)
-		from := fmt.Sprintf("image_%d", i)
-		job := eng.Job("log", action, id, from)
-		if err := job.Run(); err != nil {
-			t.Fatal(err)
-		}
-	}
-	time.Sleep(50 * time.Millisecond)
-	if len(e.events) != eventsLimit {
-		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
-	}
-
-	job := eng.Job("events")
-	job.SetenvInt64("since", 1)
-	job.SetenvInt64("until", time.Now().Unix())
-	buf := bytes.NewBuffer(nil)
-	job.Stdout.Add(buf)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	buf = bytes.NewBuffer(buf.Bytes())
-	dec := json.NewDecoder(buf)
-	var msgs []utils.JSONMessage
-	for {
-		var jm utils.JSONMessage
-		if err := dec.Decode(&jm); err != nil {
-			if err == io.EOF {
-				break
-			}
-			t.Fatal(err)
-		}
-		msgs = append(msgs, jm)
-	}
-	if len(msgs) != eventsLimit {
-		t.Fatalf("Must be %d events, got %d", eventsLimit, len(msgs))
-	}
-	first := msgs[0]
-	if first.Status != "action_16" {
-		t.Fatalf("First action is %s, must be action_15", first.Status)
-	}
-	last := msgs[len(msgs)-1]
-	if last.Status != "action_79" {
-		t.Fatalf("First action is %s, must be action_79", first.Status)
-	}
-}
-
-func TestEventsCountJob(t *testing.T) {
-	e := New()
-	eng := engine.New()
-	if err := e.Install(eng); err != nil {
-		t.Fatal(err)
-	}
-	l1 := make(chan *utils.JSONMessage)
-	l2 := make(chan *utils.JSONMessage)
-	e.subscribe(l1)
-	e.subscribe(l2)
-	job := eng.Job("subscribers_count")
-	env, _ := job.Stdout.AddEnv()
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	count := env.GetInt("count")
-	if count != 2 {
-		t.Fatalf("There must be 2 subscribers, got %d", count)
-	}
-}
diff --git a/graph/export.go b/graph/export.go
index 3f7ecd3..c356a23 100644
--- a/graph/export.go
+++ b/graph/export.go
@@ -5,10 +5,9 @@
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/registry"
@@ -19,42 +18,45 @@
 // uncompressed tar ball.
 // name is the set of tags to export.
 // out is the writer where the images are written to.
-func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
-	if len(job.Args) < 1 {
-		return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
-	}
+type ImageExportConfig struct {
+	Names     []string
+	Outstream io.Writer
+}
+
+func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error {
+
 	// get image json
 	tempdir, err := ioutil.TempDir("", "docker-export-")
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer os.RemoveAll(tempdir)
 
 	rootRepoMap := map[string]Repository{}
 	addKey := func(name string, tag string, id string) {
-		log.Debugf("add key [%s:%s]", name, tag)
+		logrus.Debugf("add key [%s:%s]", name, tag)
 		if repo, ok := rootRepoMap[name]; !ok {
 			rootRepoMap[name] = Repository{tag: id}
 		} else {
 			repo[tag] = id
 		}
 	}
-	for _, name := range job.Args {
+	for _, name := range imageExportConfig.Names {
 		name = registry.NormalizeLocalName(name)
-		log.Debugf("Serializing %s", name)
+		logrus.Debugf("Serializing %s", name)
 		rootRepo := s.Repositories[name]
 		if rootRepo != nil {
 			// this is a base repo name, like 'busybox'
 			for tag, id := range rootRepo {
 				addKey(name, tag, id)
-				if err := s.exportImage(job.Eng, id, tempdir); err != nil {
-					return job.Error(err)
+				if err := s.exportImage(id, tempdir); err != nil {
+					return err
 				}
 			}
 		} else {
 			img, err := s.LookupImage(name)
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 
 			if img != nil {
@@ -66,47 +68,54 @@
 				if len(repoTag) > 0 {
 					addKey(repoName, repoTag, img.ID)
 				}
-				if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
-					return job.Error(err)
+				if err := s.exportImage(img.ID, tempdir); err != nil {
+					return err
 				}
 
 			} else {
 				// this must be an ID that didn't get looked up just right?
-				if err := s.exportImage(job.Eng, name, tempdir); err != nil {
-					return job.Error(err)
+				if err := s.exportImage(name, tempdir); err != nil {
+					return err
 				}
 			}
 		}
-		log.Debugf("End Serializing %s", name)
+		logrus.Debugf("End Serializing %s", name)
 	}
 	// write repositories, if there is something to write
 	if len(rootRepoMap) > 0 {
-		rootRepoJson, _ := json.Marshal(rootRepoMap)
-		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
-			return job.Error(err)
+		f, err := os.OpenFile(filepath.Join(tempdir, "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+		if err != nil {
+			f.Close()
+			return err
+		}
+		if err := json.NewEncoder(f).Encode(rootRepoMap); err != nil {
+			return err
+		}
+		if err := f.Close(); err != nil {
+			return err
 		}
 	} else {
-		log.Debugf("There were no repositories to write")
+		logrus.Debugf("There were no repositories to write")
 	}
 
 	fs, err := archive.Tar(tempdir, archive.Uncompressed)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer fs.Close()
 
-	if _, err := io.Copy(job.Stdout, fs); err != nil {
-		return job.Error(err)
+	if _, err := io.Copy(imageExportConfig.Outstream, fs); err != nil {
+		return err
 	}
-	log.Debugf("End export job: %s", job.Name)
-	return engine.StatusOK
+	logrus.Debugf("End export image")
+	return nil
 }
 
 // FIXME: this should be a top-level function, not a class method
-func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error {
+func (s *TagStore) exportImage(name, tempdir string) error {
 	for n := name; n != ""; {
 		// temporary directory
-		tmpImageDir := path.Join(tempdir, n)
+		tmpImageDir := filepath.Join(tempdir, n)
 		if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
 			if os.IsExist(err) {
 				return nil
@@ -117,40 +126,42 @@
 		var version = "1.0"
 		var versionBuf = []byte(version)
 
-		if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
+		if err := ioutil.WriteFile(filepath.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
 			return err
 		}
 
 		// serialize json
-		json, err := os.Create(path.Join(tmpImageDir, "json"))
+		json, err := os.Create(filepath.Join(tmpImageDir, "json"))
 		if err != nil {
 			return err
 		}
-		job := eng.Job("image_inspect", n)
-		job.SetenvBool("raw", true)
-		job.Stdout.Add(json)
-		if err := job.Run(); err != nil {
+		imageInspectRaw, err := s.LookupRaw(n)
+		if err != nil {
 			return err
 		}
+		written, err := json.Write(imageInspectRaw)
+		if err != nil {
+			return err
+		}
+		if written != len(imageInspectRaw) {
+			logrus.Warnf("%d byes should have been written instead %d have been written", written, len(imageInspectRaw))
+		}
 
 		// serialize filesystem
-		fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
+		fsTar, err := os.Create(filepath.Join(tmpImageDir, "layer.tar"))
 		if err != nil {
 			return err
 		}
-		job = eng.Job("image_tarlayer", n)
-		job.Stdout.Add(fsTar)
-		if err := job.Run(); err != nil {
+		if err := s.ImageTarLayer(n, fsTar); err != nil {
 			return err
 		}
 
 		// find parent
-		job = eng.Job("image_get", n)
-		info, _ := job.Stdout.AddEnv()
-		if err := job.Run(); err != nil {
+		img, err := s.LookupImage(n)
+		if err != nil {
 			return err
 		}
-		n = info.Get("Parent")
+		n = img.Parent
 	}
 	return nil
 }
diff --git a/graph/graph.go b/graph/graph.go
index 5c6ac97..e95887f 100644
--- a/graph/graph.go
+++ b/graph/graph.go
@@ -7,24 +7,24 @@
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
 	"path/filepath"
 	"runtime"
 	"strings"
 	"syscall"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
 )
 
 // A Graph is a store for versioned filesystem images and the relationship between them.
@@ -42,7 +42,7 @@
 		return nil, err
 	}
 	// Create the root directory if it doesn't exists
-	if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
+	if err := system.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
 		return nil, err
 	}
 
@@ -70,14 +70,14 @@
 		}
 	}
 	graph.idIndex = truncindex.NewTruncIndex(ids)
-	log.Debugf("Restored %d elements", len(dir))
+	logrus.Debugf("Restored %d elements", len(dir))
 	return nil
 }
 
 // FIXME: Implement error subclass instead of looking at the error text
 // Note: This is the way golang implements os.IsNotExists on Plan9
-func (graph *Graph) IsNotExist(err error) bool {
-	return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such"))
+func (graph *Graph) IsNotExist(err error, id string) bool {
+	return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such")) && strings.Contains(err.Error(), id)
 }
 
 // Exists returns true if an image is registered at the given id.
@@ -121,7 +121,7 @@
 // Create creates a new image and registers it in the graph.
 func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
 	img := &image.Image{
-		ID:            common.GenerateRandomID(),
+		ID:            stringid.GenerateRandomID(),
 		Comment:       comment,
 		Created:       time.Now().UTC(),
 		DockerVersion: dockerversion.VERSION,
@@ -153,7 +153,7 @@
 			graph.driver.Remove(img.ID)
 		}
 	}()
-	if err := utils.ValidateID(img.ID); err != nil {
+	if err := image.ValidateID(img.ID); err != nil {
 		return err
 	}
 	// (This is a convenience to save time. Race conditions are taken care of by os.Rename)
@@ -201,7 +201,7 @@
 //   The archive is stored on disk and will be automatically deleted as soon as has been read.
 //   If output is not nil, a human-readable progress bar will be written to it.
 //   FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
-func (graph *Graph) TempLayerArchive(id string, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) {
+func (graph *Graph) TempLayerArchive(id string, sf *streamformatter.StreamFormatter, output io.Writer) (*archive.TempArchive, error) {
 	image, err := graph.Get(id)
 	if err != nil {
 		return nil, err
@@ -220,7 +220,7 @@
 		Formatter: sf,
 		Size:      0,
 		NewLines:  false,
-		ID:        common.TruncateID(id),
+		ID:        stringid.TruncateID(id),
 		Action:    "Buffering to disk",
 	})
 	defer progressReader.Close()
@@ -229,8 +229,8 @@
 
 // Mktemp creates a temporary sub-directory inside the graph's filesystem.
 func (graph *Graph) Mktemp(id string) (string, error) {
-	dir := path.Join(graph.Root, "_tmp", common.GenerateRandomID())
-	if err := os.MkdirAll(dir, 0700); err != nil {
+	dir := filepath.Join(graph.Root, "_tmp", stringid.GenerateRandomID())
+	if err := system.MkdirAll(dir, 0700); err != nil {
 		return "", err
 	}
 	return dir, nil
@@ -254,9 +254,6 @@
 	if err != nil {
 		return 0, "", err
 	}
-	if err = f.Sync(); err != nil {
-		return 0, "", err
-	}
 	n, err := f.Seek(0, os.SEEK_CUR)
 	if err != nil {
 		return 0, "", err
@@ -290,28 +287,28 @@
 		parts := strings.Split(pth, "/")
 		prev := "/"
 		for _, p := range parts[1:] {
-			prev = path.Join(prev, p)
-			syscall.Unlink(path.Join(initLayer, prev))
+			prev = filepath.Join(prev, p)
+			syscall.Unlink(filepath.Join(initLayer, prev))
 		}
 
-		if _, err := os.Stat(path.Join(initLayer, pth)); err != nil {
+		if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
 			if os.IsNotExist(err) {
-				if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil {
+				if err := system.MkdirAll(filepath.Join(initLayer, filepath.Dir(pth)), 0755); err != nil {
 					return err
 				}
 				switch typ {
 				case "dir":
-					if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil {
+					if err := system.MkdirAll(filepath.Join(initLayer, pth), 0755); err != nil {
 						return err
 					}
 				case "file":
-					f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755)
+					f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
 					if err != nil {
 						return err
 					}
 					f.Close()
 				default:
-					if err := os.Symlink(typ, path.Join(initLayer, pth)); err != nil {
+					if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
 						return err
 					}
 				}
@@ -348,9 +345,8 @@
 	tmp, err := graph.Mktemp("")
 	graph.idIndex.Delete(id)
 	if err == nil {
-		err = os.Rename(graph.ImageRoot(id), tmp)
-		// On err make tmp point to old dir and cleanup unused tmp dir
-		if err != nil {
+		if err := os.Rename(graph.ImageRoot(id), tmp); err != nil {
+			// On err make tmp point to old dir and cleanup unused tmp dir
 			os.RemoveAll(tmp)
 			tmp = graph.ImageRoot(id)
 		}
@@ -433,7 +429,7 @@
 }
 
 func (graph *Graph) ImageRoot(id string) string {
-	return path.Join(graph.Root, id)
+	return filepath.Join(graph.Root, id)
 }
 
 func (graph *Graph) Driver() graphdriver.Driver {
diff --git a/graph/graph_test.go b/graph/graph_test.go
new file mode 100644
index 0000000..81471b6
--- /dev/null
+++ b/graph/graph_test.go
@@ -0,0 +1,306 @@
+package graph
+
+import (
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+func TestMount(t *testing.T) {
+	graph, driver := tempGraph(t)
+	defer os.RemoveAll(graph.Root)
+	defer driver.Cleanup()
+
+	archive, err := fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	image, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	rootfs := path.Join(tmp, "rootfs")
+	if err := os.MkdirAll(rootfs, 0700); err != nil {
+		t.Fatal(err)
+	}
+	rw := path.Join(tmp, "rw")
+	if err := os.MkdirAll(rw, 0700); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := driver.Get(image.ID, ""); err != nil {
+		t.Fatal(err)
+	}
+
+}
+
+func TestInit(t *testing.T) {
+	graph, _ := tempGraph(t)
+	defer nukeGraph(graph)
+	// Root should exist
+	if _, err := os.Stat(graph.Root); err != nil {
+		t.Fatal(err)
+	}
+	// Map() should be empty
+	if l, err := graph.Map(); err != nil {
+		t.Fatal(err)
+	} else if len(l) != 0 {
+		t.Fatalf("len(Map()) should return %d, not %d", 0, len(l))
+	}
+}
+
+// Test that Register can be interrupted cleanly without side effects
+func TestInterruptedRegister(t *testing.T) {
+	graph, _ := tempGraph(t)
+	defer nukeGraph(graph)
+	badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
+	image := &image.Image{
+		ID:      stringid.GenerateRandomID(),
+		Comment: "testing",
+		Created: time.Now(),
+	}
+	w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling)
+	graph.Register(image, badArchive)
+	if _, err := graph.Get(image.ID); err == nil {
+		t.Fatal("Image should not exist after Register is interrupted")
+	}
+	// Registering the same image again should succeed if the first register was interrupted
+	goodArchive, err := fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := graph.Register(image, goodArchive); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
+//       create multiple, check the amount of images and paths, etc..)
+func TestGraphCreate(t *testing.T) {
+	graph, _ := tempGraph(t)
+	defer nukeGraph(graph)
+	archive, err := fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	img, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := image.ValidateID(img.ID); err != nil {
+		t.Fatal(err)
+	}
+	if img.Comment != "Testing" {
+		t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment)
+	}
+	if img.DockerVersion != dockerversion.VERSION {
+		t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion)
+	}
+	images, err := graph.Map()
+	if err != nil {
+		t.Fatal(err)
+	} else if l := len(images); l != 1 {
+		t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
+	}
+	if images[img.ID] == nil {
+		t.Fatalf("Could not find image with id %s", img.ID)
+	}
+}
+
+func TestRegister(t *testing.T) {
+	graph, _ := tempGraph(t)
+	defer nukeGraph(graph)
+	archive, err := fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	image := &image.Image{
+		ID:      stringid.GenerateRandomID(),
+		Comment: "testing",
+		Created: time.Now(),
+	}
+	err = graph.Register(image, archive)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if images, err := graph.Map(); err != nil {
+		t.Fatal(err)
+	} else if l := len(images); l != 1 {
+		t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
+	}
+	if resultImg, err := graph.Get(image.ID); err != nil {
+		t.Fatal(err)
+	} else {
+		if resultImg.ID != image.ID {
+			t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID)
+		}
+		if resultImg.Comment != image.Comment {
+			t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment)
+		}
+	}
+}
+
+// Test that an image can be deleted by its shorthand prefix
+func TestDeletePrefix(t *testing.T) {
+	graph, _ := tempGraph(t)
+	defer nukeGraph(graph)
+	img := createTestImage(graph, t)
+	if err := graph.Delete(stringid.TruncateID(img.ID)); err != nil {
+		t.Fatal(err)
+	}
+	assertNImages(graph, t, 0)
+}
+
+func TestDelete(t *testing.T) {
+	graph, _ := tempGraph(t)
+	defer nukeGraph(graph)
+	archive, err := fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertNImages(graph, t, 0)
+	img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertNImages(graph, t, 1)
+	if err := graph.Delete(img.ID); err != nil {
+		t.Fatal(err)
+	}
+	assertNImages(graph, t, 0)
+
+	archive, err = fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Test 2 create (same name) / 1 delete
+	img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	archive, err = fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil {
+		t.Fatal(err)
+	}
+	assertNImages(graph, t, 2)
+	if err := graph.Delete(img1.ID); err != nil {
+		t.Fatal(err)
+	}
+	assertNImages(graph, t, 1)
+
+	// Test delete wrong name
+	if err := graph.Delete("Not_foo"); err == nil {
+		t.Fatalf("Deleting wrong ID should return an error")
+	}
+	assertNImages(graph, t, 1)
+
+	archive, err = fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Test delete twice (pull -> rm -> pull -> rm)
+	if err := graph.Register(img1, archive); err != nil {
+		t.Fatal(err)
+	}
+	if err := graph.Delete(img1.ID); err != nil {
+		t.Fatal(err)
+	}
+	assertNImages(graph, t, 1)
+}
+
+func TestByParent(t *testing.T) {
+	archive1, _ := fakeTar()
+	archive2, _ := fakeTar()
+	archive3, _ := fakeTar()
+
+	graph, _ := tempGraph(t)
+	defer nukeGraph(graph)
+	parentImage := &image.Image{
+		ID:      stringid.GenerateRandomID(),
+		Comment: "parent",
+		Created: time.Now(),
+		Parent:  "",
+	}
+	childImage1 := &image.Image{
+		ID:      stringid.GenerateRandomID(),
+		Comment: "child1",
+		Created: time.Now(),
+		Parent:  parentImage.ID,
+	}
+	childImage2 := &image.Image{
+		ID:      stringid.GenerateRandomID(),
+		Comment: "child2",
+		Created: time.Now(),
+		Parent:  parentImage.ID,
+	}
+	_ = graph.Register(parentImage, archive1)
+	_ = graph.Register(childImage1, archive2)
+	_ = graph.Register(childImage2, archive3)
+
+	byParent, err := graph.ByParent()
+	if err != nil {
+		t.Fatal(err)
+	}
+	numChildren := len(byParent[parentImage.ID])
+	if numChildren != 2 {
+		t.Fatalf("Expected 2 children, found %d", numChildren)
+	}
+}
+
+func createTestImage(graph *Graph, t *testing.T) *image.Image {
+	archive, err := fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	img, err := graph.Create(archive, "", "", "Test image", "", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return img
+}
+
+func assertNImages(graph *Graph, t *testing.T, n int) {
+	if images, err := graph.Map(); err != nil {
+		t.Fatal(err)
+	} else if actualN := len(images); actualN != n {
+		t.Fatalf("Expected %d images, found %d", n, actualN)
+	}
+}
+
+func tempGraph(t *testing.T) (*Graph, graphdriver.Driver) {
+	tmp, err := ioutil.TempDir("", "docker-graph-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	driver, err := graphdriver.New(tmp, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	graph, err := NewGraph(tmp, driver)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return graph, driver
+}
+
+func nukeGraph(graph *Graph) {
+	graph.Driver().Cleanup()
+	os.RemoveAll(graph.Root)
+}
diff --git a/graph/history.go b/graph/history.go
index 7f5063e..56e759a 100644
--- a/graph/history.go
+++ b/graph/history.go
@@ -3,19 +3,15 @@
 import (
 	"strings"
 
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/utils"
 )
 
-func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
-	}
-	name := job.Args[0]
+func (s *TagStore) History(name string) ([]*types.ImageHistory, error) {
 	foundImage, err := s.LookupImage(name)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 
 	lookupMap := make(map[string][]string)
@@ -29,19 +25,19 @@
 		}
 	}
 
-	outs := engine.NewTable("Created", 0)
+	history := []*types.ImageHistory{}
+
 	err = foundImage.WalkHistory(func(img *image.Image) error {
-		out := &engine.Env{}
-		out.SetJson("Id", img.ID)
-		out.SetInt64("Created", img.Created.Unix())
-		out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
-		out.SetList("Tags", lookupMap[img.ID])
-		out.SetInt64("Size", img.Size)
-		outs.Add(out)
+		history = append(history, &types.ImageHistory{
+			ID:        img.ID,
+			Created:   img.Created.Unix(),
+			CreatedBy: strings.Join(img.ContainerConfig.Cmd.Slice(), " "),
+			Tags:      lookupMap[img.ID],
+			Size:      img.Size,
+			Comment:   img.Comment,
+		})
 		return nil
 	})
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+
+	return history, err
 }
diff --git a/graph/import.go b/graph/import.go
index 44b1ecb..2e08e6c 100644
--- a/graph/import.go
+++ b/graph/import.go
@@ -1,57 +1,52 @@
 package graph
 
 import (
-	"bytes"
-	"encoding/json"
+	"io"
 	"net/http"
 	"net/url"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 )
 
-func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 2 && n != 3 {
-		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
-	}
+type ImageImportConfig struct {
+	Changes         []string
+	InConfig        io.ReadCloser
+	OutStream       io.Writer
+	ContainerConfig *runconfig.Config
+}
+
+func (s *TagStore) Import(src string, repo string, tag string, imageImportConfig *ImageImportConfig) error {
 	var (
-		src          = job.Args[0]
-		repo         = job.Args[1]
-		tag          string
-		sf           = utils.NewStreamFormatter(job.GetenvBool("json"))
-		archive      archive.ArchiveReader
-		resp         *http.Response
-		stdoutBuffer = bytes.NewBuffer(nil)
-		newConfig    runconfig.Config
+		sf      = streamformatter.NewJSONStreamFormatter()
+		archive archive.ArchiveReader
+		resp    *http.Response
 	)
-	if len(job.Args) > 2 {
-		tag = job.Args[2]
-	}
 
 	if src == "-" {
-		archive = job.Stdin
+		archive = imageImportConfig.InConfig
 	} else {
 		u, err := url.Parse(src)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		if u.Scheme == "" {
 			u.Scheme = "http"
 			u.Host = src
 			u.Path = ""
 		}
-		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
-		resp, err = utils.Download(u.String())
+		imageImportConfig.OutStream.Write(sf.FormatStatus("", "Downloading from %s", u))
+		resp, err = httputils.Download(u.String())
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		progressReader := progressreader.New(progressreader.Config{
 			In:        resp.Body,
-			Out:       job.Stdout,
+			Out:       imageImportConfig.OutStream,
 			Formatter: sf,
 			Size:      int(resp.ContentLength),
 			NewLines:  true,
@@ -62,36 +57,22 @@
 		archive = progressReader
 	}
 
-	buildConfigJob := job.Eng.Job("build_config")
-	buildConfigJob.Stdout.Add(stdoutBuffer)
-	buildConfigJob.Setenv("changes", job.Getenv("changes"))
-	// FIXME this should be remove when we remove deprecated config param
-	buildConfigJob.Setenv("config", job.Getenv("config"))
-
-	if err := buildConfigJob.Run(); err != nil {
-		return job.Error(err)
-	}
-	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
-		return job.Error(err)
-	}
-
-	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, &newConfig)
+	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, imageImportConfig.ContainerConfig)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	// Optionally register the image at REPO/TAG
 	if repo != "" {
-		if err := s.Set(repo, tag, img.ID, true); err != nil {
-			return job.Error(err)
+		if err := s.Tag(repo, tag, img.ID, true); err != nil {
+			return err
 		}
 	}
-	job.Stdout.Write(sf.FormatStatus("", img.ID))
+	imageImportConfig.OutStream.Write(sf.FormatStatus("", img.ID))
 	logID := img.ID
 	if tag != "" {
 		logID = utils.ImageReference(logID, tag)
 	}
-	if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil {
-		log.Errorf("Error logging event 'import' for %s: %s", logID, err)
-	}
-	return engine.StatusOK
+
+	s.eventsService.Log("import", logID, "")
+	return nil
 }
diff --git a/graph/list.go b/graph/list.go
index 9f7bccd..f95508e 100644
--- a/graph/list.go
+++ b/graph/list.go
@@ -1,11 +1,13 @@
 package graph
 
 import (
+	"fmt"
 	"log"
 	"path"
+	"sort"
 	"strings"
 
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/parsers/filters"
 	"github.com/docker/docker/utils"
@@ -16,47 +18,60 @@
 	"label":    {},
 }
 
-func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
+type ImagesConfig struct {
+	Filters string
+	Filter  string
+	All     bool
+}
+
+type ByCreated []*types.Image
+
+func (r ByCreated) Len() int           { return len(r) }
+func (r ByCreated) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
+func (r ByCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }
+
+func (s *TagStore) Images(config *ImagesConfig) ([]*types.Image, error) {
 	var (
-		allImages   map[string]*image.Image
-		err         error
-		filt_tagged = true
-		filt_label  = false
+		allImages  map[string]*image.Image
+		err        error
+		filtTagged = true
+		filtLabel  = false
 	)
 
-	imageFilters, err := filters.FromParam(job.Getenv("filters"))
+	imageFilters, err := filters.FromParam(config.Filters)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
 	for name := range imageFilters {
 		if _, ok := acceptedImageFilterTags[name]; !ok {
-			return job.Errorf("Invalid filter '%s'", name)
+			return nil, fmt.Errorf("Invalid filter '%s'", name)
 		}
 	}
 
 	if i, ok := imageFilters["dangling"]; ok {
 		for _, value := range i {
 			if strings.ToLower(value) == "true" {
-				filt_tagged = false
+				filtTagged = false
 			}
 		}
 	}
 
-	_, filt_label = imageFilters["label"]
+	_, filtLabel = imageFilters["label"]
 
-	if job.GetenvBool("all") && filt_tagged {
+	if config.All && filtTagged {
 		allImages, err = s.graph.Map()
 	} else {
 		allImages, err = s.graph.Heads()
 	}
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
-	lookup := make(map[string]*engine.Env)
+
+	lookup := make(map[string]*types.Image)
 	s.Lock()
 	for repoName, repository := range s.Repositories {
-		if job.Getenv("filter") != "" {
-			if match, _ := path.Match(job.Getenv("filter"), repoName); !match {
+		if config.Filter != "" {
+			if match, _ := path.Match(config.Filter, repoName); !match {
 				continue
 			}
 		}
@@ -68,12 +83,12 @@
 				continue
 			}
 
-			if out, exists := lookup[id]; exists {
-				if filt_tagged {
+			if lImage, exists := lookup[id]; exists {
+				if filtTagged {
 					if utils.DigestReference(ref) {
-						out.SetList("RepoDigests", append(out.GetList("RepoDigests"), imgRef))
+						lImage.RepoDigests = append(lImage.RepoDigests, imgRef)
 					} else { // Tag Ref.
-						out.SetList("RepoTags", append(out.GetList("RepoTags"), imgRef))
+						lImage.RepoTags = append(lImage.RepoTags, imgRef)
 					}
 				}
 			} else {
@@ -82,24 +97,24 @@
 				if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
 					continue
 				}
-				if filt_tagged {
-					out := &engine.Env{}
-					out.SetJson("ParentId", image.Parent)
-					out.SetJson("Id", image.ID)
-					out.SetInt64("Created", image.Created.Unix())
-					out.SetInt64("Size", image.Size)
-					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
-					out.SetJson("Labels", image.ContainerConfig.Labels)
+				if filtTagged {
+					newImage := new(types.Image)
+					newImage.ParentId = image.Parent
+					newImage.ID = image.ID
+					newImage.Created = int(image.Created.Unix())
+					newImage.Size = int(image.Size)
+					newImage.VirtualSize = int(image.GetParentsSize(0) + image.Size)
+					newImage.Labels = image.ContainerConfig.Labels
 
 					if utils.DigestReference(ref) {
-						out.SetList("RepoTags", []string{})
-						out.SetList("RepoDigests", []string{imgRef})
+						newImage.RepoTags = []string{}
+						newImage.RepoDigests = []string{imgRef}
 					} else {
-						out.SetList("RepoTags", []string{imgRef})
-						out.SetList("RepoDigests", []string{})
+						newImage.RepoTags = []string{imgRef}
+						newImage.RepoDigests = []string{}
 					}
 
-					lookup[id] = out
+					lookup[id] = newImage
 				}
 			}
 
@@ -107,33 +122,32 @@
 	}
 	s.Unlock()
 
-	outs := engine.NewTable("Created", len(lookup))
+	images := []*types.Image{}
 	for _, value := range lookup {
-		outs.Add(value)
+		images = append(images, value)
 	}
 
 	// Display images which aren't part of a repository/tag
-	if job.Getenv("filter") == "" || filt_label {
+	if config.Filter == "" || filtLabel {
 		for _, image := range allImages {
 			if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
 				continue
 			}
-			out := &engine.Env{}
-			out.SetJson("ParentId", image.Parent)
-			out.SetList("RepoTags", []string{"<none>:<none>"})
-			out.SetList("RepoDigests", []string{"<none>@<none>"})
-			out.SetJson("Id", image.ID)
-			out.SetInt64("Created", image.Created.Unix())
-			out.SetInt64("Size", image.Size)
-			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
-			out.SetJson("Labels", image.ContainerConfig.Labels)
-			outs.Add(out)
+			newImage := new(types.Image)
+			newImage.ParentId = image.Parent
+			newImage.RepoTags = []string{"<none>:<none>"}
+			newImage.RepoDigests = []string{"<none>@<none>"}
+			newImage.ID = image.ID
+			newImage.Created = int(image.Created.Unix())
+			newImage.Size = int(image.Size)
+			newImage.VirtualSize = int(image.GetParentsSize(0) + image.Size)
+			newImage.Labels = image.ContainerConfig.Labels
+
+			images = append(images, newImage)
 		}
 	}
 
-	outs.ReverseSort()
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+	sort.Sort(sort.Reverse(ByCreated(images)))
+
+	return images, nil
 }
diff --git a/graph/load.go b/graph/load.go
index c257e9e..9afde34 100644
--- a/graph/load.go
+++ b/graph/load.go
@@ -1,40 +1,39 @@
-// +build linux
+// +build linux windows
 
 package graph
 
 import (
 	"encoding/json"
+	"io"
 	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/utils"
 )
 
 // Loads a set of images into the repository. This is the complementary of ImageExport.
 // The input stream is an uncompressed tar ball containing images and metadata.
-func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
+func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error {
 	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer os.RemoveAll(tmpImageDir)
 
 	var (
-		repoDir = path.Join(tmpImageDir, "repo")
+		repoDir = filepath.Join(tmpImageDir, "repo")
 	)
 
 	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
-		return job.Error(err)
+		return err
 	}
 	images, err := s.graph.Map()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	excludes := make([]string, len(images))
 	i := 0
@@ -42,73 +41,77 @@
 		excludes[i] = k
 		i++
 	}
-	if err := chrootarchive.Untar(job.Stdin, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil {
-		return job.Error(err)
+	if err := chrootarchive.Untar(inTar, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil {
+		return err
 	}
 
 	dirs, err := ioutil.ReadDir(repoDir)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	for _, d := range dirs {
 		if d.IsDir() {
-			if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
-				return job.Error(err)
+			if err := s.recursiveLoad(d.Name(), tmpImageDir); err != nil {
+				return err
 			}
 		}
 	}
 
-	repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
-	if err == nil {
-		repositories := map[string]Repository{}
-		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
-			return job.Error(err)
+	reposJSONFile, err := os.Open(filepath.Join(tmpImageDir, "repo", "repositories"))
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
 		}
+		return nil
+	}
+	defer reposJSONFile.Close()
 
-		for imageName, tagMap := range repositories {
-			for tag, address := range tagMap {
-				if err := s.Set(imageName, tag, address, true); err != nil {
-					return job.Error(err)
-				}
-			}
-		}
-	} else if !os.IsNotExist(err) {
-		return job.Error(err)
+	repositories := map[string]Repository{}
+	if err := json.NewDecoder(reposJSONFile).Decode(&repositories); err != nil {
+		return err
 	}
 
-	return engine.StatusOK
+	for imageName, tagMap := range repositories {
+		for tag, address := range tagMap {
+			if err := s.SetLoad(imageName, tag, address, true, outStream); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
 }
 
-func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
-	if err := eng.Job("image_get", address).Run(); err != nil {
-		log.Debugf("Loading %s", address)
+func (s *TagStore) recursiveLoad(address, tmpImageDir string) error {
+	if _, err := s.LookupImage(address); err != nil {
+		logrus.Debugf("Loading %s", address)
 
-		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
+		imageJson, err := ioutil.ReadFile(filepath.Join(tmpImageDir, "repo", address, "json"))
 		if err != nil {
-			log.Debugf("Error reading json", err)
+			logrus.Debugf("Error reading json", err)
 			return err
 		}
 
-		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
+		layer, err := os.Open(filepath.Join(tmpImageDir, "repo", address, "layer.tar"))
 		if err != nil {
-			log.Debugf("Error reading embedded tar", err)
+			logrus.Debugf("Error reading embedded tar", err)
 			return err
 		}
 		img, err := image.NewImgJSON(imageJson)
 		if err != nil {
-			log.Debugf("Error unmarshalling json", err)
+			logrus.Debugf("Error unmarshalling json", err)
 			return err
 		}
-		if err := utils.ValidateID(img.ID); err != nil {
-			log.Debugf("Error validating ID: %s", err)
+		if err := image.ValidateID(img.ID); err != nil {
+			logrus.Debugf("Error validating ID: %s", err)
 			return err
 		}
 
 		// ensure no two downloads of the same layer happen at the same time
 		if c, err := s.poolAdd("pull", "layer:"+img.ID); err != nil {
 			if c != nil {
-				log.Debugf("Image (id: %s) load is already running, waiting: %v", img.ID, err)
+				logrus.Debugf("Image (id: %s) load is already running, waiting: %v", img.ID, err)
 				<-c
 				return nil
 			}
@@ -120,7 +123,7 @@
 
 		if img.Parent != "" {
 			if !s.graph.Exists(img.Parent) {
-				if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
+				if err := s.recursiveLoad(img.Parent, tmpImageDir); err != nil {
 					return err
 				}
 			}
@@ -129,7 +132,7 @@
 			return err
 		}
 	}
-	log.Debugf("Completed processing %s", address)
+	logrus.Debugf("Completed processing %s", address)
 
 	return nil
 }
diff --git a/graph/load_unsupported.go b/graph/load_unsupported.go
index 164e917..45bdd98 100644
--- a/graph/load_unsupported.go
+++ b/graph/load_unsupported.go
@@ -1,11 +1,12 @@
-// +build !linux
+// +build !linux,!windows
 
 package graph
 
 import (
-	"github.com/docker/docker/engine"
+	"fmt"
+	"io"
 )
 
-func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
-	return job.Errorf("CmdLoad is not supported on this platform")
+func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error {
+	return fmt.Errorf("Load is not supported on this platform")
 }
diff --git a/graph/manifest.go b/graph/manifest.go
index 3b1d825..053a185 100644
--- a/graph/manifest.go
+++ b/graph/manifest.go
@@ -1,14 +1,13 @@
 package graph
 
 import (
-	"bytes"
 	"encoding/json"
 	"fmt"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/registry"
+	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libtrust"
 )
@@ -18,7 +17,7 @@
 // contains no signatures by a trusted key for the name in the manifest, the
 // image is not considered verified. The parsed manifest object and a boolean
 // for whether the manifest is verified is returned.
-func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) {
+func (s *TagStore) loadManifest(manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) {
 	sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures")
 	if err != nil {
 		return nil, false, fmt.Errorf("error parsing payload: %s", err)
@@ -69,32 +68,28 @@
 
 	var verified bool
 	for _, key := range keys {
-		job := eng.Job("trust_key_check")
-		b, err := key.MarshalJSON()
-		if err != nil {
-			return nil, false, fmt.Errorf("error marshalling public key: %s", err)
-		}
 		namespace := manifest.Name
 		if namespace[0] != '/' {
 			namespace = "/" + namespace
 		}
-		stdoutBuffer := bytes.NewBuffer(nil)
-
-		job.Args = append(job.Args, namespace)
-		job.Setenv("PublicKey", string(b))
-		// Check key has read/write permission (0x03)
-		job.SetenvInt("Permission", 0x03)
-		job.Stdout.Add(stdoutBuffer)
-		if err = job.Run(); err != nil {
-			return nil, false, fmt.Errorf("error running key check: %s", err)
+		b, err := key.MarshalJSON()
+		if err != nil {
+			return nil, false, fmt.Errorf("error marshalling public key: %s", err)
 		}
-		result := engine.Tail(stdoutBuffer, 1)
-		log.Debugf("Key check result: %q", result)
-		if result == "verified" {
-			verified = true
+		// Check key has read/write permission (0x03)
+		v, err := s.trustService.CheckKey(namespace, b, 0x03)
+		if err != nil {
+			vErr, ok := err.(trust.NotVerifiedError)
+			if !ok {
+				return nil, false, fmt.Errorf("error running key check: %s", err)
+			}
+			logrus.Debugf("Key check result: %v", vErr)
+		}
+		verified = v
+		if verified {
+			logrus.Debug("Key check result: verified")
 		}
 	}
-
 	return &manifest, verified, nil
 }
 
diff --git a/graph/manifest_test.go b/graph/manifest_test.go
index 9137041..2702dca 100644
--- a/graph/manifest_test.go
+++ b/graph/manifest_test.go
@@ -135,7 +135,7 @@
 	if err := store.graph.Register(img, archive); err != nil {
 		t.Fatal(err)
 	}
-	if err := store.Set(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil {
+	if err := store.Tag(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil {
 		t.Fatal(err)
 	}
 
diff --git a/graph/pull.go b/graph/pull.go
index adad6f3..b2a9ef1 100644
--- a/graph/pull.go
+++ b/graph/pull.go
@@ -10,104 +10,204 @@
 	"strings"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/engine"
+	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/transport"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/utils"
 )
 
-func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 && n != 2 {
-		return job.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name)
-	}
+type ImagePullConfig struct {
+	MetaHeaders map[string][]string
+	AuthConfig  *cliconfig.AuthConfig
+	OutStream   io.Writer
+}
 
+func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error {
 	var (
-		localName   = job.Args[0]
-		tag         string
-		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
-		authConfig  = &registry.AuthConfig{}
-		metaHeaders map[string][]string
+		sf = streamformatter.NewJSONStreamFormatter()
 	)
 
 	// Resolve the Repository name from fqn to RepositoryInfo
-	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
+	repoInfo, err := s.registryService.ResolveRepository(image)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
-	if len(job.Args) > 1 {
-		tag = job.Args[1]
+	if err := validateRepoName(repoInfo.LocalName); err != nil {
+		return err
 	}
 
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("metaHeaders", &metaHeaders)
-
 	c, err := s.poolAdd("pull", utils.ImageReference(repoInfo.LocalName, tag))
 	if err != nil {
 		if c != nil {
 			// Another pull of the same repository is already taking place; just wait for it to finish
-			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
+			imagePullConfig.OutStream.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
 			<-c
-			return engine.StatusOK
+			return nil
 		}
-		return job.Error(err)
+		return err
 	}
 	defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag))
 
-	log.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
-	endpoint, err := repoInfo.GetEndpoint()
-	if err != nil {
-		return job.Error(err)
-	}
-
-	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
-	if err != nil {
-		return job.Error(err)
-	}
-
 	logName := repoInfo.LocalName
 	if tag != "" {
 		logName = utils.ImageReference(logName, tag)
 	}
 
+	// Attempt pulling official content from a provided v2 mirror
+	if repoInfo.Index.Official {
+		v2mirrorEndpoint, v2mirrorRepoInfo, err := configureV2Mirror(repoInfo, s.registryService)
+		if err != nil {
+			logrus.Errorf("Error configuring mirrors: %s", err)
+			return err
+		}
+
+		if v2mirrorEndpoint != nil {
+			logrus.Debugf("Attempting to pull from v2 mirror: %s", v2mirrorEndpoint.URL)
+			return s.pullFromV2Mirror(v2mirrorEndpoint, v2mirrorRepoInfo, imagePullConfig, tag, sf, logName)
+		}
+	}
+
+	logrus.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
+
+	endpoint, err := repoInfo.GetEndpoint(imagePullConfig.MetaHeaders)
+	if err != nil {
+		return err
+	}
+	// TODO(tiborvass): reuse client from endpoint?
+	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
+	tr := transport.NewTransport(
+		registry.NewTransport(registry.ReceiveTimeout, endpoint.IsSecure),
+		registry.DockerHeaders(imagePullConfig.MetaHeaders)...,
+	)
+	client := registry.HTTPClient(tr)
+	r, err := registry.NewSession(client, imagePullConfig.AuthConfig, endpoint)
+	if err != nil {
+		return err
+	}
+
 	if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
 		if repoInfo.Official {
-			j := job.Eng.Job("trust_update_base")
-			if err = j.Run(); err != nil {
-				log.Errorf("error updating trust base graph: %s", err)
-			}
+			s.trustService.UpdateBase()
 		}
 
-		log.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName)
-		if err := s.pullV2Repository(job.Eng, r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err == nil {
-			if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
-				log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
-			}
-			return engine.StatusOK
+		logrus.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName)
+		if err := s.pullV2Repository(r, imagePullConfig.OutStream, repoInfo, tag, sf); err == nil {
+			s.eventsService.Log("pull", logName, "")
+			return nil
 		} else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable {
-			log.Errorf("Error from V2 registry: %s", err)
+			logrus.Errorf("Error from V2 registry: %s", err)
 		}
 
-		log.Debug("image does not exist on v2 registry, falling back to v1")
+		logrus.Debug("image does not exist on v2 registry, falling back to v1")
 	}
 
-	log.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
-	if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil {
-		return job.Error(err)
+	if utils.DigestReference(tag) {
+		return fmt.Errorf("pulling with digest reference failed from v2 registry")
 	}
 
-	if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
-		log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
+	logrus.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
+	if err = s.pullRepository(r, imagePullConfig.OutStream, repoInfo, tag, sf); err != nil {
+		return err
 	}
 
-	return engine.StatusOK
+	s.eventsService.Log("pull", logName, "")
+
+	return nil
+
 }
 
-func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
+func makeMirrorRepoInfo(repoInfo *registry.RepositoryInfo, mirror string) *registry.RepositoryInfo {
+	mirrorRepo := &registry.RepositoryInfo{
+		RemoteName:    repoInfo.RemoteName,
+		LocalName:     repoInfo.LocalName,
+		CanonicalName: repoInfo.CanonicalName,
+		Official:      false,
+
+		Index: &registry.IndexInfo{
+			Official: false,
+			Secure:   repoInfo.Index.Secure,
+			Name:     mirror,
+			Mirrors:  []string{},
+		},
+	}
+	return mirrorRepo
+}
+
+func configureV2Mirror(repoInfo *registry.RepositoryInfo, s *registry.Service) (*registry.Endpoint, *registry.RepositoryInfo, error) {
+	mirrors := repoInfo.Index.Mirrors
+
+	if len(mirrors) == 0 {
+		// no mirrors configured
+		return nil, nil, nil
+	}
+
+	v1MirrorCount := 0
+	var v2MirrorEndpoint *registry.Endpoint
+	var v2MirrorRepoInfo *registry.RepositoryInfo
+	var lastErr error
+	for _, mirror := range mirrors {
+		mirrorRepoInfo := makeMirrorRepoInfo(repoInfo, mirror)
+		endpoint, err := registry.NewEndpoint(mirrorRepoInfo.Index, nil)
+		if err != nil {
+			logrus.Errorf("Unable to create endpoint for %s: %s", mirror, err)
+			lastErr = err
+			continue
+		}
+		if endpoint.Version == 2 {
+			if v2MirrorEndpoint == nil {
+				v2MirrorEndpoint = endpoint
+				v2MirrorRepoInfo = mirrorRepoInfo
+			} else {
+				// > 1 v2 mirrors given
+				return nil, nil, fmt.Errorf("multiple v2 mirrors configured")
+			}
+		} else {
+			v1MirrorCount++
+		}
+	}
+
+	if v1MirrorCount == len(mirrors) {
+		// OK, but mirrors are v1
+		return nil, nil, nil
+	}
+	if v2MirrorEndpoint != nil && v1MirrorCount == 0 {
+		// OK, 1 v2 mirror specified
+		return v2MirrorEndpoint, v2MirrorRepoInfo, nil
+	}
+	if v2MirrorEndpoint != nil && v1MirrorCount > 0 {
+		lastErr = fmt.Errorf("v1 and v2 mirrors configured")
+	}
+	return nil, nil, lastErr
+}
+
+func (s *TagStore) pullFromV2Mirror(mirrorEndpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo,
+	imagePullConfig *ImagePullConfig, tag string, sf *streamformatter.StreamFormatter, logName string) error {
+
+	tr := transport.NewTransport(
+		registry.NewTransport(registry.ReceiveTimeout, mirrorEndpoint.IsSecure),
+		registry.DockerHeaders(imagePullConfig.MetaHeaders)...,
+	)
+	client := registry.HTTPClient(tr)
+	mirrorSession, err := registry.NewSession(client, &cliconfig.AuthConfig{}, mirrorEndpoint)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Pulling v2 repository with local name %q from %s", repoInfo.LocalName, mirrorEndpoint.URL)
+	if err := s.pullV2Repository(mirrorSession, imagePullConfig.OutStream, repoInfo, tag, sf); err != nil {
+		return err
+	}
+	s.eventsService.Log("pull", logName, "")
+	return nil
+}
+
+func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *streamformatter.StreamFormatter) error {
 	out.Write(sf.FormatStatus("", "Pulling repository %s", repoInfo.CanonicalName))
 
 	repoData, err := r.GetRepositoryData(repoInfo.RemoteName)
@@ -119,10 +219,10 @@
 		return err
 	}
 
-	log.Debugf("Retrieving the tag list")
-	tagsList, err := r.GetRemoteTags(repoData.Endpoints, repoInfo.RemoteName, repoData.Tokens)
+	logrus.Debugf("Retrieving the tag list")
+	tagsList, err := r.GetRemoteTags(repoData.Endpoints, repoInfo.RemoteName)
 	if err != nil {
-		log.Errorf("unable to get remote tags: %s", err)
+		logrus.Errorf("unable to get remote tags: %s", err)
 		return err
 	}
 
@@ -134,7 +234,7 @@
 		}
 	}
 
-	log.Debugf("Registering tags")
+	logrus.Debugf("Registering tags")
 	// If no tag has been specified, pull them all
 	if askedTag == "" {
 		for tag, id := range tagsList {
@@ -151,108 +251,95 @@
 
 	errors := make(chan error)
 
-	layers_downloaded := false
+	layersDownloaded := false
 	for _, image := range repoData.ImgList {
 		downloadImage := func(img *registry.ImgData) {
 			if askedTag != "" && img.Tag != askedTag {
-				if parallel {
-					errors <- nil
-				}
+				errors <- nil
 				return
 			}
 
 			if img.Tag == "" {
-				log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
-				if parallel {
-					errors <- nil
-				}
+				logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
+				errors <- nil
 				return
 			}
 
 			// ensure no two downloads of the same image happen at the same time
 			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
 				if c != nil {
-					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
+					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
 					<-c
-					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
+					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
 				} else {
-					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
 				}
-				if parallel {
-					errors <- nil
-				}
+				errors <- nil
 				return
 			}
 			defer s.poolRemove("pull", "img:"+img.ID)
 
-			out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
+			out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
 			success := false
 			var lastErr, err error
-			var is_downloaded bool
+			var isDownloaded bool
 			for _, ep := range repoInfo.Index.Mirrors {
-				out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
-				if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
+				// Ensure endpoint is v1
+				ep = ep + "v1/"
+				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
+				if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 					// Don't report errors when pulling from mirrors.
-					log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
+					logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
 					continue
 				}
-				layers_downloaded = layers_downloaded || is_downloaded
+				layersDownloaded = layersDownloaded || isDownloaded
 				success = true
 				break
 			}
 			if !success {
 				for _, ep := range repoData.Endpoints {
-					out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
-					if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
+					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
+					if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
 						// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
 						// As the error is also given to the output stream the user will see the error.
 						lastErr = err
-						out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
+						out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
 						continue
 					}
-					layers_downloaded = layers_downloaded || is_downloaded
+					layersDownloaded = layersDownloaded || isDownloaded
 					success = true
 					break
 				}
 			}
 			if !success {
 				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr)
-				out.Write(sf.FormatProgress(common.TruncateID(img.ID), err.Error(), nil))
-				if parallel {
-					errors <- err
-					return
-				}
+				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
+				errors <- err
+				return
 			}
-			out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
+			out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
 
-			if parallel {
-				errors <- nil
-			}
+			errors <- nil
 		}
 
-		if parallel {
-			go downloadImage(image)
-		} else {
-			downloadImage(image)
+		go downloadImage(image)
+	}
+
+	var lastError error
+	for i := 0; i < len(repoData.ImgList); i++ {
+		if err := <-errors; err != nil {
+			lastError = err
 		}
 	}
-	if parallel {
-		var lastError error
-		for i := 0; i < len(repoData.ImgList); i++ {
-			if err := <-errors; err != nil {
-				lastError = err
-			}
-		}
-		if lastError != nil {
-			return lastError
-		}
-
+	if lastError != nil {
+		return lastError
 	}
+
 	for tag, id := range tagsList {
 		if askedTag != "" && tag != askedTag {
 			continue
 		}
-		if err := s.Set(repoInfo.LocalName, tag, id, true); err != nil {
+		if err := s.Tag(repoInfo.LocalName, tag, id, true); err != nil {
 			return err
 		}
 	}
@@ -261,32 +348,32 @@
 	if len(askedTag) > 0 {
 		requestedTag = utils.ImageReference(repoInfo.CanonicalName, askedTag)
 	}
-	WriteStatus(requestedTag, out, sf, layers_downloaded)
+	WriteStatus(requestedTag, out, sf, layersDownloaded)
 	return nil
 }
 
-func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
-	history, err := r.GetRemoteHistory(imgID, endpoint, token)
+func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *streamformatter.StreamFormatter) (bool, error) {
+	history, err := r.GetRemoteHistory(imgID, endpoint)
 	if err != nil {
 		return false, err
 	}
-	out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pulling dependent layers", nil))
+	out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
 	// FIXME: Try to stream the images?
 	// FIXME: Launch the getRemoteImage() in goroutines
 
-	layers_downloaded := false
+	layersDownloaded := false
 	for i := len(history) - 1; i >= 0; i-- {
 		id := history[i]
 
 		// ensure no two downloads of the same layer happen at the same time
 		if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
-			log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
+			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
 			<-c
 		}
 		defer s.poolRemove("pull", "layer:"+id)
 
 		if !s.graph.Exists(id) {
-			out.Write(sf.FormatProgress(common.TruncateID(id), "Pulling metadata", nil))
+			out.Write(sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
 			var (
 				imgJSON []byte
 				imgSize int
@@ -295,19 +382,19 @@
 			)
 			retries := 5
 			for j := 1; j <= retries; j++ {
-				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
+				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint)
 				if err != nil && j == retries {
-					out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil))
-					return layers_downloaded, err
+					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					return layersDownloaded, err
 				} else if err != nil {
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
 				}
 				img, err = image.NewImgJSON(imgJSON)
-				layers_downloaded = true
+				layersDownloaded = true
 				if err != nil && j == retries {
-					out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil))
-					return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
+					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
 				} else if err != nil {
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
@@ -322,8 +409,8 @@
 				if j > 1 {
 					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
 				}
-				out.Write(sf.FormatProgress(common.TruncateID(id), status, nil))
-				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
+				out.Write(sf.FormatProgress(stringid.TruncateID(id), status, nil))
+				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
 				if uerr, ok := err.(*url.Error); ok {
 					err = uerr.Err
 				}
@@ -331,10 +418,10 @@
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
 				} else if err != nil {
-					out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil))
-					return layers_downloaded, err
+					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
+					return layersDownloaded, err
 				}
-				layers_downloaded = true
+				layersDownloaded = true
 				defer layer.Close()
 
 				err = s.graph.Register(img,
@@ -344,27 +431,27 @@
 						Formatter: sf,
 						Size:      imgSize,
 						NewLines:  false,
-						ID:        common.TruncateID(id),
+						ID:        stringid.TruncateID(id),
 						Action:    "Downloading",
 					}))
 				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
 					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
 					continue
 				} else if err != nil {
-					out.Write(sf.FormatProgress(common.TruncateID(id), "Error downloading dependent layers", nil))
-					return layers_downloaded, err
+					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
+					return layersDownloaded, err
 				} else {
 					break
 				}
 			}
 		}
-		out.Write(sf.FormatProgress(common.TruncateID(id), "Download complete", nil))
+		out.Write(sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
 	}
-	return layers_downloaded, nil
+	return layersDownloaded, nil
 }
 
-func WriteStatus(requestedTag string, out io.Writer, sf *utils.StreamFormatter, layers_downloaded bool) {
-	if layers_downloaded {
+func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) {
+	if layersDownloaded {
 		out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag))
 	} else {
 		out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag))
@@ -382,11 +469,11 @@
 	err        chan error
 }
 
-func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool) error {
+func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
 	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
 	if err != nil {
 		if repoInfo.Index.Official {
-			log.Debugf("Unable to pull from V2 registry, falling back to v1: %s", err)
+			logrus.Debugf("Unable to pull from V2 registry, falling back to v1: %s", err)
 			return ErrV2RegistryUnavailable
 		}
 		return fmt.Errorf("error getting registry endpoint: %s", err)
@@ -397,7 +484,7 @@
 	}
 	var layersDownloaded bool
 	if tag == "" {
-		log.Debugf("Pulling tag list from V2 registry for %s", repoInfo.CanonicalName)
+		logrus.Debugf("Pulling tag list from V2 registry for %s", repoInfo.CanonicalName)
 		tags, err := r.GetV2RemoteTags(endpoint, repoInfo.RemoteName, auth)
 		if err != nil {
 			return err
@@ -406,14 +493,14 @@
 			return registry.ErrDoesNotExist
 		}
 		for _, t := range tags {
-			if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, t, sf, parallel, auth); err != nil {
+			if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, t, sf, auth); err != nil {
 				return err
 			} else if downloaded {
 				layersDownloaded = true
 			}
 		}
 	} else {
-		if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, tag, sf, parallel, auth); err != nil {
+		if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, tag, sf, auth); err != nil {
 			return err
 		} else if downloaded {
 			layersDownloaded = true
@@ -428,8 +515,8 @@
 	return nil
 }
 
-func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
-	log.Debugf("Pulling tag from V2 registry: %q", tag)
+func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
+	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
 
 	manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
 	if err != nil {
@@ -438,7 +525,7 @@
 
 	// loadManifest ensures that the manifest payload has the expected digest
 	// if the tag is a digest reference.
-	manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag)
+	manifest, verified, err := s.loadManifest(manifestBytes, manifestDigest, tag)
 	if err != nil {
 		return false, fmt.Errorf("error verifying manifest: %s", err)
 	}
@@ -448,7 +535,7 @@
 	}
 
 	if verified {
-		log.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
+		logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
 	}
 	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))
 
@@ -468,7 +555,7 @@
 
 		// Check if exists
 		if s.graph.Exists(img.ID) {
-			log.Debugf("Image already exists: %s", img.ID)
+			logrus.Debugf("Image already exists: %s", img.ID)
 			continue
 		}
 
@@ -478,18 +565,18 @@
 		}
 		downloads[i].digest = dgst
 
-		out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil))
+		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
 
 		downloadFunc := func(di *downloadInfo) error {
-			log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)
+			logrus.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)
 
 			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
 				if c != nil {
-					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
+					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
 					<-c
-					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
+					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
 				} else {
-					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
+					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
 				}
 			} else {
 				defer s.poolRemove("pull", "img:"+img.ID)
@@ -498,7 +585,7 @@
 					return err
 				}
 
-				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth)
+				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth)
 				if err != nil {
 					return err
 				}
@@ -515,22 +602,22 @@
 					Formatter: sf,
 					Size:      int(l),
 					NewLines:  false,
-					ID:        common.TruncateID(img.ID),
+					ID:        stringid.TruncateID(img.ID),
 					Action:    "Downloading",
 				})); err != nil {
 					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
 				}
 
-				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil))
+				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))
 
 				if !verifier.Verified() {
-					log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
+					logrus.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
 					verified = false
 				}
 
-				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
+				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
 
-				log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
+				logrus.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
 				di.tmpFile = tmpFile
 				di.length = l
 				di.downloaded = true
@@ -540,25 +627,17 @@
 			return nil
 		}
 
-		if parallel {
-			downloads[i].err = make(chan error)
-			go func(di *downloadInfo) {
-				di.err <- downloadFunc(di)
-			}(&downloads[i])
-		} else {
-			err := downloadFunc(&downloads[i])
-			if err != nil {
-				return false, err
-			}
-		}
+		downloads[i].err = make(chan error)
+		go func(di *downloadInfo) {
+			di.err <- downloadFunc(di)
+		}(&downloads[i])
 	}
 
 	var tagUpdated bool
 	for i := len(downloads) - 1; i >= 0; i-- {
 		d := &downloads[i]
 		if d.err != nil {
-			err := <-d.err
-			if err != nil {
+			if err := <-d.err; err != nil {
 				return false, err
 			}
 		}
@@ -574,7 +653,7 @@
 						Out:       out,
 						Formatter: sf,
 						Size:      int(d.length),
-						ID:        common.TruncateID(d.img.ID),
+						ID:        stringid.TruncateID(d.img.ID),
 						Action:    "Extracting",
 					}))
 				if err != nil {
@@ -583,10 +662,10 @@
 
 				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
 			}
-			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil))
+			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
 			tagUpdated = true
 		} else {
-			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil))
+			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
 		}
 
 	}
@@ -601,6 +680,8 @@
 			if _, exists := repo[tag]; !exists {
 				tagUpdated = true
 			}
+		} else {
+			tagUpdated = true
 		}
 	}
 
@@ -618,7 +699,7 @@
 		}
 	} else {
 		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
-		if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
+		if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
 			return false, err
 		}
 	}
diff --git a/graph/push.go b/graph/push.go
index 08c383b..817ef70 100644
--- a/graph/push.go
+++ b/graph/push.go
@@ -7,15 +7,18 @@
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
-	"strings"
+	"path/filepath"
 	"sync"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progressreader"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/transport"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
@@ -24,6 +27,13 @@
 
 var ErrV2RegistryUnavailable = errors.New("error v2 registry unavailable")
 
+type ImagePushConfig struct {
+	MetaHeaders map[string][]string
+	AuthConfig  *cliconfig.AuthConfig
+	Tag         string
+	OutStream   io.Writer
+}
+
 // Retrieve the all the images to be uploaded in the correct order
 func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
 	var (
@@ -72,14 +82,14 @@
 	if len(imageList) == 0 {
 		return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
 	}
-	log.Debugf("Image list: %v", imageList)
-	log.Debugf("Tags by image: %v", tagsByImage)
+	logrus.Debugf("Image list: %v", imageList)
+	logrus.Debugf("Tags by image: %v", tagsByImage)
 
 	return imageList, tagsByImage, nil
 }
 
 func (s *TagStore) getImageTags(localRepo map[string]string, askedTag string) ([]string, error) {
-	log.Debugf("Checking %s against %#v", askedTag, localRepo)
+	logrus.Debugf("Checking %s against %#v", askedTag, localRepo)
 	if len(askedTag) > 0 {
 		if _, ok := localRepo[askedTag]; !ok || utils.DigestReference(askedTag) {
 			return nil, fmt.Errorf("Tag does not exist: %s", askedTag)
@@ -128,21 +138,21 @@
 
 // lookupImageOnEndpoint checks the specified endpoint to see if an image exists
 // and if it is absent then it sends the image id to the channel to be pushed.
-func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Writer, sf *utils.StreamFormatter,
+func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Writer, sf *streamformatter.StreamFormatter,
 	images chan imagePushData, imagesToPush chan string) {
 	defer wg.Done()
 	for image := range images {
-		if err := r.LookupRemoteImage(image.id, image.endpoint, image.tokens); err != nil {
-			log.Errorf("Error in LookupRemoteImage: %s", err)
+		if err := r.LookupRemoteImage(image.id, image.endpoint); err != nil {
+			logrus.Errorf("Error in LookupRemoteImage: %s", err)
 			imagesToPush <- image.id
 			continue
 		}
-		out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", common.TruncateID(image.id)))
+		out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(image.id)))
 	}
 }
 
 func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteName string, imageIDs []string,
-	tags map[string][]string, repo *registry.RepositoryData, sf *utils.StreamFormatter, r *registry.Session) error {
+	tags map[string][]string, repo *registry.RepositoryData, sf *streamformatter.StreamFormatter, r *registry.Session) error {
 	workerCount := len(imageIDs)
 	// start a maximum of 5 workers to check if images exist on the specified endpoint.
 	if workerCount > 5 {
@@ -189,8 +199,8 @@
 			}
 		}
 		for _, tag := range tags[id] {
-			out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", common.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
-			if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil {
+			out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
+			if err := r.PushRegistryTag(remoteName, id, tag, endpoint); err != nil {
 				return err
 			}
 		}
@@ -201,9 +211,9 @@
 // pushRepository pushes layers that do not already exist on the registry.
 func (s *TagStore) pushRepository(r *registry.Session, out io.Writer,
 	repoInfo *registry.RepositoryInfo, localRepo map[string]string,
-	tag string, sf *utils.StreamFormatter) error {
-	log.Debugf("Local repo: %s", localRepo)
-	out = utils.NewWriteFlusher(out)
+	tag string, sf *streamformatter.StreamFormatter) error {
+	logrus.Debugf("Local repo: %s", localRepo)
+	out = ioutils.NewWriteFlusher(out)
 	imgList, tags, err := s.getImageList(localRepo, tag)
 	if err != nil {
 		return err
@@ -211,9 +221,9 @@
 	out.Write(sf.FormatStatus("", "Sending image list"))
 
 	imageIndex := s.createImageIndex(imgList, tags)
-	log.Debugf("Preparing to push %s with the following images and tags", localRepo)
+	logrus.Debugf("Preparing to push %s with the following images and tags", localRepo)
 	for _, data := range imageIndex {
-		log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
 	}
 	// Register all the images in a repository with the registry
 	// If an image is not in this list it will not be associated with the repository
@@ -236,22 +246,22 @@
 	return err
 }
 
-func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
-	out = utils.NewWriteFlusher(out)
-	jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
+func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *streamformatter.StreamFormatter) (checksum string, err error) {
+	out = ioutils.NewWriteFlusher(out)
+	jsonRaw, err := ioutil.ReadFile(filepath.Join(s.graph.Root, imgID, "json"))
 	if err != nil {
 		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 	}
-	out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pushing", nil))
+	out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))
 
 	imgData := &registry.ImgData{
 		ID: imgID,
 	}
 
 	// Send the json
-	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
+	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
 		if err == registry.ErrAlreadyExists {
-			out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
+			out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
 			return "", nil
 		}
 		return "", err
@@ -264,7 +274,7 @@
 	defer os.RemoveAll(layerData.Name())
 
 	// Send the layer
-	log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
+	logrus.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
 
 	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID,
 		progressreader.New(progressreader.Config{
@@ -273,28 +283,28 @@
 			Formatter: sf,
 			Size:      int(layerData.Size),
 			NewLines:  false,
-			ID:        common.TruncateID(imgData.ID),
+			ID:        stringid.TruncateID(imgData.ID),
 			Action:    "Pushing",
-		}), ep, token, jsonRaw)
+		}), ep, jsonRaw)
 	if err != nil {
 		return "", err
 	}
 	imgData.Checksum = checksum
 	imgData.ChecksumPayload = checksumPayload
 	// Send the checksum
-	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
+	if err := r.PushImageChecksumRegistry(imgData, ep); err != nil {
 		return "", err
 	}
 
-	out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image successfully pushed", nil))
+	out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image successfully pushed", nil))
 	return imgData.Checksum, nil
 }
 
-func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter) error {
+func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
 	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
 	if err != nil {
 		if repoInfo.Index.Official {
-			log.Debugf("Unable to push to V2 registry, falling back to v1: %s", err)
+			logrus.Debugf("Unable to push to V2 registry, falling back to v1: %s", err)
 			return ErrV2RegistryUnavailable
 		}
 		return fmt.Errorf("error getting registry endpoint: %s", err)
@@ -314,7 +324,7 @@
 	}
 
 	for _, tag := range tags {
-		log.Debugf("Pushing repository: %s:%s", repoInfo.CanonicalName, tag)
+		logrus.Debugf("Pushing repository: %s:%s", repoInfo.CanonicalName, tag)
 
 		layerId, exists := localRepo[tag]
 		if !exists {
@@ -355,11 +365,10 @@
 
 		// Schema version 1 requires layer ordering from top to root
 		for i, layer := range layers {
-			log.Debugf("Pushing layer: %s", layer.ID)
+			logrus.Debugf("Pushing layer: %s", layer.ID)
 
 			if layer.Config != nil && metadata.Image != layer.ID {
-				err = runconfig.Merge(&metadata, layer.Config)
-				if err != nil {
+				if err := runconfig.Merge(&metadata, layer.Config); err != nil {
 					return err
 				}
 			}
@@ -375,15 +384,15 @@
 
 			var exists bool
 			if len(checksum) > 0 {
-				sumParts := strings.SplitN(checksum, ":", 2)
-				if len(sumParts) < 2 {
-					return fmt.Errorf("Invalid checksum: %s", checksum)
+				dgst, err := digest.ParseDigest(checksum)
+				if err != nil {
+					return fmt.Errorf("Invalid checksum %s: %s", checksum, err)
 				}
 
 				// Call mount blob
-				exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], sumParts[1], auth)
+				exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, dgst, auth)
 				if err != nil {
-					out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image push failed", nil))
+					out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil))
 					return err
 				}
 			}
@@ -398,7 +407,7 @@
 					checksum = cs
 				}
 			} else {
-				out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image already exists", nil))
+				out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil))
 			}
 			m.FSLayers[i] = &registry.FSLayer{BlobSum: checksum}
 			m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)}
@@ -408,7 +417,7 @@
 			return fmt.Errorf("invalid manifest: %s", err)
 		}
 
-		log.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
+		logrus.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
 		mBytes, err := json.MarshalIndent(m, "", "   ")
 		if err != nil {
 			return err
@@ -426,7 +435,7 @@
 		if err != nil {
 			return err
 		}
-		log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())
+		logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())
 
 		// push the manifest
 		digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth)
@@ -440,8 +449,8 @@
 }
 
 // PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
-func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) {
-	out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Buffering to Disk", nil))
+func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *streamformatter.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) {
+	out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))
 
 	image, err := s.graph.Get(img.ID)
 	if err != nil {
@@ -465,87 +474,87 @@
 	size, dgst, err := bufferToFile(tf, arch)
 
 	// Send the layer
-	log.Debugf("rendered layer for %s of [%d] size", img.ID, size)
+	logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
 
-	if err := r.PutV2ImageBlob(endpoint, imageName, dgst.Algorithm(), dgst.Hex(),
+	if err := r.PutV2ImageBlob(endpoint, imageName, dgst,
 		progressreader.New(progressreader.Config{
 			In:        tf,
 			Out:       out,
 			Formatter: sf,
 			Size:      int(size),
 			NewLines:  false,
-			ID:        common.TruncateID(img.ID),
+			ID:        stringid.TruncateID(img.ID),
 			Action:    "Pushing",
 		}), auth); err != nil {
-		out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
+		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image push failed", nil))
 		return "", err
 	}
-	out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image successfully pushed", nil))
+	out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))
 	return dgst.String(), nil
 }
 
 // FIXME: Allow to interrupt current push when new push of same image is done.
-func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
-	}
+func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {
 	var (
-		localName   = job.Args[0]
-		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
-		authConfig  = &registry.AuthConfig{}
-		metaHeaders map[string][]string
+		sf = streamformatter.NewJSONStreamFormatter()
 	)
 
 	// Resolve the Repository name from fqn to RepositoryInfo
-	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
+	repoInfo, err := s.registryService.ResolveRepository(localName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
-	tag := job.Getenv("tag")
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("metaHeaders", &metaHeaders)
-
 	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer s.poolRemove("push", repoInfo.LocalName)
 
-	endpoint, err := repoInfo.GetEndpoint()
+	endpoint, err := repoInfo.GetEndpoint(imagePushConfig.MetaHeaders)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
-
-	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
+	// TODO(tiborvass): reuse client from endpoint?
+	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
+	tr := transport.NewTransport(
+		registry.NewTransport(registry.NoTimeout, endpoint.IsSecure),
+		registry.DockerHeaders(imagePushConfig.MetaHeaders)...,
+	)
+	client := registry.HTTPClient(tr)
+	r, err := registry.NewSession(client, imagePushConfig.AuthConfig, endpoint)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	reposLen := 1
-	if tag == "" {
+	if imagePushConfig.Tag == "" {
 		reposLen = len(s.Repositories[repoInfo.LocalName])
 	}
-	job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
+
+	imagePushConfig.OutStream.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen))
+
 	// If it fails, try to get the repository
 	localRepo, exists := s.Repositories[repoInfo.LocalName]
 	if !exists {
-		return job.Errorf("Repository does not exist: %s", repoInfo.LocalName)
+		return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName)
 	}
 
 	if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
-		err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf)
+		err := s.pushV2Repository(r, localRepo, imagePushConfig.OutStream, repoInfo, imagePushConfig.Tag, sf)
 		if err == nil {
-			return engine.StatusOK
+			s.eventsService.Log("push", repoInfo.LocalName, "")
+			return nil
 		}
 
 		if err != ErrV2RegistryUnavailable {
-			return job.Errorf("Error pushing to registry: %s", err)
+			return fmt.Errorf("Error pushing to registry: %s", err)
 		}
 	}
 
-	if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
-		return job.Error(err)
+	if err := s.pushRepository(r, imagePushConfig.OutStream, repoInfo, localRepo, imagePushConfig.Tag, sf); err != nil {
+		return err
 	}
-	return engine.StatusOK
+	s.eventsService.Log("push", repoInfo.LocalName, "")
+	return nil
 
 }
diff --git a/graph/service.go b/graph/service.go
index 350ed8c..52dde1d 100644
--- a/graph/service.go
+++ b/graph/service.go
@@ -4,178 +4,65 @@
 	"fmt"
 	"io"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/image"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
 )
 
-func (s *TagStore) Install(eng *engine.Engine) error {
-	for name, handler := range map[string]engine.Handler{
-		"image_set":      s.CmdSet,
-		"tag":            s.CmdTag,
-		"image_get":      s.CmdGet,
-		"image_inspect":  s.CmdLookup,
-		"image_tarlayer": s.CmdTarLayer,
-		"image_export":   s.CmdImageExport,
-		"history":        s.CmdHistory,
-		"images":         s.CmdImages,
-		"viz":            s.CmdViz,
-		"load":           s.CmdLoad,
-		"import":         s.CmdImport,
-		"pull":           s.CmdPull,
-		"push":           s.CmdPush,
-	} {
-		if err := eng.Register(name, handler); err != nil {
-			return fmt.Errorf("Could not register %q: %v", name, err)
-		}
+func (s *TagStore) LookupRaw(name string) ([]byte, error) {
+	image, err := s.LookupImage(name)
+	if err != nil || image == nil {
+		return nil, fmt.Errorf("No such image %s", name)
 	}
-	return nil
-}
 
-// CmdSet stores a new image in the graph.
-// Images are stored in the graph using 4 elements:
-//	- A user-defined ID
-//	- A collection of metadata describing the image
-//	- A directory tree stored as a tar archive (also called the "layer")
-//	- A reference to a "parent" ID on top of which the layer should be applied
-//
-// NOTE: even though the parent ID is only useful in relation to the layer and how
-// to apply it (ie you could represent the full directory tree as 'parent_layer + layer',
-// it is treated as a top-level property of the image. This is an artifact of early
-// design and should probably be cleaned up in the future to simplify the design.
-//
-// Syntax: image_set ID
-// Input:
-//	- Layer content must be streamed in tar format on stdin. An empty input is
-//	valid and represents a nil layer.
-//
-//	- Image metadata must be passed in the command environment.
-//		'json': a json-encoded object with all image metadata.
-//			It will be stored as-is, without any encoding/decoding artifacts.
-//			That is a requirement of the current registry client implementation,
-//			because a re-encoded json might invalidate the image checksum at
-//			the next upload, even with functionaly identical content.
-func (s *TagStore) CmdSet(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
-	}
-	var (
-		imgJSON = []byte(job.Getenv("json"))
-		layer   = job.Stdin
-	)
-	if len(imgJSON) == 0 {
-		return job.Errorf("mandatory key 'json' is not set")
-	}
-	// We have to pass an *image.Image object, even though it will be completely
-	// ignored in favor of the redundant json data.
-	// FIXME: the current prototype of Graph.Register is stupid and redundant.
-	img, err := image.NewImgJSON(imgJSON)
+	imageInspectRaw, err := image.RawJson()
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
-	if err := s.graph.Register(img, layer); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+
+	return imageInspectRaw, nil
 }
 
-// CmdGet returns information about an image.
-// If the image doesn't exist, an empty object is returned, to allow
-// checking for an image's existence.
-func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+// Lookup return an image encoded in JSON
+func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) {
+	image, err := s.LookupImage(name)
+	if err != nil || image == nil {
+		return nil, fmt.Errorf("No such image: %s", name)
 	}
-	name := job.Args[0]
-	res := &engine.Env{}
-	img, err := s.LookupImage(name)
-	// Note: if the image doesn't exist, LookupImage returns
-	// nil, nil.
-	if err != nil {
-		return job.Error(err)
+
+	imageInspect := &types.ImageInspect{
+		Id:              image.ID,
+		Parent:          image.Parent,
+		Comment:         image.Comment,
+		Created:         image.Created,
+		Container:       image.Container,
+		ContainerConfig: &image.ContainerConfig,
+		DockerVersion:   image.DockerVersion,
+		Author:          image.Author,
+		Config:          image.Config,
+		Architecture:    image.Architecture,
+		Os:              image.OS,
+		Size:            image.Size,
+		VirtualSize:     image.GetParentsSize(0) + image.Size,
 	}
-	if img != nil {
-		// We don't directly expose all fields of the Image objects,
-		// to maintain a clean public API which we can maintain over
-		// time even if the underlying structure changes.
-		// We should have done this with the Image object to begin with...
-		// but we didn't, so now we're doing it here.
-		//
-		// Fields that we're probably better off not including:
-		//	- Config/ContainerConfig. Those structs have the same sprawl problem,
-		//		so we shouldn't include them wholesale either.
-		//	- Comment: initially created to fulfill the "every image is a git commit"
-		//		metaphor, in practice people either ignore it or use it as a
-		//		generic description field which it isn't. On deprecation shortlist.
-		res.SetAuto("Created", img.Created)
-		res.SetJson("Author", img.Author)
-		res.Set("Os", img.OS)
-		res.Set("Architecture", img.Architecture)
-		res.Set("DockerVersion", img.DockerVersion)
-		res.SetJson("Id", img.ID)
-		res.SetJson("Parent", img.Parent)
-	}
-	res.WriteTo(job.Stdout)
-	return engine.StatusOK
+
+	return imageInspect, nil
 }
 
-// CmdLookup return an image encoded in JSON
-func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
-	}
-	name := job.Args[0]
-	if image, err := s.LookupImage(name); err == nil && image != nil {
-		if job.GetenvBool("raw") {
-			b, err := image.RawJson()
-			if err != nil {
-				return job.Error(err)
-			}
-			job.Stdout.Write(b)
-			return engine.StatusOK
-		}
-
-		out := &engine.Env{}
-		out.SetJson("Id", image.ID)
-		out.SetJson("Parent", image.Parent)
-		out.SetJson("Comment", image.Comment)
-		out.SetAuto("Created", image.Created)
-		out.SetJson("Container", image.Container)
-		out.SetJson("ContainerConfig", image.ContainerConfig)
-		out.Set("DockerVersion", image.DockerVersion)
-		out.SetJson("Author", image.Author)
-		out.SetJson("Config", image.Config)
-		out.Set("Architecture", image.Architecture)
-		out.Set("Os", image.OS)
-		out.SetInt64("Size", image.Size)
-		out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
-		if _, err = out.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
-		}
-		return engine.StatusOK
-	}
-	return job.Errorf("No such image: %s", name)
-}
-
-// CmdTarLayer return the tarLayer of the image
-func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
-	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
-	}
-	name := job.Args[0]
+// ImageTarLayer return the tarLayer of the image
+func (s *TagStore) ImageTarLayer(name string, dest io.Writer) error {
 	if image, err := s.LookupImage(name); err == nil && image != nil {
 		fs, err := image.TarLayer()
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer fs.Close()
 
-		written, err := io.Copy(job.Stdout, fs)
+		written, err := io.Copy(dest, fs)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
-		log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
-		return engine.StatusOK
+		logrus.Debugf("rendered layer for %s of [%d] size", image.ID, written)
+		return nil
 	}
-	return job.Errorf("No such image: %s", name)
+	return fmt.Errorf("No such image: %s", name)
 }
diff --git a/graph/tag.go b/graph/tag.go
deleted file mode 100644
index b33e49d..0000000
--- a/graph/tag.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package graph
-
-import (
-	"github.com/docker/docker/engine"
-)
-
-func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
-	if len(job.Args) != 2 && len(job.Args) != 3 {
-		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
-	}
-	var tag string
-	if len(job.Args) == 3 {
-		tag = job.Args[2]
-	}
-	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
-}
diff --git a/graph/tags.go b/graph/tags.go
index 5d26b8c..166a3d7 100644
--- a/graph/tags.go
+++ b/graph/tags.go
@@ -4,6 +4,7 @@
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -12,10 +13,13 @@
 	"strings"
 	"sync"
 
+	"github.com/docker/docker/daemon/events"
+	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/registry"
+	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libtrust"
 )
@@ -23,9 +27,8 @@
 const DEFAULTTAG = "latest"
 
 var (
-	//FIXME these 2 regexes also exist in registry/v2/regexp.go
-	validTagName = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)
-	validDigest  = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+	//FIXME this regex also exists in registry/v2/regexp.go
+	validDigest = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
 )
 
 type TagStore struct {
@@ -36,8 +39,11 @@
 	sync.Mutex
 	// FIXME: move push/pull-related fields
 	// to a helper type
-	pullingPool map[string]chan struct{}
-	pushingPool map[string]chan struct{}
+	pullingPool     map[string]chan struct{}
+	pushingPool     map[string]chan struct{}
+	registryService *registry.Service
+	eventsService   *events.Events
+	trustService    *trust.TrustStore
 }
 
 type Repository map[string]string
@@ -60,19 +66,30 @@
 	return true
 }
 
-func NewTagStore(path string, graph *Graph, key libtrust.PrivateKey) (*TagStore, error) {
+type TagStoreConfig struct {
+	Graph    *Graph
+	Key      libtrust.PrivateKey
+	Registry *registry.Service
+	Events   *events.Events
+	Trust    *trust.TrustStore
+}
+
+func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) {
 	abspath, err := filepath.Abs(path)
 	if err != nil {
 		return nil, err
 	}
 
 	store := &TagStore{
-		path:         abspath,
-		graph:        graph,
-		trustKey:     key,
-		Repositories: make(map[string]Repository),
-		pullingPool:  make(map[string]chan struct{}),
-		pushingPool:  make(map[string]chan struct{}),
+		path:            abspath,
+		graph:           cfg.Graph,
+		trustKey:        cfg.Key,
+		Repositories:    make(map[string]Repository),
+		pullingPool:     make(map[string]chan struct{}),
+		pushingPool:     make(map[string]chan struct{}),
+		registryService: cfg.Registry,
+		eventsService:   cfg.Events,
+		trustService:    cfg.Trust,
 	}
 	// Load the json file if it exists, otherwise create it.
 	if err := store.reload(); os.IsNotExist(err) {
@@ -98,11 +115,12 @@
 }
 
 func (store *TagStore) reload() error {
-	jsonData, err := ioutil.ReadFile(store.path)
+	f, err := os.Open(store.path)
 	if err != nil {
 		return err
 	}
-	if err := json.Unmarshal(jsonData, store); err != nil {
+	defer f.Close()
+	if err := json.NewDecoder(f).Decode(&store); err != nil {
 		return err
 	}
 	return nil
@@ -163,7 +181,7 @@
 	if names, exists := store.ByID()[id]; exists && len(names) > 0 {
 		return names[0]
 	}
-	return common.TruncateID(id)
+	return stringid.TruncateID(id)
 }
 
 func (store *TagStore) DeleteAll(id string) error {
@@ -218,7 +236,11 @@
 	return deleted, store.save()
 }
 
-func (store *TagStore) Set(repoName, tag, imageName string, force bool) error {
+func (store *TagStore) Tag(repoName, tag, imageName string, force bool) error {
+	return store.SetLoad(repoName, tag, imageName, force, nil)
+}
+
+func (store *TagStore) SetLoad(repoName, tag, imageName string, force bool, out io.Writer) error {
 	img, err := store.LookupImage(imageName)
 	store.Lock()
 	defer store.Unlock()
@@ -226,12 +248,12 @@
 		return err
 	}
 	if tag == "" {
-		tag = DEFAULTTAG
+		tag = tags.DEFAULTTAG
 	}
 	if err := validateRepoName(repoName); err != nil {
 		return err
 	}
-	if err := ValidateTagName(tag); err != nil {
+	if err := tags.ValidateTagName(tag); err != nil {
 		return err
 	}
 	if err := store.reload(); err != nil {
@@ -241,8 +263,17 @@
 	repoName = registry.NormalizeLocalName(repoName)
 	if r, exists := store.Repositories[repoName]; exists {
 		repo = r
-		if old, exists := store.Repositories[repoName][tag]; exists && !force {
-			return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old)
+		if old, exists := store.Repositories[repoName][tag]; exists {
+
+			if !force {
+				return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old)
+			}
+
+			if old != img.ID && out != nil {
+
+				fmt.Fprintf(out, "The image %s:%s already exists, renaming the old one with ID %s to empty string\n", repoName, tag, old[:12])
+
+			}
 		}
 	} else {
 		repo = make(map[string]string)
@@ -316,9 +347,12 @@
 	}
 
 	// If no matching tag is found, search through images for a matching image id
-	for _, revision := range repo {
-		if strings.HasPrefix(revision, refOrID) {
-			return store.graph.Get(revision)
+	// iff it looks like a short ID or would look like a short ID
+	if stringid.IsShortID(stringid.TruncateID(refOrID)) {
+		for _, revision := range repo {
+			if strings.HasPrefix(revision, refOrID) {
+				return store.graph.Get(revision)
+			}
 		}
 	}
 
@@ -331,7 +365,7 @@
 
 	for name, repository := range store.Repositories {
 		for tag, id := range repository {
-			shortID := common.TruncateID(id)
+			shortID := stringid.TruncateID(id)
 			reporefs[shortID] = append(reporefs[shortID], utils.ImageReference(name, tag))
 		}
 	}
@@ -350,17 +384,6 @@
 	return nil
 }
 
-// ValidateTagName validates the name of a tag
-func ValidateTagName(name string) error {
-	if name == "" {
-		return fmt.Errorf("tag name can't be empty")
-	}
-	if !validTagName.MatchString(name) {
-		return fmt.Errorf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed, minimum 1, maximum 128 in length", name)
-	}
-	return nil
-}
-
 func validateDigest(dgst string) error {
 	if dgst == "" {
 		return errors.New("digest can't be empty")
diff --git a/graph/tags/tags.go b/graph/tags/tags.go
new file mode 100644
index 0000000..1abb593
--- /dev/null
+++ b/graph/tags/tags.go
@@ -0,0 +1,24 @@
+package tags
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const DEFAULTTAG = "latest"
+
+var (
+	//FIXME this regex also exists in registry/v2/regexp.go
+	validTagName = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)
+)
+
+// ValidateTagName validates the name of a tag
+func ValidateTagName(name string) error {
+	if name == "" {
+		return fmt.Errorf("tag name can't be empty")
+	}
+	if !validTagName.MatchString(name) {
+		return fmt.Errorf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed, minimum 1, maximum 128 in length", name)
+	}
+	return nil
+}
diff --git a/graph/tags/tags_unit_test.go b/graph/tags/tags_unit_test.go
new file mode 100644
index 0000000..5114da1
--- /dev/null
+++ b/graph/tags/tags_unit_test.go
@@ -0,0 +1,23 @@
+package tags
+
+import (
+	"testing"
+)
+
+func TestValidTagName(t *testing.T) {
+	validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"}
+	for _, tag := range validTags {
+		if err := ValidateTagName(tag); err != nil {
+			t.Errorf("'%s' should've been a valid tag", tag)
+		}
+	}
+}
+
+func TestInvalidTagName(t *testing.T) {
+	validTags := []string{"-9", ".foo", "-test", ".", "-"}
+	for _, tag := range validTags {
+		if err := ValidateTagName(tag); err == nil {
+			t.Errorf("'%s' shouldn't have been a valid tag", tag)
+		}
+	}
+}
diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go
index c1a686b..d1ddc67 100644
--- a/graph/tags_unit_test.go
+++ b/graph/tags_unit_test.go
@@ -1,17 +1,18 @@
 package graph
 
 import (
+	"archive/tar"
 	"bytes"
 	"io"
 	"os"
 	"path"
 	"testing"
 
+	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/graphdriver"
 	_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/utils"
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 const (
@@ -59,7 +60,11 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	store, err := NewTagStore(path.Join(root, "tags"), graph, nil)
+	tagCfg := &TagStoreConfig{
+		Graph:  graph,
+		Events: events.New(),
+	}
+	store, err := NewTagStore(path.Join(root, "tags"), tagCfg)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -71,7 +76,7 @@
 	if err := graph.Register(img, officialArchive); err != nil {
 		t.Fatal(err)
 	}
-	if err := store.Set(testOfficialImageName, "", testOfficialImageID, false); err != nil {
+	if err := store.Tag(testOfficialImageName, "", testOfficialImageID, false); err != nil {
 		t.Fatal(err)
 	}
 	privateArchive, err := fakeTar()
@@ -82,7 +87,7 @@
 	if err := graph.Register(img, privateArchive); err != nil {
 		t.Fatal(err)
 	}
-	if err := store.Set(testPrivateImageName, "", testPrivateImageID, false); err != nil {
+	if err := store.Tag(testPrivateImageName, "", testPrivateImageID, false); err != nil {
 		t.Fatal(err)
 	}
 	if err := store.SetDigest(testPrivateImageName, testPrivateImageDigest, testPrivateImageID); err != nil {
@@ -176,24 +181,6 @@
 	}
 }
 
-func TestValidTagName(t *testing.T) {
-	validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"}
-	for _, tag := range validTags {
-		if err := ValidateTagName(tag); err != nil {
-			t.Errorf("'%s' should've been a valid tag", tag)
-		}
-	}
-}
-
-func TestInvalidTagName(t *testing.T) {
-	validTags := []string{"-9", ".foo", "-test", ".", "-"}
-	for _, tag := range validTags {
-		if err := ValidateTagName(tag); err == nil {
-			t.Errorf("'%s' shouldn't have been a valid tag", tag)
-		}
-	}
-}
-
 func TestValidateDigest(t *testing.T) {
 	tests := []struct {
 		input       string
diff --git a/graph/viz.go b/graph/viz.go
deleted file mode 100644
index 924c22b..0000000
--- a/graph/viz.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package graph
-
-import (
-	"strings"
-
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/image"
-)
-
-func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
-	images, _ := s.graph.Map()
-	if images == nil {
-		return engine.StatusOK
-	}
-	job.Stdout.Write([]byte("digraph docker {\n"))
-
-	var (
-		parentImage *image.Image
-		err         error
-	)
-	for _, image := range images {
-		parentImage, err = image.GetParent()
-		if err != nil {
-			return job.Errorf("Error while getting parent image: %v", err)
-		}
-		if parentImage != nil {
-			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
-		} else {
-			job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
-		}
-	}
-
-	for id, repos := range s.GetRepoRefs() {
-		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
-	}
-	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
-	return engine.StatusOK
-}
diff --git a/hack/dind b/hack/dind
index f8fae63..9289ba6 100755
--- a/hack/dind
+++ b/hack/dind
@@ -3,7 +3,7 @@
 
 # DinD: a wrapper script which allows docker to be run inside a docker container.
 # Original version by Jerome Petazzoni <jerome@docker.com>
-# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/
+# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
 #
 # This script should be executed inside a docker container in privilieged mode
 # ('docker run --privileged', introduced in docker 0.6).
@@ -33,28 +33,35 @@
 fi
 
 # Mount the cgroup hierarchies exactly as they are in the parent system.
-for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do
-	mkdir -p "$CGROUP/$SUBSYS"
-	if ! mountpoint -q $CGROUP/$SUBSYS; then
-		mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS"
-	fi
+for HIER in $(cut -d: -f2 /proc/1/cgroup); do
 
-	# The two following sections address a bug which manifests itself
+	# The following sections address a bug which manifests itself
 	# by a cryptic "lxc-start: no ns_cgroup option specified" when
-	# trying to start containers withina container.
+	# trying to start containers within a container.
 	# The bug seems to appear when the cgroup hierarchies are not
 	# mounted on the exact same directories in the host, and in the
 	# container.
 
+	SUBSYSTEMS="${HIER%name=*}"
+
+	# If cgroup hierarchy is named(mounted with "-o name=foo") we
+	# need to mount it in $CGROUP/foo to create exect same
+	# directoryes as on host. Else we need to mount it as is e.g.
+	# "subsys1,subsys2" if it has two subsystems
+
 	# Named, control-less cgroups are mounted with "-o name=foo"
 	# (and appear as such under /proc/<pid>/cgroup) but are usually
 	# mounted on a directory named "foo" (without the "name=" prefix).
 	# Systemd and OpenRC (and possibly others) both create such a
-	# cgroup. To avoid the aforementioned bug, we symlink "foo" to
-	# "name=foo". This shouldn't have any adverse effect.
-	name="${SUBSYS#name=}"
-	if [ "$name" != "$SUBSYS" ]; then
-		ln -s "$SUBSYS" "$CGROUP/$name"
+	# cgroup. So just mount them on directory $CGROUP/foo.
+
+	OHIER=$HIER
+	HIER="${HIER#*name=}"
+
+	mkdir -p "$CGROUP/$HIER"
+
+	if ! mountpoint -q "$CGROUP/$HIER"; then
+		mount -n -t cgroup -o "$OHIER" cgroup "$CGROUP/$HIER"
 	fi
 
 	# Likewise, on at least one system, it has been reported that
@@ -62,8 +69,25 @@
 	# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
 	# but on a directory called "cpu,cpuacct" (note the inversion
 	# in the order of the groups). This tries to work around it.
-	if [ "$SUBSYS" = 'cpuacct,cpu' ]; then
-		ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct"
+
+	if [ "$HIER" = 'cpuacct,cpu' ]; then
+		ln -s "$HIER" "$CGROUP/cpu,cpuacct"
+	fi
+
+	# If hierarchy has multiple subsystems, in /proc/<pid>/cgroup
+	# we will see ":subsys1,subsys2,subsys3,name=foo:" substring,
+	# we need to mount it to "$CGROUP/foo" and if there were no
+	# name to "$CGROUP/subsys1,subsys2,subsys3", so we must create
+	# symlinks for docker daemon to find these subsystems:
+	# ln -s $CGROUP/foo $CGROUP/subsys1
+	# ln -s $CGROUP/subsys1,subsys2,subsys3 $CGROUP/subsys1
+
+	if [ "$SUBSYSTEMS" != "${SUBSYSTEMS//,/ }" ]; then
+		SUBSYSTEMS="${SUBSYSTEMS//,/ }"
+		for SUBSYS in $SUBSYSTEMS
+		do
+			ln -s "$CGROUP/$HIER" "$CGROUP/$SUBSYS"
+		done
 	fi
 done
 
diff --git a/hack/install.sh b/hack/install.sh
index b0177e6..1984d5a 100755
--- a/hack/install.sh
+++ b/hack/install.sh
@@ -49,11 +49,18 @@
 			;;
 	esac
 
-	if command_exists docker || command_exists lxc-docker; then
+	if command_exists docker; then
 		cat >&2 <<-'EOF'
-		Warning: "docker" or "lxc-docker" command appears to already exist.
-		Please ensure that you do not already have docker installed.
-		You may press Ctrl+C now to abort this process and rectify this situation.
+			Warning: the "docker" command appears to already exist on this system.
+
+			If you already have Docker installed, this script can cause trouble, which is
+			why we're displaying this warning and provide the opportunity to cancel the
+			installation.
+
+			If you installed the current Docker package using this script and are using it
+			again to update Docker, you can safely ignore this message.
+
+			You may press Ctrl+C now to abort this script.
 		EOF
 		( set -x; sleep 20 )
 	fi
@@ -126,7 +133,7 @@
 			exit 0
 			;;
 
-		ubuntu|debian|linuxmint)
+		ubuntu|debian|linuxmint|'elementary os'|kali)
 			export DEBIAN_FRONTEND=noninteractive
 
 			did_apt_get_update=
diff --git a/hack/make.sh b/hack/make.sh
index 118d432..60aafbd 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -6,7 +6,7 @@
 #
 # Requirements:
 # - The current directory should be a checkout of the docker source code
-#   (http://github.com/docker/docker). Whatever version is checked out
+#   (https://github.com/docker/docker). Whatever version is checked out
 #   will be built.
 # - The VERSION file, at the root of the repository, should exist, and
 #   will be used as Docker binary version and package version.
@@ -24,10 +24,12 @@
 set -o pipefail
 
 export DOCKER_PKG='github.com/docker/docker'
+export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export MAKEDIR="$SCRIPTDIR/make"
 
 # We're a nice, sexy, little shell script, and people might try to run us;
 # but really, they shouldn't. We want to be in a container!
-if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
+if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
 	{
 		echo "# WARNING! I don't seem to be running in the Docker container."
 		echo "# The result of this command might be an incorrect build, and will not be"
@@ -44,7 +46,9 @@
 DEFAULT_BUNDLES=(
 	validate-dco
 	validate-gofmt
+	validate-test
 	validate-toml
+	validate-vet
 
 	binary
 
@@ -53,7 +57,6 @@
 	test-docker-py
 
 	dynbinary
-	test-integration
 
 	cover
 	cross
@@ -61,7 +64,7 @@
 	ubuntu
 )
 
-VERSION=$(cat ./VERSION)
+VERSION=$(< ./VERSION)
 if command -v git &> /dev/null && git rev-parse &> /dev/null; then
 	GITCOMMIT=$(git rev-parse --short HEAD)
 	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
@@ -81,15 +84,22 @@
 	rm -rf .gopath
 	mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
 	ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
-	export GOPATH="$(pwd)/.gopath:$(pwd)/vendor"
+	export GOPATH="${PWD}/.gopath:${PWD}/vendor"
 fi
 
 if [ ! "$GOPATH" ]; then
-	echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH'
+	echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH'
 	echo >&2 '  alternatively, set AUTO_GOPATH=1'
 	exit 1
 fi
 
+if [ "$DOCKER_EXPERIMENTAL" ]; then
+	echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
+	echo >&2
+	VERSION+="-experimental"
+	DOCKER_BUILDTAGS+=" experimental"
+fi
+
 if [ -z "$DOCKER_CLIENTONLY" ]; then
 	DOCKER_BUILDTAGS+=" daemon"
 fi
@@ -98,10 +108,27 @@
 	DOCKER_BUILDTAGS+=' test_no_exec'
 fi
 
+# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately
+if \
+	command -v gcc &> /dev/null \
+	&& ! gcc -E - &> /dev/null <<<'#include <btrfs/version.h>' \
+; then
+	DOCKER_BUILDTAGS+=' btrfs_noversion'
+fi
+
+# test whether "libdevmapper.h" is new enough to support deferred remove
+# functionality.
+if \
+	command -v gcc &> /dev/null \
+	&& ! ( echo -e  '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -ldevmapper -xc - &> /dev/null ) \
+; then
+       DOCKER_BUILDTAGS+=' libdm_no_deferred_remove'
+fi
+
 # Use these flags when compiling the tests and final binary
 
 IAMSTATIC='true'
-source "$(dirname "$BASH_SOURCE")/make/.go-autogen"
+source "$SCRIPTDIR/make/.go-autogen"
 LDFLAGS='-w'
 
 LDFLAGS_STATIC='-linkmode external'
@@ -156,7 +183,12 @@
 # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
 # You can use this to select certain tests to run, eg.
 #
-#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
+#     TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
+#
+# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
+# to run certain tests on your local host, you should run with command:
+#
+#     TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
 #
 go_test_dir() {
 	dir=$1
@@ -182,6 +214,7 @@
 		DEST="$DEST" \
 		DOCKER_EXECDRIVER="$DOCKER_EXECDRIVER" \
 		DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
+		DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
 		DOCKER_HOST="$DOCKER_HOST" \
 		GOPATH="$GOPATH" \
 		HOME="$DEST/fake-HOME" \
@@ -204,7 +237,6 @@
 	find . -not \( \
 		\( \
 			-path './vendor/*' \
-			-o -path './integration/*' \
 			-o -path './integration-cli/*' \
 			-o -path './contrib/*' \
 			-o -path './pkg/mflag/example/*' \
@@ -242,7 +274,7 @@
 	bundlescript=$1
 	bundle=$(basename $bundlescript)
 	echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)"
-	mkdir -p bundles/$VERSION/$bundle
+	mkdir -p "bundles/$VERSION/$bundle"
 	source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle"
 }
 
@@ -252,17 +284,24 @@
 	mkdir -p bundles
 	if [ -e "bundles/$VERSION" ]; then
 		echo "bundles/$VERSION already exists. Removing."
-		rm -fr bundles/$VERSION && mkdir bundles/$VERSION || exit 1
+		rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1
 		echo
 	fi
-	SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+	if [ "$(go env GOHOSTOS)" != 'windows' ]; then
+		# Windows and symlinks don't get along well
+
+		rm -f bundles/latest
+		ln -s "$VERSION" bundles/latest
+	fi
+
 	if [ $# -lt 1 ]; then
 		bundles=(${DEFAULT_BUNDLES[@]})
 	else
 		bundles=($@)
 	fi
 	for bundle in ${bundles[@]}; do
-		bundle $SCRIPTDIR/make/$bundle
+		bundle "$SCRIPTDIR/make/$bundle"
 		echo
 	done
 }
diff --git a/hack/make/.build-deb/compat b/hack/make/.build-deb/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/hack/make/.build-deb/compat
@@ -0,0 +1 @@
+9
diff --git a/hack/make/.build-deb/control b/hack/make/.build-deb/control
new file mode 100644
index 0000000..ac6541a
--- /dev/null
+++ b/hack/make/.build-deb/control
@@ -0,0 +1,27 @@
+Source: docker-engine
+Maintainer: Docker <support@docker.com>
+Homepage: https://dockerproject.com
+Vcs-Browser: https://github.com/docker/docker
+Vcs-Git: git://github.com/docker/docker.git
+
+Package: docker-engine
+Architecture: linux-any
+Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
+Recommends: aufs-tools,
+            ca-certificates,
+            cgroupfs-mount | cgroup-lite,
+            git,
+            xz-utils,
+            ${apparmor:Recommends}
+Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package
+Description: Docker: the open-source application container engine
+ Docker is an open source project to pack, ship and run any application as a
+ lightweight container
+ .
+ Docker containers are both hardware-agnostic and platform-agnostic. This means
+ they can run anywhere, from your laptop to the largest EC2 compute instance and
+ they can run anywhere, from your laptop to the largest EC2 compute instance and
+ everything in between - and they don't require you to use a particular
+ language, framework or packaging system. That makes them great building blocks
+ for deploying and scaling web apps, databases, and backend services without
+ depending on a particular stack or provider.
diff --git a/hack/make/.build-deb/docker-engine.bash-completion b/hack/make/.build-deb/docker-engine.bash-completion
new file mode 100644
index 0000000..6ea1119
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.bash-completion
@@ -0,0 +1 @@
+contrib/completion/bash/docker
diff --git a/hack/make/.build-deb/docker-engine.docker.default b/hack/make/.build-deb/docker-engine.docker.default
new file mode 120000
index 0000000..4278533
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.docker.default
@@ -0,0 +1 @@
+../../../contrib/init/sysvinit-debian/docker.default
\ No newline at end of file
diff --git a/hack/make/.build-deb/docker-engine.docker.init b/hack/make/.build-deb/docker-engine.docker.init
new file mode 120000
index 0000000..8cb89d3
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.docker.init
@@ -0,0 +1 @@
+../../../contrib/init/sysvinit-debian/docker
\ No newline at end of file
diff --git a/hack/make/.build-deb/docker-engine.docker.upstart b/hack/make/.build-deb/docker-engine.docker.upstart
new file mode 120000
index 0000000..7e1b64a
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.docker.upstart
@@ -0,0 +1 @@
+../../../contrib/init/upstart/docker.conf
\ No newline at end of file
diff --git a/hack/make/.build-deb/docker-engine.install b/hack/make/.build-deb/docker-engine.install
new file mode 100644
index 0000000..a8857a9
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.install
@@ -0,0 +1,11 @@
+#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/
+#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/
+#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/
+contrib/*-integration usr/share/docker-engine/contrib/
+contrib/check-config.sh usr/share/docker-engine/contrib/
+contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/
+contrib/init/systemd/docker.service lib/systemd/system/
+contrib/init/systemd/docker.socket lib/systemd/system/
+contrib/mk* usr/share/docker-engine/contrib/
+contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/
+contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/
diff --git a/hack/make/.build-deb/docker-engine.manpages b/hack/make/.build-deb/docker-engine.manpages
new file mode 100644
index 0000000..d5cff8a
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.manpages
@@ -0,0 +1 @@
+docs/man/man*/*
diff --git a/hack/make/.build-deb/docker-engine.postinst b/hack/make/.build-deb/docker-engine.postinst
new file mode 100644
index 0000000..eeef6ca
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.postinst
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -e
+
+case "$1" in
+	configure)
+		if [ -z "$2" ]; then
+			if ! getent group docker > /dev/null; then
+				groupadd --system docker
+			fi
+		fi
+		;;
+	abort-*)
+		# How'd we get here??
+		exit 1
+		;;
+	*)
+		;;
+esac
+
+#DEBHELPER#
diff --git a/hack/make/.build-deb/docker-engine.udev b/hack/make/.build-deb/docker-engine.udev
new file mode 120000
index 0000000..914a361
--- /dev/null
+++ b/hack/make/.build-deb/docker-engine.udev
@@ -0,0 +1 @@
+../../../contrib/udev/80-docker.rules
\ No newline at end of file
diff --git a/hack/make/.build-deb/docs b/hack/make/.build-deb/docs
new file mode 100644
index 0000000..b43bf86
--- /dev/null
+++ b/hack/make/.build-deb/docs
@@ -0,0 +1 @@
+README.md
diff --git a/hack/make/.build-deb/rules b/hack/make/.build-deb/rules
new file mode 100755
index 0000000..fe19b72
--- /dev/null
+++ b/hack/make/.build-deb/rules
@@ -0,0 +1,36 @@
+#!/usr/bin/make -f
+
+VERSION = $(shell cat VERSION)
+
+override_dh_gencontrol:
+	# if we're on Ubuntu, we need to Recommends: apparmor
+	echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
+	dh_gencontrol
+
+override_dh_auto_build:
+	./hack/make.sh dynbinary
+	# ./docs/man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
+
+override_dh_auto_test:
+	./bundles/$(VERSION)/dynbinary/docker -v
+
+override_dh_strip:
+	# the SHA1 of dockerinit is important: don't strip it
+	# also, Go has lots of problems with stripping, so just don't
+
+override_dh_auto_install:
+	mkdir -p debian/docker-engine/usr/bin
+	cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/docker)" debian/docker-engine/usr/bin/docker
+	mkdir -p debian/docker-engine/usr/lib/docker
+	cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/dockerinit)" debian/docker-engine/usr/lib/docker/dockerinit
+
+override_dh_installinit:
+	# use "docker" as our service name, not "docker-engine"
+	dh_installinit --name=docker
+
+override_dh_installudev:
+	# match our existing priority
+	dh_installudev --priority=z80
+
+%:
+	dh $@ --with=systemd,bash-completion
diff --git a/hack/make/.build-rpm/docker-engine.spec b/hack/make/.build-rpm/docker-engine.spec
new file mode 100644
index 0000000..8cb7c11
--- /dev/null
+++ b/hack/make/.build-rpm/docker-engine.spec
@@ -0,0 +1,184 @@
+Name: docker-engine
+Version: %{_version}
+Release: %{_release}%{?dist}
+Summary: The open-source application container engine
+
+License: ASL 2.0
+Source: %{name}.tar.gz
+
+URL: https://dockerproject.com
+Vendor: Docker
+Packager: Docker <support@docker.com>
+
+# docker builds in a checksum of dockerinit into docker,
+# # so stripping the binaries breaks docker
+%global __os_install_post %{_rpmconfigdir}/brp-compress
+%global debug_package %{nil}
+
+# is_systemd conditional
+%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7
+%global is_systemd 1
+%endif
+
+# required packages for build
+# most are already in the container (see contrib/builder/rpm/generate.sh)
+# only require systemd on those systems
+%if 0%{?is_systemd}
+BuildRequires: pkgconfig(systemd)
+Requires: systemd-units
+%else
+Requires(post): chkconfig
+Requires(preun): chkconfig
+# This is for /sbin/service
+Requires(preun): initscripts
+%endif
+
+# required packages on install
+Requires: /bin/sh
+Requires: iptables
+Requires: libc.so.6
+Requires: libcgroup
+Requires: libpthread.so.0
+Requires: libsqlite3.so.0
+Requires: tar
+Requires: xz
+%if 0%{?fedora} >= 21
+# Resolves: rhbz#1165615
+Requires: device-mapper-libs >= 1.02.90-1
+%endif
+
+# conflicting packages
+Conflicts: docker
+Conflicts: docker-io
+
+%description
+Docker is an open source project to pack, ship and run any application as a
+lightweight container
+
+Docker containers are both hardware-agnostic and platform-agnostic. This means
+they can run anywhere, from your laptop to the largest EC2 compute instance and
+everything in between - and they don't require you to use a particular
+language, framework or packaging system. That makes them great building blocks
+for deploying and scaling web apps, databases, and backend services without
+depending on a particular stack or provider.
+
+%prep
+%if 0%{?centos} <= 6
+%setup -n %{name}
+%else
+%autosetup -n %{name}
+%endif
+
+%build
+./hack/make.sh dynbinary
+# ./docs/man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
+
+%check
+./bundles/%{_origversion}/dynbinary/docker -v
+
+%install
+# install binary
+install -d $RPM_BUILD_ROOT/%{_bindir}
+install -p -m 755 bundles/%{_origversion}/dynbinary/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker
+
+# install dockerinit
+install -d $RPM_BUILD_ROOT/%{_libexecdir}/docker
+install -p -m 755 bundles/%{_origversion}/dynbinary/dockerinit-%{_origversion} $RPM_BUILD_ROOT/%{_libexecdir}/docker/dockerinit
+
+# install udev rules
+install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d
+install -p -m 755 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules
+
+# add init scripts
+install -d $RPM_BUILD_ROOT/etc/sysconfig
+install -d $RPM_BUILD_ROOT/%{_initddir}
+
+
+%if 0%{?is_systemd}
+install -d $RPM_BUILD_ROOT/%{_unitdir}
+install -p -m 644 contrib/init/systemd/docker.service $RPM_BUILD_ROOT/%{_unitdir}/docker.service
+install -p -m 644 contrib/init/systemd/docker.socket $RPM_BUILD_ROOT/%{_unitdir}/docker.socket
+%endif
+
+install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker
+install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker
+
+# add bash completions
+install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions
+install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions
+install -d $RPM_BUILD_ROOT/usr/share/fish/completions
+install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker
+install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker
+install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/completions/docker.fish
+
+# install manpages
+install -d %{buildroot}%{_mandir}/man1
+install -p -m 644 docs/man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1
+install -d %{buildroot}%{_mandir}/man5
+install -p -m 644 docs/man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5
+
+# add vimfiles
+install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc
+install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect
+install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax
+install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt
+install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
+install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim
+
+# add nano
+install -d $RPM_BUILD_ROOT/usr/share/nano
+install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc
+
+# list files owned by the package here
+%files
+/%{_bindir}/docker
+/%{_libexecdir}/docker/dockerinit
+/%{_sysconfdir}/udev/rules.d/80-docker.rules
+%if 0%{?is_systemd}
+/%{_unitdir}/docker.service
+/%{_unitdir}/docker.socket
+%endif
+/etc/sysconfig/docker
+/%{_initddir}/docker
+/usr/share/bash-completion/completions/docker
+/usr/share/zsh/vendor-completions/_docker
+/usr/share/fish/completions/docker.fish
+%doc
+/%{_mandir}/man1/*
+/%{_mandir}/man5/*
+/usr/share/vim/vimfiles/doc/dockerfile.txt
+/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
+/usr/share/vim/vimfiles/syntax/dockerfile.vim
+/usr/share/nano/Dockerfile.nanorc
+
+%post
+%if 0%{?is_systemd}
+%systemd_post docker
+%else
+# This adds the proper /etc/rc*.d links for the script
+/sbin/chkconfig --add docker
+%endif
+if ! getent group docker > /dev/null; then
+    groupadd --system docker
+fi
+
+%preun
+%if 0%{?is_systemd}
+%systemd_preun docker
+%else
+if [ $1 -eq 0 ] ; then
+    /sbin/service docker stop >/dev/null 2>&1
+    /sbin/chkconfig --del docker
+fi
+%endif
+
+%postun
+%if 0%{?is_systemd}
+%systemd_postun_with_restart docker
+%else
+if [ "$1" -ge "1" ] ; then
+    /sbin/service docker condrestart >/dev/null 2>&1 || :
+fi
+%endif
+
+%changelog
diff --git a/hack/make/.dockerinit b/hack/make/.dockerinit
index fceba7d..4a62ee1 100644
--- a/hack/make/.dockerinit
+++ b/hack/make/.dockerinit
@@ -2,7 +2,7 @@
 set -e
 
 IAMSTATIC="true"
-source "$(dirname "$BASH_SOURCE")/.go-autogen"
+source "${MAKEDIR}/.go-autogen"
 
 # dockerinit still needs to be a static binary, even if docker is dynamic
 go build \
@@ -30,4 +30,4 @@
 fi
 
 # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another
-export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)"
+export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1)
diff --git a/hack/make/.dockerinit-gccgo b/hack/make/.dockerinit-gccgo
index 592a415..9890863 100644
--- a/hack/make/.dockerinit-gccgo
+++ b/hack/make/.dockerinit-gccgo
@@ -2,7 +2,7 @@
 set -e
 
 IAMSTATIC="true"
-source "$(dirname "$BASH_SOURCE")/.go-autogen"
+source "${MAKEDIR}/.go-autogen"
 
 # dockerinit still needs to be a static binary, even if docker is dynamic
 go build --compiler=gccgo \
@@ -12,6 +12,7 @@
 		-g
 		-Wl,--no-export-dynamic
 		$EXTLDFLAGS_STATIC_DOCKER
+		-lnetgo
 	" \
 	./dockerinit
 
@@ -27,4 +28,4 @@
 fi
 
 # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another
-export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)"
+export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1)
diff --git a/hack/make/.ensure-frozen-images b/hack/make/.ensure-frozen-images
index 379f738..deded80 100644
--- a/hack/make/.ensure-frozen-images
+++ b/hack/make/.ensure-frozen-images
@@ -5,6 +5,7 @@
 images=(
 	busybox:latest
 	hello-world:frozen
+	jess/unshare:latest
 )
 
 if ! docker inspect "${images[@]}" &> /dev/null; then
diff --git a/hack/make/.integration-daemon-start b/hack/make/.integration-daemon-start
index 570c6c7..eecf682 100644
--- a/hack/make/.integration-daemon-start
+++ b/hack/make/.integration-daemon-start
@@ -14,8 +14,30 @@
 
 export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs}
 export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native}
+export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true}
+
+# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G"
+storage_params=""
+if [ -n "$DOCKER_STORAGE_OPTS" ]; then
+	IFS=','
+	for i in ${DOCKER_STORAGE_OPTS}; do
+		storage_params="--storage-opt $i $storage_params"
+	done
+	unset IFS
+fi
 
 if [ -z "$DOCKER_TEST_HOST" ]; then
+	# Start apparmor if it is enabled
+	if [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then
+		# reset container variable so apparmor profile is applied to process
+		# see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16
+		export container=""
+		(
+			set -x
+			/etc/init.d/apparmor start
+		)
+	fi
+
 	export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one
 	( set -x; exec \
 		docker --daemon --debug \
@@ -23,8 +45,11 @@
 		--storage-driver "$DOCKER_GRAPHDRIVER" \
 		--exec-driver "$DOCKER_EXECDRIVER" \
 		--pidfile "$DEST/docker.pid" \
+		--userland-proxy="$DOCKER_USERLANDPROXY" \
+		$storage_params \
 			&> "$DEST/docker.log"
 	) &
+	trap "source '${MAKEDIR}/.integration-daemon-stop'" EXIT # make sure that if the script exits unexpectedly, we stop this daemon we just started
 else
 	export DOCKER_HOST="$DOCKER_TEST_HOST"
 fi
diff --git a/hack/make/.integration-daemon-stop b/hack/make/.integration-daemon-stop
index 319aaa4..364490b 100644
--- a/hack/make/.integration-daemon-stop
+++ b/hack/make/.integration-daemon-stop
@@ -1,9 +1,21 @@
 #!/bin/bash
 
+trap - EXIT # reset EXIT trap applied in .integration-daemon-start
+
 for pidFile in $(find "$DEST" -name docker.pid); do
 	pid=$(set -x; cat "$pidFile")
-	( set -x; kill $pid )
-	if ! wait $pid; then
+	( set -x; kill "$pid" )
+	if ! wait "$pid"; then
 		echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code"
 	fi
 done
+
+if [ -z "$DOCKER_TEST_HOST" ]; then
+	# Stop apparmor if it is enabled
+	if [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then
+		(
+			set -x
+			/etc/init.d/apparmor stop
+		)
+	fi
+fi
diff --git a/hack/make/.validate b/hack/make/.validate
index 0228091..7397d0f 100644
--- a/hack/make/.validate
+++ b/hack/make/.validate
@@ -3,23 +3,23 @@
 if [ -z "$VALIDATE_UPSTREAM" ]; then
 	# this is kind of an expensive check, so let's not do this twice if we
 	# are running more than one validate bundlescript
-	
+
 	VALIDATE_REPO='https://github.com/docker/docker.git'
 	VALIDATE_BRANCH='master'
-	
+
 	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
 		VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
 		VALIDATE_BRANCH="${TRAVIS_BRANCH}"
 	fi
-	
+
 	VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
-	
+
 	git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
 	VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
-	
+
 	VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
 	VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
-	
+
 	validate_diff() {
 		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
 			git diff "$VALIDATE_COMMIT_DIFF" "$@"
diff --git a/hack/make/binary b/hack/make/binary
index 0f57ea0..7b2af5c 100644
--- a/hack/make/binary
+++ b/hack/make/binary
@@ -11,8 +11,9 @@
 	DEST=$(cygpath -mw $DEST)
 fi
 
-source "$(dirname "$BASH_SOURCE")/.go-autogen"
+source "${MAKEDIR}/.go-autogen"
 
+echo "Building: $DEST/$BINARY_FULLNAME"
 go build \
 	-o "$DEST/$BINARY_FULLNAME" \
 	"${BUILDFLAGS[@]}" \
diff --git a/hack/make/build-deb b/hack/make/build-deb
new file mode 100644
index 0000000..36e1177
--- /dev/null
+++ b/hack/make/build-deb
@@ -0,0 +1,67 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+# subshell so that we can export PATH without breaking other things
+(
+	source "${MAKEDIR}/.integration-daemon-start"
+
+	# TODO consider using frozen images for the dockercore/builder-deb tags
+
+	tilde='~' # ouch Bash 4.2 vs 4.3, you keel me
+	debVersion="${VERSION//-/$tilde}" # using \~ or '~' here works in 4.3, but not 4.2; just ~ causes $HOME to be inserted, hence the $tilde
+	# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
+	if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
+		gitUnix="$(git log -1 --pretty='%at')"
+		gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
+		gitCommit="$(git log -1 --pretty='%h')"
+		gitVersion="git${gitDate}.0.${gitCommit}"
+		# gitVersion is now something like 'git20150128.112847.0.17e840a'
+		debVersion="$debVersion~$gitVersion"
+
+		# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false
+		# true
+		# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false
+		# true
+		# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false
+		# true
+
+		# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a
+	fi
+
+	debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)"
+	debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)"
+	debDate="$(date --rfc-2822)"
+
+	# if go-md2man is available, pre-generate the man pages
+	./docs/man/md2man-all.sh -q || true
+	# TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this
+
+	# TODO add a configurable knob for _which_ debs to build so we don't have to modify the file or build all of them every time we need to test
+	for dir in contrib/builder/deb/*/; do
+		version="$(basename "$dir")"
+		suite="${version##*-}"
+
+		image="dockercore/builder-deb:$version"
+		if ! docker inspect "$image" &> /dev/null; then
+			( set -x && docker build -t "$image" "$dir" )
+		fi
+
+		mkdir -p "$DEST/$version"
+		cat > "$DEST/$version/Dockerfile.build" <<-EOF
+			FROM $image
+			WORKDIR /usr/src/docker
+			COPY . /usr/src/docker
+			RUN ln -sfv hack/make/.build-deb debian
+			RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo '  * Version: $VERSION'; echo; echo " -- $debMaintainer  $debDate"; } > debian/changelog && cat >&2 debian/changelog
+			RUN dpkg-buildpackage -uc -us
+		EOF
+		tempImage="docker-temp/build-deb:$version"
+		( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . )
+		docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version"
+		docker rmi "$tempImage"
+	done
+
+	source "${MAKEDIR}/.integration-daemon-stop"
+) 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/build-rpm b/hack/make/build-rpm
new file mode 100644
index 0000000..0f3ff6d
--- /dev/null
+++ b/hack/make/build-rpm
@@ -0,0 +1,73 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+# subshell so that we can export PATH without breaking other things
+(
+	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
+
+	# TODO consider using frozen images for the dockercore/builder-rpm tags
+
+	rpmName=docker-engine
+	rpmVersion="${VERSION%%-*}"
+	rpmRelease=1
+
+	# rpmRelease versioning is as follows
+	# Docker 1.7.0:  version=1.7.0, release=1
+	# Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1
+	# Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH
+
+	# if we have a "-rc*" suffix, set appropriate release
+	if [[ "$VERSION" == *-rc* ]]; then
+		rcVersion=${VERSION#*-rc}
+		rpmRelease="0.${rcVersion}.rc${rcVersion}"
+	fi
+
+	# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
+	if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
+		gitUnix="$(git log -1 --pretty='%at')"
+		gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
+		gitCommit="$(git log -1 --pretty='%h')"
+		gitVersion="${gitDate}.git${gitCommit}"
+		# gitVersion is now something like '20150128.112847.17e840a'
+		rpmRelease="0.0.$gitVersion"
+	fi
+
+	rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)"
+	rpmDate="$(date +'%a %b %d %Y')"
+
+	# if go-md2man is available, pre-generate the man pages
+	./docs/man/md2man-all.sh -q || true
+	# TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this
+
+	# TODO add a configurable knob for _which_ rpms to build so we don't have to modify the file or build all of them every time we need to test
+	for dir in contrib/builder/rpm/*/; do
+		version="$(basename "$dir")"
+		suite="${version##*-}"
+
+		image="dockercore/builder-rpm:$version"
+		if ! docker inspect "$image" &> /dev/null; then
+			( set -x && docker build -t "$image" "$dir" )
+		fi
+
+		mkdir -p "$DEST/$version"
+		cat > "$DEST/$version/Dockerfile.build" <<-EOF
+			FROM $image
+			COPY . /usr/src/${rpmName}
+			RUN mkdir -p /root/rpmbuild/SOURCES
+			WORKDIR /root/rpmbuild
+			RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS
+			RUN tar -cz -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar.gz ${rpmName}
+			WORKDIR /root/rpmbuild/SPECS
+			RUN { echo '* $rpmDate $rpmPackager $rpmVersion-$rpmRelease'; echo '* Version: $VERSION'; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec
+			RUN rpmbuild -ba --define '_release $rpmRelease' --define '_version $rpmVersion' --define '_origversion $VERSION' ${rpmName}.spec
+		EOF
+		tempImage="docker-temp/build-rpm:$version"
+		( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . )
+		docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version"
+		docker rmi "$tempImage"
+	done
+
+	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
+) 2>&1 | tee -a $DEST/test.log
diff --git a/hack/make/cross b/hack/make/cross
index 3c5cb040..368ebc5 100644
--- a/hack/make/cross
+++ b/hack/make/cross
@@ -28,6 +28,6 @@
 			export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
 			export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
 		fi
-		source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform"
+		source "${MAKEDIR}/binary" "$DEST/$platform"
 	)
 done
diff --git a/hack/make/dynbinary b/hack/make/dynbinary
index 861a192..e1b65b4 100644
--- a/hack/make/dynbinary
+++ b/hack/make/dynbinary
@@ -4,8 +4,8 @@
 DEST=$1
 
 if [ -z "$DOCKER_CLIENTONLY" ]; then
-	source "$(dirname "$BASH_SOURCE")/.dockerinit"
-	
+	source "${MAKEDIR}/.dockerinit"
+
 	hash_files "$DEST/dockerinit-$VERSION"
 else
 	# DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :)
@@ -18,5 +18,5 @@
 	export LDFLAGS_STATIC_DOCKER=''
 	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
 	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
-	source "$(dirname "$BASH_SOURCE")/binary"
+	source "${MAKEDIR}/binary"
 )
diff --git a/hack/make/dyngccgo b/hack/make/dyngccgo
index a76e9c5..7bdd404 100644
--- a/hack/make/dyngccgo
+++ b/hack/make/dyngccgo
@@ -4,8 +4,8 @@
 DEST=$1
 
 if [ -z "$DOCKER_CLIENTONLY" ]; then
-	source "$(dirname "$BASH_SOURCE")/.dockerinit-gccgo"
-	
+	source "${MAKEDIR}/.dockerinit-gccgo"
+
 	hash_files "$DEST/dockerinit-$VERSION"
 else
 	# DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :)
@@ -19,5 +19,5 @@
 	export LDFLAGS_STATIC_DOCKER=''
 	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
 	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
-	source "$(dirname "$BASH_SOURCE")/gccgo"
+	source "${MAKEDIR}/gccgo"
 )
diff --git a/hack/make/gccgo b/hack/make/gccgo
index c85d2fb..896c2d4 100644
--- a/hack/make/gccgo
+++ b/hack/make/gccgo
@@ -6,8 +6,11 @@
 BINARY_EXTENSION="$(binary_extension)"
 BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
 
-source "$(dirname "$BASH_SOURCE")/.go-autogen"
+source "${MAKEDIR}/.go-autogen"
 
+if [[ "${BUILDFLAGS[@]}" =~ 'netgo ' ]]; then
+	EXTLDFLAGS_STATIC_DOCKER+=' -lnetgo'
+fi
 go build -compiler=gccgo \
 	-o "$DEST/$BINARY_FULLNAME" \
 	"${BUILDFLAGS[@]}" \
diff --git a/hack/make/test-docker-py b/hack/make/test-docker-py
index b95cf40..ac5ef35 100644
--- a/hack/make/test-docker-py
+++ b/hack/make/test-docker-py
@@ -5,26 +5,16 @@
 
 # subshell so that we can export PATH without breaking other things
 (
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
+	source "${MAKEDIR}/.integration-daemon-start"
 
-	# we need to wrap up everything in between integration-daemon-start and
-	# integration-daemon-stop to make sure we kill the daemon and don't hang,
-	# even and especially on test failures
-	didFail=
-	if ! {
-		dockerPy='/docker-py'
-		[ -d "$dockerPy" ] || {
-			dockerPy="$DEST/docker-py"
-			git clone https://github.com/docker/docker-py.git "$dockerPy"
-		}
+	dockerPy='/docker-py'
+	[ -d "$dockerPy" ] || {
+		dockerPy="$DEST/docker-py"
+		git clone https://github.com/docker/docker-py.git "$dockerPy"
+	}
 
-		# exporting PYTHONPATH to import "docker" from our local docker-py
-		test_env PYTHONPATH="$dockerPy" python "$dockerPy/tests/integration_test.py"
-	}; then
-		didFail=1
-	fi
+	# exporting PYTHONPATH to import "docker" from our local docker-py
+	test_env PYTHONPATH="$dockerPy" python "$dockerPy/tests/integration_test.py"
 
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
-
-	[ -z "$didFail" ] # "set -e" ftw
-) 2>&1 | tee -a $DEST/test.log
+	source "${MAKEDIR}/.integration-daemon-stop"
+) 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/test-integration b/hack/make/test-integration
deleted file mode 100644
index 5cb7102..0000000
--- a/hack/make/test-integration
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-set -e
-
-DEST=$1
-
-INIT=$DEST/../dynbinary/dockerinit-$VERSION
-[ -x "$INIT" ] || {
-	source "$(dirname "$BASH_SOURCE")/.dockerinit"
-	INIT="$DEST/dockerinit"
-}
-export TEST_DOCKERINIT_PATH="$INIT"
-
-bundle_test_integration() {
-	LDFLAGS="
-		$LDFLAGS
-		-X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"
-	" go_test_dir ./integration \
-		"-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)"
-}
-
-# this "grep" hides some really irritating warnings that "go test -coverpkg"
-# spews when it is given packages that aren't used
-bundle_test_integration 2>&1 \
-	| grep --line-buffered -v '^warning: no packages being tested depend on ' \
-	| tee -a $DEST/test.log
diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli
index 3ef41d9..db1cb29 100644
--- a/hack/make/test-integration-cli
+++ b/hack/make/test-integration-cli
@@ -9,23 +9,13 @@
 
 # subshell so that we can export PATH without breaking other things
 (
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
+	source "${MAKEDIR}/.integration-daemon-start"
 
-	# we need to wrap up everything in between integration-daemon-start and
-	# integration-daemon-stop to make sure we kill the daemon and don't hang,
-	# even and especially on test failures
-	didFail=
-	if ! {
-		source "$(dirname "$BASH_SOURCE")/.ensure-frozen-images"
-		source "$(dirname "$BASH_SOURCE")/.ensure-httpserver"
-		source "$(dirname "$BASH_SOURCE")/.ensure-emptyfs"
+	source "${MAKEDIR}/.ensure-frozen-images"
+	source "${MAKEDIR}/.ensure-httpserver"
+	source "${MAKEDIR}/.ensure-emptyfs"
 
-		bundle_test_integration_cli
-	}; then
-		didFail=1
-	fi
+	bundle_test_integration_cli
 
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
-
-	[ -z "$didFail" ] # "set -e" ftw
-) 2>&1 | tee -a $DEST/test.log
+	source "${MAKEDIR}/.integration-daemon-stop"
+) 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/test-unit b/hack/make/test-unit
index 540ba41..7b6ce08 100644
--- a/hack/make/test-unit
+++ b/hack/make/test-unit
@@ -12,7 +12,7 @@
 # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
 # You can use this to select certain tests to run, eg.
 #
-#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit
+#   TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
 #
 bundle_test_unit() {
 	{
@@ -39,12 +39,12 @@
 				mkdir -p "$HOME/.parallel"
 				touch "$HOME/.parallel/ignored_vars"
 
-				echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir"
+				echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "${MAKEDIR}/.go-compile-test-dir"
 				rm -rf "$HOME"
 			else
 				# aww, no "parallel" available - fall back to boring
 				for test_dir in $TESTDIRS; do
-					"$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" || true
+					"${MAKEDIR}/.go-compile-test-dir" "$test_dir" || true
 					# don't let one directory that fails to build tank _all_ our tests!
 				done
 			fi
@@ -85,4 +85,4 @@
 	fi
 }
 
-bundle_test_unit 2>&1 | tee -a $DEST/test.log
+bundle_test_unit 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/tgz b/hack/make/tgz
index 7234218..fa297e1 100644
--- a/hack/make/tgz
+++ b/hack/make/tgz
@@ -18,17 +18,17 @@
 	BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
 	mkdir -p "$DEST/$GOOS/$GOARCH"
 	TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz"
-	
+
 	mkdir -p "$DEST/build"
-	
+
 	mkdir -p "$DEST/build/usr/local/bin"
 	cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION"
-	
+
 	tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr
-	
+
 	hash_files "$TGZ"
-	
+
 	rm -rf "$DEST/build"
-	
+
 	echo "Created tgz: $TGZ"
 done
diff --git a/hack/make/ubuntu b/hack/make/ubuntu
index e34369e..7543789 100644
--- a/hack/make/ubuntu
+++ b/hack/make/ubuntu
@@ -23,7 +23,7 @@
 # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a
 
 PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
-PACKAGE_URL="http://www.docker.com/"
+PACKAGE_URL="https://www.docker.com/"
 PACKAGE_MAINTAINER="support@docker.com"
 PACKAGE_DESCRIPTION="Linux container runtime
 Docker complements LXC with a high-level API which operates at the process
@@ -40,26 +40,26 @@
 	DIR=$DEST/build
 
 	# Include our udev rules
-	mkdir -p $DIR/etc/udev/rules.d
-	cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/
+	mkdir -p "$DIR/etc/udev/rules.d"
+	cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/"
 
 	# Include our init scripts
-	mkdir -p $DIR/etc/init
-	cp contrib/init/upstart/docker.conf $DIR/etc/init/
-	mkdir -p $DIR/etc/init.d
-	cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/
-	mkdir -p $DIR/etc/default
-	cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker
-	mkdir -p $DIR/lib/systemd/system
-	cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/
+	mkdir -p "$DIR/etc/init"
+	cp contrib/init/upstart/docker.conf "$DIR/etc/init/"
+	mkdir -p "$DIR/etc/init.d"
+	cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/"
+	mkdir -p "$DIR/etc/default"
+	cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker"
+	mkdir -p "$DIR/lib/systemd/system"
+	cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/"
 
 	# Include contributed completions
-	mkdir -p $DIR/etc/bash_completion.d
-	cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/
-	mkdir -p $DIR/usr/share/zsh/vendor-completions
-	cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/
-	mkdir -p $DIR/etc/fish/completions
-	cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/
+	mkdir -p "$DIR/etc/bash_completion.d"
+	cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/"
+	mkdir -p "$DIR/usr/share/zsh/vendor-completions"
+	cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/"
+	mkdir -p "$DIR/etc/fish/completions"
+	cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/"
 
 	# Include contributed man pages
 	docs/man/md2man-all.sh -q
@@ -76,11 +76,11 @@
 
 	# Copy the binary
 	# This will fail if the binary bundle hasn't been built
-	mkdir -p $DIR/usr/bin
-	cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker
+	mkdir -p "$DIR/usr/bin"
+	cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker"
 
 	# Generate postinst/prerm/postrm scripts
-	cat > $DEST/postinst <<'EOF'
+	cat > "$DEST/postinst" <<'EOF'
 #!/bin/sh
 set -e
 set -u
@@ -104,7 +104,7 @@
 
 #DEBHELPER#
 EOF
-	cat > $DEST/prerm <<'EOF'
+	cat > "$DEST/prerm" <<'EOF'
 #!/bin/sh
 set -e
 set -u
@@ -113,7 +113,7 @@
 
 #DEBHELPER#
 EOF
-	cat > $DEST/postrm <<'EOF'
+	cat > "$DEST/postrm" <<'EOF'
 #!/bin/sh
 set -e
 set -u
@@ -131,18 +131,18 @@
 #DEBHELPER#
 EOF
 	# TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way
-	chmod +x $DEST/postinst $DEST/prerm $DEST/postrm
+	chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm"
 
 	(
 		# switch directories so we create *.deb in the right folder
-		cd $DEST
+		cd "$DEST"
 
 		# create lxc-docker-VERSION package
-		fpm -s dir -C $DIR \
-			--name lxc-docker-$VERSION --version "$PKGVERSION" \
-			--after-install $DEST/postinst \
-			--before-remove $DEST/prerm \
-			--after-remove $DEST/postrm \
+		fpm -s dir -C "$DIR" \
+			--name "lxc-docker-$VERSION" --version "$PKGVERSION" \
+			--after-install "$DEST/postinst" \
+			--before-remove "$DEST/prerm" \
+			--after-remove "$DEST/postrm" \
 			--architecture "$PACKAGE_ARCHITECTURE" \
 			--prefix / \
 			--depends iptables \
@@ -184,8 +184,8 @@
 	)
 
 	# clean up after ourselves so we have a clean output directory
-	rm $DEST/postinst $DEST/prerm $DEST/postrm
-	rm -r $DIR
+	rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm"
+	rm -r "$DIR"
 }
 
 bundle_ubuntu
diff --git a/hack/make/validate-dco b/hack/make/validate-dco
index 84c47f5..5ac9872 100644
--- a/hack/make/validate-dco
+++ b/hack/make/validate-dco
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-source "$(dirname "$BASH_SOURCE")/.validate"
+source "${MAKEDIR}/.validate"
 
 adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
 dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
diff --git a/hack/make/validate-gofmt b/hack/make/validate-gofmt
index 8fc88cc..7ad9e85 100644
--- a/hack/make/validate-gofmt
+++ b/hack/make/validate-gofmt
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-source "$(dirname "$BASH_SOURCE")/.validate"
+source "${MAKEDIR}/.validate"
 
 IFS=$'\n'
 files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
diff --git a/hack/make/validate-test b/hack/make/validate-test
new file mode 100644
index 0000000..d9d05f3
--- /dev/null
+++ b/hack/make/validate-test
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Make sure we're not using gos' Testing package any more in integration-cli
+
+source "${MAKEDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) )
+unset IFS
+
+badFiles=()
+for f in "${files[@]}"; do
+	# skip check_test.go since it *does* use the testing package
+	if [ "$f" = "integration-cli/check_test.go" ]; then
+		continue
+	fi
+
+	# we use "git show" here to validate that what's committed is formatted
+	if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then
+		badFiles+=( "$f" )
+	fi
+done
+
+if [ ${#badFiles[@]} -eq 0 ]; then
+	echo 'Congratulations! No testing.T found.'
+else
+	{
+		echo "These files use the wrong testing infrastructure:"
+		for f in "${badFiles[@]}"; do
+			echo " - $f"
+		done
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/make/validate-toml b/hack/make/validate-toml
index 16c228d..18f26ee 100644
--- a/hack/make/validate-toml
+++ b/hack/make/validate-toml
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-source "$(dirname "$BASH_SOURCE")/.validate"
+source "${MAKEDIR}/.validate"
 
 IFS=$'\n'
 files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) )
diff --git a/hack/make/validate-vet b/hack/make/validate-vet
new file mode 100644
index 0000000..febe93e
--- /dev/null
+++ b/hack/make/validate-vet
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+source "${MAKEDIR}/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
+unset IFS
+
+errors=()
+for f in "${files[@]}"; do
+	# we use "git show" here to validate that what's committed passes go vet
+	failedVet=$(go vet "$f")
+	if [ "$failedVet" ]; then
+		errors+=( "$failedVet" )
+	fi
+done
+
+
+if [ ${#errors[@]} -eq 0 ]; then
+	echo 'Congratulations!  All Go source files have been vetted.'
+else
+	{
+		echo "Errors from go vet:"
+		for err in "${errors[@]}"; do
+			echo " - $err"
+		done
+		echo
+		echo 'Please fix the above errors. You can test via "go vet" and commit the result.'
+		echo
+	} >&2
+	false
+fi
diff --git a/hack/release.sh b/hack/release.sh
index da95808..1d3c9c3 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -54,13 +54,13 @@
 
 if [ "$1" != '--release-regardless-of-test-failure' ]; then
 	RELEASE_BUNDLES=(
-		test-unit test-integration
+		test-unit
 		"${RELEASE_BUNDLES[@]}"
 		test-integration-cli
 	)
 fi
 
-VERSION=$(cat VERSION)
+VERSION=$(< VERSION)
 BUCKET=$AWS_S3_BUCKET
 
 # These are the 2 keys we've used to sign the deb's
@@ -71,23 +71,23 @@
 
 setup_s3() {
 	# Try creating the bucket. Ignore errors (it might already exist).
-	s3cmd mb s3://$BUCKET 2>/dev/null || true
+	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
 	# Check access to the bucket.
 	# s3cmd has no useful exit status, so we cannot check that.
 	# Instead, we check if it outputs anything on standard output.
 	# (When there are problems, it uses standard error instead.)
-	s3cmd info s3://$BUCKET | grep -q .
+	s3cmd info "s3://$BUCKET" | grep -q .
 	# Make the bucket accessible through website endpoints.
-	s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET
+	s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
 }
 
 # write_to_s3 uploads the contents of standard input to the specified S3 url.
 write_to_s3() {
 	DEST=$1
 	F=`mktemp`
-	cat > $F
-	s3cmd --acl-public --mime-type='text/plain' put $F $DEST
-	rm -f $F
+	cat > "$F"
+	s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST"
+	rm -f "$F"
 }
 
 s3_url() {
@@ -246,20 +246,20 @@
 # 1. A full APT repository is published at $BUCKET/ubuntu/
 # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
 release_ubuntu() {
-	[ -e bundles/$VERSION/ubuntu ] || {
+	[ -e "bundles/$VERSION/ubuntu" ] || {
 		echo >&2 './hack/make.sh must be run before release_ubuntu'
 		exit 1
 	}
 
 	# Sign our packages
 	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
-		--sign builder bundles/$VERSION/ubuntu/*.deb
+		--sign builder "bundles/$VERSION/ubuntu/"*.deb
 
 	# Setup the APT repo
 	APTDIR=bundles/$VERSION/ubuntu/apt
-	mkdir -p $APTDIR/conf $APTDIR/db
-	s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true
-	cat > $APTDIR/conf/distributions <<EOF
+	mkdir -p "$APTDIR/conf" "$APTDIR/db"
+	s3cmd sync "s3://$BUCKET/ubuntu/db/" "$APTDIR/db/" || true
+	cat > "$APTDIR/conf/distributions" <<EOF
 Codename: docker
 Components: main
 Architectures: amd64 i386
@@ -267,19 +267,19 @@
 
 	# Add the DEB package to the APT repo
 	DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb
-	reprepro -b $APTDIR includedeb docker $DEBFILE
+	reprepro -b "$APTDIR" includedeb docker "$DEBFILE"
 
 	# Sign
 	for F in $(find $APTDIR -name Release); do
-		gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
+		gpg -u releasedocker --passphrase "$GPG_PASSPHRASE" \
 			--armor --sign --detach-sign \
-			--output $F.gpg $F
+			--output "$F.gpg" "$F"
 	done
 
 	# Upload keys
-	s3cmd sync $HOME/.gnupg/ s3://$BUCKET/ubuntu/.gnupg/
-	gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg
-	s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg
+	s3cmd sync "$HOME/.gnupg/" "s3://$BUCKET/ubuntu/.gnupg/"
+	gpg --armor --export releasedocker > "bundles/$VERSION/ubuntu/gpg"
+	s3cmd --acl-public put "bundles/$VERSION/ubuntu/gpg" "s3://$BUCKET/gpg"
 
 	local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9
 	if [[ $BUCKET == test* ]]; then
@@ -287,7 +287,7 @@
 	fi
 
 	# Upload repo
-	s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
+	s3cmd --acl-public sync "$APTDIR/" "s3://$BUCKET/ubuntu/"
 	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
 # Check that HTTPS transport is available to APT
 if [ ! -e /usr/lib/apt/methods/https ]; then
@@ -312,14 +312,14 @@
 
 	# Add redirect at /ubuntu/info for URL-backwards-compatibility
 	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
-	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info
+	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/ubuntu/info"
 
 	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
 }
 
 # Upload binaries and tgz files to S3
 release_binaries() {
-	[ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || {
+	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
 		echo >&2 './hack/make.sh must be run before release_binaries'
 		exit 1
 	}
@@ -341,29 +341,29 @@
 
 	# Add redirect at /builds/info for URL-backwards-compatibility
 	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
-	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info
+	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/builds/info"
 
 	if [ -z "$NOLATEST" ]; then
 		echo "Advertising $VERSION on $BUCKET as most recent version"
-		echo $VERSION | write_to_s3 s3://$BUCKET/latest
+		echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest"
 	fi
 }
 
 # Upload the index script
 release_index() {
-	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 s3://$BUCKET/index
+	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET/index"
 }
 
 release_test() {
 	if [ -e "bundles/$VERSION/test" ]; then
-		s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/
+		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET/test/"
 	fi
 }
 
 setup_gpg() {
 	# Make sure that we have our keys
-	mkdir -p $HOME/.gnupg/
-	s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ $HOME/.gnupg/ || true
+	mkdir -p "$HOME/.gnupg/"
+	s3cmd sync "s3://$BUCKET/ubuntu/.gnupg/" "$HOME/.gnupg/" || true
 	gpg --list-keys releasedocker >/dev/null || {
 		gpg --gen-key --batch <<EOF
 Key-Type: RSA
diff --git a/hack/vendor.sh b/hack/vendor.sh
index c0b1112..488629d 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -11,17 +11,17 @@
 	vcs=$1
 	pkg=$2
 	rev=$3
-	
+
 	pkg_url=https://$pkg
 	target_dir=src/$pkg
-	
+
 	echo -n "$pkg @ $rev: "
-	
+
 	if [ -d $target_dir ]; then
 		echo -n 'rm old, '
 		rm -fr $target_dir
 	fi
-	
+
 	echo -n 'clone, '
 	case $vcs in
 		git)
@@ -32,51 +32,45 @@
 			hg clone --quiet --updaterev $rev $pkg_url $target_dir
 			;;
 	esac
-	
+
 	echo -n 'rm VCS, '
 	( cd $target_dir && rm -rf .{git,hg} )
-	
+
+	echo -n 'rm vendor, '
+	( cd $target_dir && rm -rf vendor Godeps/_workspace )
+
 	echo done
 }
 
-clone git github.com/kr/pty 05017fcccf
-
+# the following lines are in sorted order, FYI
+clone git github.com/Sirupsen/logrus v0.7.3 # logrus is a common dependency among multiple deps
+clone git github.com/docker/libtrust 230dfd18c232
+clone git github.com/go-check/check 64131543e7896d5bcc6bd5a76287eb75ea96c673
 clone git github.com/gorilla/context 14f550f51a
-
-clone git github.com/gorilla/mux 136d54f81f
-
-clone git github.com/tchap/go-patricia v1.0.1
-
+clone git github.com/gorilla/mux e444e69cbd
+clone git github.com/kr/pty 5cf931ef8f
+clone git github.com/mistifyio/go-zfs v2.1.0
+clone git github.com/tchap/go-patricia v2.1.0
 clone hg code.google.com/p/go.net 84a4013f96e0
-
 clone hg code.google.com/p/gosqlite 74691fb6f837
 
-clone git github.com/docker/libtrust 230dfd18c232
+#get libnetwork packages
+clone git github.com/docker/libnetwork 2da2dc055de5a474c8540871ad88a48213b0994f
+clone git github.com/vishvananda/netns 008d17ae001344769b031375bdb38a86219154c6
+clone git github.com/vishvananda/netlink 8eb64238879fed52fd51c5b30ad20b928fb4c36c
 
-clone git github.com/Sirupsen/logrus v0.7.1
-
-clone git github.com/go-fsnotify/fsnotify v1.0.4
-
-# get Go tip's archive/tar, for xattr support and improved performance
-# TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep
-if [ "$1" = '--go' ]; then
-	# Go takes forever and a half to clone, so we only redownload it when explicitly requested via the "--go" flag to this script.
-	clone hg code.google.com/p/go 1b17b3426e3c
-	mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar
-	rm -rf src/code.google.com/p/go
-	mkdir -p src/code.google.com/p/go/src/pkg/archive
-	mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
-fi
-
-# get digest package from distribution
+# get distribution packages
 clone git github.com/docker/distribution d957768537c5af40e4f4cd96871f7b2bde9e2923
 mv src/github.com/docker/distribution/digest tmp-digest
+mv src/github.com/docker/distribution/registry/api tmp-api
 rm -rf src/github.com/docker/distribution
 mkdir -p src/github.com/docker/distribution
 mv tmp-digest src/github.com/docker/distribution/digest
+mkdir -p src/github.com/docker/distribution/registry
+mv tmp-api src/github.com/docker/distribution/registry/api
 
-clone git github.com/docker/libcontainer 227771c8f611f03639f0eeb169428761d9504ab5
-# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file)
-rm -rf src/github.com/docker/libcontainer/vendor
-eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli' | grep -v 'github.com/Sirupsen/logrus')"
-# we exclude "github.com/codegangsta/cli" here because it's only needed for "nsinit", which Docker doesn't include
+clone git github.com/docker/libcontainer a37b2a4f152e2a1c9de596f54c051cb889de0691
+# libcontainer deps (see src/github.com/docker/libcontainer/update-vendor.sh)
+clone git github.com/coreos/go-systemd v2
+clone git github.com/godbus/dbus v2
+clone git github.com/syndtr/gocapability 66ef2aa7a23ba682594e2b6f74cf40c0692b49fb
diff --git a/image/image.go b/image/image.go
index a661e3c..4e37ebc 100644
--- a/image/image.go
+++ b/image/image.go
@@ -5,13 +5,13 @@
 	"fmt"
 	"io/ioutil"
 	"os"
-	"path"
+	"path/filepath"
+	"regexp"
 	"strconv"
 	"time"
 
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
 )
 
 // Set the max depth to the aufs default that most
@@ -19,6 +19,8 @@
 // For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
 const MaxImageDepth = 127
 
+var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+
 type Image struct {
 	ID              string            `json:"id"`
 	Parent          string            `json:"parent,omitempty"`
@@ -51,11 +53,11 @@
 	if err := dec.Decode(img); err != nil {
 		return nil, err
 	}
-	if err := utils.ValidateID(img.ID); err != nil {
+	if err := ValidateID(img.ID); err != nil {
 		return nil, err
 	}
 
-	if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil {
+	if buf, err := ioutil.ReadFile(filepath.Join(root, "layersize")); err != nil {
 		if !os.IsNotExist(err) {
 			return nil, err
 		}
@@ -107,21 +109,21 @@
 
 // SaveSize stores the current `size` value of `img` in the directory `root`.
 func (img *Image) SaveSize(root string) error {
-	if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil {
+	if err := ioutil.WriteFile(filepath.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil {
 		return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err)
 	}
 	return nil
 }
 
 func (img *Image) SaveCheckSum(root, checksum string) error {
-	if err := ioutil.WriteFile(path.Join(root, "checksum"), []byte(checksum), 0600); err != nil {
+	if err := ioutil.WriteFile(filepath.Join(root, "checksum"), []byte(checksum), 0600); err != nil {
 		return fmt.Errorf("Error storing checksum in %s/checksum: %s", root, err)
 	}
 	return nil
 }
 
 func (img *Image) GetCheckSum(root string) (string, error) {
-	cs, err := ioutil.ReadFile(path.Join(root, "checksum"))
+	cs, err := ioutil.ReadFile(filepath.Join(root, "checksum"))
 	if err != nil {
 		if os.IsNotExist(err) {
 			return "", nil
@@ -132,7 +134,7 @@
 }
 
 func jsonPath(root string) string {
-	return path.Join(root, "json")
+	return filepath.Join(root, "json")
 }
 
 func (img *Image) RawJson() ([]byte, error) {
@@ -263,3 +265,11 @@
 	}
 	return ret, nil
 }
+
+// Check wheather id is a valid image ID or not
+func ValidateID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return fmt.Errorf("image ID '%s' is invalid", id)
+	}
+	return nil
+}
diff --git a/image/spec/v1.md b/image/spec/v1.md
index abed758..b428cbb 100644
--- a/image/spec/v1.md
+++ b/image/spec/v1.md
@@ -31,7 +31,7 @@
         Image JSON
     </dt>
     <dd>
-        Each layer has an associated A JSON structure which describes some
+        Each layer has an associated JSON structure which describes some
         basic information about the image such as date created, author, and the
         ID of its parent image as well as execution/runtime configuration like
         its entry point, default arguments, CPU/memory shares, networking, and
@@ -81,7 +81,7 @@
         times of any entries differ. For this reason, image checksums are
         generated using the TarSum algorithm which produces a cryptographic
         hash of file contents and selected headers only. Details of this
-        algorithm are described in the separate [TarSum specification](https://github.com/docker/docker/blob/master/pkg/tarsum/tarsum_spec.md).
+        algorithm are described in the separate <a href="https://github.com/docker/docker/blob/master/pkg/tarsum/tarsum_spec.md">TarSum specification</a>.
     </dd>
     <dt>
         Tag
@@ -492,9 +492,9 @@
 There is also a format for a single archive which contains complete information
 about an image, including:
 
-    - repository names/tags
-    - all image layer JSON files
-    - all tar archives of each layer filesystem changesets
+ - repository names/tags
+ - all image layer JSON files
+ - all tar archives of each layer filesystem changesets
 
 For example, here's what the full archive of `library/busybox` is (displayed in
 `tree` format):
@@ -523,10 +523,10 @@
 There are one or more directories named with the ID for each layer in a full
 image. Each of these directories contains 3 files:
 
-    * `VERSION` - The schema version of the `json` file
-    * `json` - The JSON metadata for an image layer
-    * `layer.tar` - The Tar archive of the filesystem changeset for an image
-      layer.
+ * `VERSION` - The schema version of the `json` file
+ * `json` - The JSON metadata for an image layer
+ * `layer.tar` - The Tar archive of the filesystem changeset for an image
+   layer.
 
 The content of the `VERSION` files is simply the semantic version of the JSON
 metadata schema:
diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go
new file mode 100644
index 0000000..202799c
--- /dev/null
+++ b/integration-cli/check_test.go
@@ -0,0 +1,81 @@
+package main
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+func Test(t *testing.T) {
+	check.TestingT(t)
+}
+
+type TimerSuite struct {
+	start time.Time
+}
+
+func (s *TimerSuite) SetUpTest(c *check.C) {
+	s.start = time.Now()
+}
+
+func (s *TimerSuite) TearDownTest(c *check.C) {
+	fmt.Printf("%-60s%.2f\n", c.TestName(), time.Since(s.start).Seconds())
+}
+
+func init() {
+	check.Suite(&DockerSuite{})
+}
+
+type DockerSuite struct {
+	TimerSuite
+}
+
+func (s *DockerSuite) TearDownTest(c *check.C) {
+	deleteAllContainers()
+	deleteAllImages()
+	s.TimerSuite.TearDownTest(c)
+}
+
+func init() {
+	check.Suite(&DockerRegistrySuite{
+		ds: &DockerSuite{},
+	})
+}
+
+type DockerRegistrySuite struct {
+	ds  *DockerSuite
+	reg *testRegistryV2
+}
+
+func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
+	s.reg = setupRegistry(c)
+	s.ds.SetUpTest(c)
+}
+
+func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
+	s.reg.Close()
+	s.ds.TearDownTest(c)
+}
+
+func init() {
+	check.Suite(&DockerDaemonSuite{
+		ds: &DockerSuite{},
+	})
+}
+
+type DockerDaemonSuite struct {
+	ds *DockerSuite
+	d  *Daemon
+}
+
+func (s *DockerDaemonSuite) SetUpTest(c *check.C) {
+	s.d = NewDaemon(c)
+	s.ds.SetUpTest(c)
+}
+
+func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
+	s.d.Stop()
+	s.ds.TearDownTest(c)
+}
diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go
index b16a7bb..c784d5c 100644
--- a/integration-cli/docker_api_attach_test.go
+++ b/integration-cli/docker_api_attach_test.go
@@ -3,64 +3,77 @@
 import (
 	"bytes"
 	"os/exec"
-	"testing"
+	"strings"
 	"time"
 
+	"github.com/go-check/check"
+
 	"code.google.com/p/go.net/websocket"
 )
 
-func TestGetContainersAttachWebsocket(t *testing.T) {
+func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-dit", "busybox", "cat")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
-	defer deleteAllContainers()
 
 	rwc, err := sockConn(time.Duration(10 * time.Second))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	config, err := websocket.NewConfig(
 		"/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1",
 		"http://localhost",
 	)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	ws, err := websocket.NewClient(config, rwc)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ws.Close()
 
 	expected := []byte("hello")
 	actual := make([]byte, len(expected))
-	outChan := make(chan string)
+
+	outChan := make(chan error)
 	go func() {
-		if _, err := ws.Read(actual); err != nil {
-			t.Fatal(err)
-		}
-		outChan <- "done"
+		_, err := ws.Read(actual)
+		outChan <- err
+		close(outChan)
 	}()
 
-	inChan := make(chan string)
+	inChan := make(chan error)
 	go func() {
-		if _, err := ws.Write(expected); err != nil {
-			t.Fatal(err)
-		}
-		inChan <- "done"
+		_, err := ws.Write(expected)
+		inChan <- err
+		close(inChan)
 	}()
 
-	<-inChan
-	<-outChan
-
-	if !bytes.Equal(expected, actual) {
-		t.Fatal("Expected output on websocket to match input")
+	select {
+	case err := <-inChan:
+		if err != nil {
+			c.Fatal(err)
+		}
+	case <-time.After(5 * time.Second):
+		c.Fatal("Timeout writing to ws")
 	}
 
-	logDone("container attach websocket - can echo input via cat")
+	select {
+	case err := <-outChan:
+		if err != nil {
+			c.Fatal(err)
+		}
+	case <-time.After(5 * time.Second):
+		c.Fatal("Timeout reading from ws")
+	}
+
+	if !bytes.Equal(expected, actual) {
+		c.Fatal("Expected output on websocket to match input")
+	}
 }
diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go
index ea2f245..daec87c 100644
--- a/integration-cli/docker_api_containers_test.go
+++ b/integration-cli/docker_api_containers_test.go
@@ -1,70 +1,67 @@
 package main
 
 import (
+	"archive/tar"
 	"bytes"
 	"encoding/json"
 	"io"
+	"net/http"
+	"net/http/httputil"
+	"os"
 	"os/exec"
 	"strings"
-	"testing"
 	"time"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/runconfig"
+	"github.com/go-check/check"
 )
 
-func TestContainerApiGetAll(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestContainerApiGetAll(c *check.C) {
 	startCount, err := getContainerCount()
 	if err != nil {
-		t.Fatalf("Cannot query container count: %v", err)
+		c.Fatalf("Cannot query container count: %v", err)
 	}
 
 	name := "getall"
 	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("Error on container creation: %v, output: %q", err, out)
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
 	}
 
-	body, err := sockRequest("GET", "/containers/json?all=1", nil)
-	if err != nil {
-		t.Fatalf("GET all containers sockRequest failed: %v", err)
-	}
+	status, body, err := sockRequest("GET", "/containers/json?all=1", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
 
 	var inspectJSON []struct {
 		Names []string
 	}
 	if err = json.Unmarshal(body, &inspectJSON); err != nil {
-		t.Fatalf("unable to unmarshal response body: %v", err)
+		c.Fatalf("unable to unmarshal response body: %v", err)
 	}
 
 	if len(inspectJSON) != startCount+1 {
-		t.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount)
+		c.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount)
 	}
 
 	if actual := inspectJSON[0].Names[0]; actual != "/"+name {
-		t.Fatalf("Container Name mismatch. Expected: %q, received: %q\n", "/"+name, actual)
+		c.Fatalf("Container Name mismatch. Expected: %q, received: %q\n", "/"+name, actual)
 	}
-
-	logDone("container REST API - check GET json/all=1")
 }
 
-func TestContainerApiGetExport(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestContainerApiGetExport(c *check.C) {
 	name := "exportcontainer"
 	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("Error on container creation: %v, output: %q", err, out)
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
 	}
 
-	body, err := sockRequest("GET", "/containers/"+name+"/export", nil)
-	if err != nil {
-		t.Fatalf("GET containers/export sockRequest failed: %v", err)
-	}
+	status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
 
 	found := false
 	for tarReader := tar.NewReader(bytes.NewReader(body)); ; {
@@ -73,7 +70,7 @@
 			if err == io.EOF {
 				break
 			}
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		if h.Name == "test" {
 			found = true
@@ -82,33 +79,28 @@
 	}
 
 	if !found {
-		t.Fatalf("The created test file has not been found in the exported image")
+		c.Fatalf("The created test file has not been found in the exported image")
 	}
-
-	logDone("container REST API - check GET containers/export")
 }
 
-func TestContainerApiGetChanges(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestContainerApiGetChanges(c *check.C) {
 	name := "changescontainer"
 	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "rm", "/etc/passwd")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("Error on container creation: %v, output: %q", err, out)
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
 	}
 
-	body, err := sockRequest("GET", "/containers/"+name+"/changes", nil)
-	if err != nil {
-		t.Fatalf("GET containers/changes sockRequest failed: %v", err)
-	}
+	status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
 
 	changes := []struct {
 		Kind int
 		Path string
 	}{}
 	if err = json.Unmarshal(body, &changes); err != nil {
-		t.Fatalf("unable to unmarshal response body: %v", err)
+		c.Fatalf("unable to unmarshal response body: %v", err)
 	}
 
 	// Check the changelog for removal of /etc/passwd
@@ -119,56 +111,50 @@
 		}
 	}
 	if !success {
-		t.Fatalf("/etc/passwd has been removed but is not present in the diff")
+		c.Fatalf("/etc/passwd has been removed but is not present in the diff")
 	}
-
-	logDone("container REST API - check GET containers/changes")
 }
 
-func TestContainerApiStartVolumeBinds(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestContainerApiStartVolumeBinds(c *check.C) {
 	name := "testing"
 	config := map[string]interface{}{
 		"Image":   "busybox",
 		"Volumes": map[string]struct{}{"/tmp": {}},
 	}
 
-	if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") {
-		t.Fatal(err)
-	}
+	status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
+	c.Assert(status, check.Equals, http.StatusCreated)
+	c.Assert(err, check.IsNil)
 
 	bindPath := randomUnixTmpDirPath("test")
 	config = map[string]interface{}{
 		"Binds": []string{bindPath + ":/tmp"},
 	}
-	if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") {
-		t.Fatal(err)
-	}
+	status, _, err = sockRequest("POST", "/containers/"+name+"/start", config)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
 
 	pth, err := inspectFieldMap(name, "Volumes", "/tmp")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if pth != bindPath {
-		t.Fatalf("expected volume host path to be %s, got %s", bindPath, pth)
+		c.Fatalf("expected volume host path to be %s, got %s", bindPath, pth)
 	}
-
-	logDone("container REST API - check volume binds on start")
 }
 
 // Test for GH#10618
-func TestContainerApiStartDupVolumeBinds(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) {
 	name := "testdups"
 	config := map[string]interface{}{
 		"Image":   "busybox",
 		"Volumes": map[string]struct{}{"/tmp": {}},
 	}
 
-	if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") {
-		t.Fatal(err)
-	}
+	status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
+	c.Assert(status, check.Equals, http.StatusCreated)
+	c.Assert(err, check.IsNil)
 
 	bindPath1 := randomUnixTmpDirPath("test1")
 	bindPath2 := randomUnixTmpDirPath("test2")
@@ -176,177 +162,158 @@
 	config = map[string]interface{}{
 		"Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"},
 	}
-	if body, err := sockRequest("POST", "/containers/"+name+"/start", config); err == nil {
-		t.Fatal("expected container start to fail when duplicate volume binds to same container path")
-	} else {
-		if !strings.Contains(string(body), "Duplicate volume") {
-			t.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)
-		}
-	}
+	status, body, err := sockRequest("POST", "/containers/"+name+"/start", config)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
 
-	logDone("container REST API - check for duplicate volume binds error on start")
+	if !strings.Contains(string(body), "Duplicate bind") {
+		c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)
+	}
 }
-func TestContainerApiStartVolumesFrom(t *testing.T) {
-	defer deleteAllContainers()
+
+func (s *DockerSuite) TestContainerApiStartVolumesFrom(c *check.C) {
 	volName := "voltst"
 	volPath := "/tmp"
 
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	name := "testing"
+	name := "TestContainerApiStartDupVolumeBinds"
 	config := map[string]interface{}{
 		"Image":   "busybox",
 		"Volumes": map[string]struct{}{volPath: {}},
 	}
 
-	if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") {
-		t.Fatal(err)
-	}
+	status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
+	c.Assert(status, check.Equals, http.StatusCreated)
+	c.Assert(err, check.IsNil)
 
 	config = map[string]interface{}{
 		"VolumesFrom": []string{volName},
 	}
-	if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") {
-		t.Fatal(err)
-	}
+	status, _, err = sockRequest("POST", "/containers/"+name+"/start", config)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
 
 	pth, err := inspectFieldMap(name, "Volumes", volPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	pth2, err := inspectFieldMap(volName, "Volumes", volPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if pth != pth2 {
-		t.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
+		c.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
 	}
-
-	logDone("container REST API - check VolumesFrom on start")
 }
 
-// Ensure that volumes-from has priority over binds/anything else
-// This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start
-func TestVolumesFromHasPriority(t *testing.T) {
-	defer deleteAllContainers()
-	volName := "voltst"
-	volPath := "/tmp"
-
-	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil {
-		t.Fatal(out, err)
-	}
-
-	name := "testing"
-	config := map[string]interface{}{
-		"Image":   "busybox",
-		"Volumes": map[string]struct{}{volPath: {}},
-	}
-
-	if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") {
-		t.Fatal(err)
-	}
-
-	bindPath := randomUnixTmpDirPath("test")
-	config = map[string]interface{}{
-		"VolumesFrom": []string{volName},
-		"Binds":       []string{bindPath + ":/tmp"},
-	}
-	if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") {
-		t.Fatal(err)
-	}
-
-	pth, err := inspectFieldMap(name, "Volumes", volPath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	pth2, err := inspectFieldMap(volName, "Volumes", volPath)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if pth != pth2 {
-		t.Fatalf("expected volume host path to be %s, got %s", pth, pth2)
-	}
-
-	logDone("container REST API - check VolumesFrom has priority")
-}
-
-func TestGetContainerStats(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestGetContainerStats(c *check.C) {
 	var (
 		name   = "statscontainer"
 		runCmd = exec.Command(dockerBinary, "run", "-d", "--name", name, "busybox", "top")
 	)
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("Error on container creation: %v, output: %q", err, out)
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
 	}
 	type b struct {
-		body []byte
-		err  error
+		status int
+		body   []byte
+		err    error
 	}
 	bc := make(chan b, 1)
 	go func() {
-		body, err := sockRequest("GET", "/containers/"+name+"/stats", nil)
-		bc <- b{body, err}
+		status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil)
+		bc <- b{status, body, err}
 	}()
 
 	// allow some time to stream the stats from the container
 	time.Sleep(4 * time.Second)
 	if _, err := runCommand(exec.Command(dockerBinary, "rm", "-f", name)); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// collect the results from the stats stream or timeout and fail
 	// if the stream was not disconnected.
 	select {
 	case <-time.After(2 * time.Second):
-		t.Fatal("stream was not closed after container was removed")
+		c.Fatal("stream was not closed after container was removed")
 	case sr := <-bc:
-		if sr.err != nil {
-			t.Fatal(sr.err)
-		}
+		c.Assert(sr.err, check.IsNil)
+		c.Assert(sr.status, check.Equals, http.StatusOK)
 
 		dec := json.NewDecoder(bytes.NewBuffer(sr.body))
 		var s *types.Stats
 		// decode only one object from the stream
 		if err := dec.Decode(&s); err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 	}
-	logDone("container REST API - check GET containers/stats")
 }
 
-func TestGetStoppedContainerStats(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestContainerStatsRmRunning(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	id := strings.TrimSpace(out)
+
+	buf := &channelBuffer{make(chan []byte, 1)}
+	defer buf.Close()
+	chErr := make(chan error)
+	go func() {
+		_, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json")
+		if err != nil {
+			chErr <- err
+		}
+		defer body.Close()
+		_, err = io.Copy(buf, body)
+		chErr <- err
+	}()
+	defer func() {
+		c.Assert(<-chErr, check.IsNil)
+	}()
+
+	b := make([]byte, 32)
+	// make sure we've got some stats
+	_, err := buf.ReadTimeout(b, 2*time.Second)
+	c.Assert(err, check.IsNil)
+
+	// Now remove without `-f` and make sure we are still pulling stats
+	_, err = runCommand(exec.Command(dockerBinary, "rm", id))
+	c.Assert(err, check.Not(check.IsNil), check.Commentf("rm should have failed but didn't"))
+	_, err = buf.ReadTimeout(b, 2*time.Second)
+	c.Assert(err, check.IsNil)
+	dockerCmd(c, "rm", "-f", id)
+
+	_, err = buf.ReadTimeout(b, 2*time.Second)
+	c.Assert(err, check.Not(check.IsNil))
+}
+
+func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) {
+	// TODO: this test does nothing because we are c.Assert'ing in goroutine
 	var (
 		name   = "statscontainer"
 		runCmd = exec.Command(dockerBinary, "create", "--name", name, "busybox", "top")
 	)
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("Error on container creation: %v, output: %q", err, out)
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
 	}
 
 	go func() {
 		// We'll never get return for GET stats from sockRequest as of now,
 		// just send request and see if panic or error would happen on daemon side.
-		_, err := sockRequest("GET", "/containers/"+name+"/stats", nil)
-		if err != nil {
-			t.Fatal(err)
-		}
+		status, _, err := sockRequest("GET", "/containers/"+name+"/stats", nil)
+		c.Assert(status, check.Equals, http.StatusOK)
+		c.Assert(err, check.IsNil)
 	}()
 
 	// allow some time to send request and let daemon deal with it
 	time.Sleep(1 * time.Second)
-
-	logDone("container REST API - check GET stopped containers/stats")
 }
 
-func TestBuildApiDockerfilePath(t *testing.T) {
+func (s *DockerSuite) TestBuildApiDockerfilePath(c *check.C) {
 	// Test to make sure we stop people from trying to leave the
 	// build context when specifying the path to the dockerfile
 	buffer := new(bytes.Buffer)
@@ -358,28 +325,30 @@
 		Name: "Dockerfile",
 		Size: int64(len(dockerfile)),
 	}); err != nil {
-		t.Fatalf("failed to write tar file header: %v", err)
+		c.Fatalf("failed to write tar file header: %v", err)
 	}
 	if _, err := tw.Write(dockerfile); err != nil {
-		t.Fatalf("failed to write tar file content: %v", err)
+		c.Fatalf("failed to write tar file content: %v", err)
 	}
 	if err := tw.Close(); err != nil {
-		t.Fatalf("failed to close tar archive: %v", err)
+		c.Fatalf("failed to close tar archive: %v", err)
 	}
 
-	out, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar")
-	if err == nil {
-		t.Fatalf("Build was supposed to fail: %s", out)
+	res, body, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar")
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
+
+	out, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
 	}
 
 	if !strings.Contains(string(out), "must be within the build context") {
-		t.Fatalf("Didn't complain about leaving build context: %s", out)
+		c.Fatalf("Didn't complain about leaving build context: %s", out)
 	}
-
-	logDone("container REST API - check build w/bad Dockerfile path")
 }
 
-func TestBuildApiDockerFileRemote(t *testing.T) {
+func (s *DockerSuite) TestBuildApiDockerFileRemote(c *check.C) {
 	server, err := fakeStorage(map[string]string{
 		"testD": `FROM busybox
 COPY * /tmp/
@@ -387,13 +356,17 @@
 RUN find /tmp/`,
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
-	buf, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json")
+	res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json")
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	buf, err := readBody(body)
 	if err != nil {
-		t.Fatalf("Build failed: %s", err)
+		c.Fatal(err)
 	}
 
 	// Make sure Dockerfile exists.
@@ -401,36 +374,36 @@
 	out := string(buf)
 	if !strings.Contains(out, "/tmp/Dockerfile") ||
 		strings.Contains(out, "baz") {
-		t.Fatalf("Incorrect output: %s", out)
+		c.Fatalf("Incorrect output: %s", out)
 	}
-
-	logDone("container REST API - check build with -f from remote")
 }
 
-func TestBuildApiLowerDockerfile(t *testing.T) {
+func (s *DockerSuite) TestBuildApiLowerDockerfile(c *check.C) {
 	git, err := fakeGIT("repo", map[string]string{
 		"dockerfile": `FROM busybox
 RUN echo from dockerfile`,
 	}, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer git.Close()
 
-	buf, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
+	res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	buf, err := readBody(body)
 	if err != nil {
-		t.Fatalf("Build failed: %s\n%q", err, buf)
+		c.Fatal(err)
 	}
 
 	out := string(buf)
 	if !strings.Contains(out, "from dockerfile") {
-		t.Fatalf("Incorrect output: %s", out)
+		c.Fatalf("Incorrect output: %s", out)
 	}
-
-	logDone("container REST API - check build with lower dockerfile")
 }
 
-func TestBuildApiBuildGitWithF(t *testing.T) {
+func (s *DockerSuite) TestBuildApiBuildGitWithF(c *check.C) {
 	git, err := fakeGIT("repo", map[string]string{
 		"baz": `FROM busybox
 RUN echo from baz`,
@@ -438,26 +411,28 @@
 RUN echo from Dockerfile`,
 	}, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer git.Close()
 
 	// Make sure it tries to 'dockerfile' query param value
-	buf, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json")
+	res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json")
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	buf, err := readBody(body)
 	if err != nil {
-		t.Fatalf("Build failed: %s\n%q", err, buf)
+		c.Fatal(err)
 	}
 
 	out := string(buf)
 	if !strings.Contains(out, "from baz") {
-		t.Fatalf("Incorrect output: %s", out)
+		c.Fatalf("Incorrect output: %s", out)
 	}
-
-	logDone("container REST API - check build from git w/F")
 }
 
-func TestBuildApiDoubleDockerfile(t *testing.T) {
-	testRequires(t, UnixCli) // dockerfile overwrites Dockerfile on Windows
+func (s *DockerSuite) TestBuildApiDoubleDockerfile(c *check.C) {
+	testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows
 	git, err := fakeGIT("repo", map[string]string{
 		"Dockerfile": `FROM busybox
 RUN echo from Dockerfile`,
@@ -465,25 +440,27 @@
 RUN echo from dockerfile`,
 	}, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer git.Close()
 
 	// Make sure it tries to 'dockerfile' query param value
-	buf, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
+	res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	buf, err := readBody(body)
 	if err != nil {
-		t.Fatalf("Build failed: %s", err)
+		c.Fatal(err)
 	}
 
 	out := string(buf)
 	if !strings.Contains(out, "from Dockerfile") {
-		t.Fatalf("Incorrect output: %s", out)
+		c.Fatalf("Incorrect output: %s", out)
 	}
-
-	logDone("container REST API - check build with two dockerfiles")
 }
 
-func TestBuildApiDockerfileSymlink(t *testing.T) {
+func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) {
 	// Test to make sure we stop people from trying to leave the
 	// build context when specifying a symlink as the path to the dockerfile
 	buffer := new(bytes.Buffer)
@@ -495,15 +472,19 @@
 		Typeflag: tar.TypeSymlink,
 		Linkname: "/etc/passwd",
 	}); err != nil {
-		t.Fatalf("failed to write tar file header: %v", err)
+		c.Fatalf("failed to write tar file header: %v", err)
 	}
 	if err := tw.Close(); err != nil {
-		t.Fatalf("failed to close tar archive: %v", err)
+		c.Fatalf("failed to close tar archive: %v", err)
 	}
 
-	out, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar")
-	if err == nil {
-		t.Fatalf("Build was supposed to fail: %s", out)
+	res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar")
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
+
+	out, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
 	}
 
 	// The reason the error is "Cannot locate specified Dockerfile" is because
@@ -511,45 +492,843 @@
 	// Dockerfile -> /etc/passwd becomes etc/passwd from the context which is
 	// a nonexistent file.
 	if !strings.Contains(string(out), "Cannot locate specified Dockerfile: Dockerfile") {
-		t.Fatalf("Didn't complain about leaving build context: %s", out)
+		c.Fatalf("Didn't complain about leaving build context: %s", out)
 	}
-
-	logDone("container REST API - check build w/bad Dockerfile symlink path")
 }
 
 // #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume
-func TestPostContainerBindNormalVolume(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestPostContainerBindNormalVolume(c *check.C) {
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=one", "busybox"))
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	fooDir, err := inspectFieldMap("one", "Volumes", "/foo")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=two", "busybox"))
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}}
-	_, err = sockRequest("POST", "/containers/two/start", bindSpec)
-	if err != nil && !strings.Contains(err.Error(), "204 No Content") {
-		t.Fatal(err)
-	}
+	status, _, err := sockRequest("POST", "/containers/two/start", bindSpec)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
 
 	fooDir2, err := inspectFieldMap("two", "Volumes", "/foo")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if fooDir2 != fooDir {
-		t.Fatal("expected volume path to be %s, got: %s", fooDir, fooDir2)
+		c.Fatalf("expected volume path to be %s, got: %s", fooDir, fooDir2)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiPause(c *check.C) {
+	defer unpauseAllContainers()
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "30")
+	out, _, err := runCommandWithOutput(runCmd)
+
+	if err != nil {
+		c.Fatalf("failed to create a container: %s, %v", out, err)
+	}
+	ContainerID := strings.TrimSpace(out)
+
+	status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	pausedContainers, err := getSliceOfPausedContainers()
+
+	if err != nil {
+		c.Fatalf("error thrown while checking if containers were paused: %v", err)
 	}
 
-	logDone("container REST API - can use path from normal volume as bind-mount to overwrite another volume")
+	if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] {
+		c.Fatalf("there should be one paused container and not %d", len(pausedContainers))
+	}
+
+	status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	pausedContainers, err = getSliceOfPausedContainers()
+
+	if err != nil {
+		c.Fatalf("error thrown while checking if containers were paused: %v", err)
+	}
+
+	if pausedContainers != nil {
+		c.Fatalf("There should be no paused container.")
+	}
+}
+
+func (s *DockerSuite) TestContainerApiTop(c *check.C) {
+	out, err := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", "top").CombinedOutput()
+	if err != nil {
+		c.Fatal(err, out)
+	}
+	id := strings.TrimSpace(string(out))
+	if err := waitRun(id); err != nil {
+		c.Fatal(err)
+	}
+
+	type topResp struct {
+		Titles    []string
+		Processes [][]string
+	}
+	var top topResp
+	status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	if err := json.Unmarshal(b, &top); err != nil {
+		c.Fatal(err)
+	}
+
+	if len(top.Titles) != 11 {
+		c.Fatalf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)
+	}
+
+	if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" {
+		c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles)
+	}
+	if len(top.Processes) != 2 {
+		c.Fatalf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes)
+	}
+	if top.Processes[0][10] != "/bin/sh -c top" {
+		c.Fatalf("expected `/bin/sh -c top`, found: %s", top.Processes[0][10])
+	}
+	if top.Processes[1][10] != "top" {
+		c.Fatalf("expected `top`, found: %s", top.Processes[1][10])
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCommit(c *check.C) {
+	cName := "testapicommit"
+	out, err := exec.Command(dockerBinary, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test").CombinedOutput()
+	if err != nil {
+		c.Fatal(err, out)
+	}
+
+	name := "TestContainerApiCommit"
+	status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+	c.Assert(err, check.IsNil)
+
+	type resp struct {
+		Id string
+	}
+	var img resp
+	if err := json.Unmarshal(b, &img); err != nil {
+		c.Fatal(err)
+	}
+
+	cmd, err := inspectField(img.Id, "Config.Cmd")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if cmd != "{[/bin/sh -c touch /test]}" {
+		c.Fatalf("got wrong Cmd from commit: %q", cmd)
+	}
+	// sanity check, make sure the image is what we think it is
+	out, err = exec.Command(dockerBinary, "run", img.Id, "ls", "/test").CombinedOutput()
+	if err != nil {
+		c.Fatalf("error checking committed image: %v - %q", err, string(out))
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCreate(c *check.C) {
+	config := map[string]interface{}{
+		"Image": "busybox",
+		"Cmd":   []string{"/bin/sh", "-c", "touch /test && ls /test"},
+	}
+
+	status, b, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(status, check.Equals, http.StatusCreated)
+	c.Assert(err, check.IsNil)
+
+	type createResp struct {
+		Id string
+	}
+	var container createResp
+	if err := json.Unmarshal(b, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := exec.Command(dockerBinary, "start", "-a", container.Id).CombinedOutput()
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	if strings.TrimSpace(string(out)) != "/test" {
+		c.Fatalf("expected output `/test`, got %q", out)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCreateWithHostName(c *check.C) {
+	hostName := "test-host"
+	config := map[string]interface{}{
+		"Image":    "busybox",
+		"Hostname": hostName,
+	}
+
+	status, body, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container types.ContainerCreateResponse
+	if err := json.Unmarshal(body, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJSON types.ContainerJSON
+	if err := json.Unmarshal(body, &containerJSON); err != nil {
+		c.Fatal(err)
+	}
+
+	if containerJSON.Config.Hostname != hostName {
+		c.Fatalf("Mismatched Hostname, Expected %s, Actual: %s ", hostName, containerJSON.Config.Hostname)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCreateWithDomainName(c *check.C) {
+	domainName := "test-domain"
+	config := map[string]interface{}{
+		"Image":      "busybox",
+		"Domainname": domainName,
+	}
+
+	status, body, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container types.ContainerCreateResponse
+	if err := json.Unmarshal(body, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJSON types.ContainerJSON
+	if err := json.Unmarshal(body, &containerJSON); err != nil {
+		c.Fatal(err)
+	}
+
+	if containerJSON.Config.Domainname != domainName {
+		c.Fatalf("Mismatched Domainname, Expected %s, Actual: %s ", domainName, containerJSON.Config.Domainname)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCreateNetworkMode(c *check.C) {
+	UtilCreateNetworkMode(c, "host")
+	UtilCreateNetworkMode(c, "bridge")
+	UtilCreateNetworkMode(c, "container:web1")
+}
+
+func UtilCreateNetworkMode(c *check.C, networkMode string) {
+	config := map[string]interface{}{
+		"Image":      "busybox",
+		"HostConfig": map[string]interface{}{"NetworkMode": networkMode},
+	}
+
+	status, body, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container types.ContainerCreateResponse
+	if err := json.Unmarshal(body, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJSON types.ContainerJSON
+	if err := json.Unmarshal(body, &containerJSON); err != nil {
+		c.Fatal(err)
+	}
+
+	if containerJSON.HostConfig.NetworkMode != runconfig.NetworkMode(networkMode) {
+		c.Fatalf("Mismatched NetworkMode, Expected %s, Actual: %s ", networkMode, containerJSON.HostConfig.NetworkMode)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCreateWithCpuSharesCpuset(c *check.C) {
+	config := map[string]interface{}{
+		"Image":      "busybox",
+		"CpuShares":  512,
+		"CpusetCpus": "0,1",
+	}
+
+	status, body, err := sockRequest("POST", "/containers/create", config)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusCreated)
+
+	var container types.ContainerCreateResponse
+	if err := json.Unmarshal(body, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var containerJson types.ContainerJSON
+
+	c.Assert(json.Unmarshal(body, &containerJson), check.IsNil)
+
+	out, err := inspectField(containerJson.Id, "HostConfig.CpuShares")
+	c.Assert(err, check.IsNil)
+	c.Assert(out, check.Equals, "512")
+
+	outCpuset, errCpuset := inspectField(containerJson.Id, "HostConfig.CpusetCpus")
+	c.Assert(errCpuset, check.IsNil, check.Commentf("Output: %s", outCpuset))
+	c.Assert(outCpuset, check.Equals, "0,1")
+}
+
+func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) {
+	config := map[string]interface{}{
+		"Image": "busybox",
+	}
+
+	create := func(ct string) (*http.Response, io.ReadCloser, error) {
+		jsonData := bytes.NewBuffer(nil)
+		if err := json.NewEncoder(jsonData).Encode(config); err != nil {
+			c.Fatal(err)
+		}
+		return sockRequestRaw("POST", "/containers/create", jsonData, ct)
+	}
+
+	// Try with no content-type
+	res, body, err := create("")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	body.Close()
+
+	// Try with wrong content-type
+	res, body, err = create("application/xml")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	body.Close()
+
+	// now application/json
+	res, body, err = create("application/json")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusCreated)
+	body.Close()
+}
+
+// Issue 7941 - test to make sure a "null" in JSON is just ignored.
+// W/o this fix a null in JSON would be parsed into a string var as "null"
+func (s *DockerSuite) TestContainerApiPostCreateNull(c *check.C) {
+	config := `{
+		"Hostname":"",
+		"Domainname":"",
+		"Memory":0,
+		"MemorySwap":0,
+		"CpuShares":0,
+		"Cpuset":null,
+		"AttachStdin":true,
+		"AttachStdout":true,
+		"AttachStderr":true,
+		"PortSpecs":null,
+		"ExposedPorts":{},
+		"Tty":true,
+		"OpenStdin":true,
+		"StdinOnce":true,
+		"Env":[],
+		"Cmd":"ls",
+		"Image":"busybox",
+		"Volumes":{},
+		"WorkingDir":"",
+		"Entrypoint":null,
+		"NetworkDisabled":false,
+		"OnBuild":null}`
+
+	res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
+	c.Assert(res.StatusCode, check.Equals, http.StatusCreated)
+	c.Assert(err, check.IsNil)
+
+	b, err := readBody(body)
+	if err != nil {
+		c.Fatal(err)
+	}
+	type createResp struct {
+		Id string
+	}
+	var container createResp
+	if err := json.Unmarshal(b, &container); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := inspectField(container.Id, "HostConfig.CpusetCpus")
+	if err != nil {
+		c.Fatal(err, out)
+	}
+	if out != "" {
+		c.Fatalf("expected empty string, got %q", out)
+	}
+
+	outMemory, errMemory := inspectField(container.Id, "HostConfig.Memory")
+	c.Assert(outMemory, check.Equals, "0")
+	if errMemory != nil {
+		c.Fatal(errMemory, outMemory)
+	}
+	outMemorySwap, errMemorySwap := inspectField(container.Id, "HostConfig.MemorySwap")
+	c.Assert(outMemorySwap, check.Equals, "0")
+	if errMemorySwap != nil {
+		c.Fatal(errMemorySwap, outMemorySwap)
+	}
+}
+
+func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) {
+	config := `{
+		"Image":     "busybox",
+		"Cmd":       "ls",
+		"OpenStdin": true,
+		"CpuShares": 100,
+		"Memory":    524287
+	}`
+
+	res, body, _ := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
+	b, err2 := readBody(body)
+	if err2 != nil {
+		c.Fatal(err2)
+	}
+
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true)
+}
+
+func (s *DockerSuite) TestStartWithTooLowMemoryLimit(c *check.C) {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "busybox"))
+	if err != nil {
+		c.Fatal(err, out)
+	}
+
+	containerID := strings.TrimSpace(out)
+
+	config := `{
+                "CpuShares": 100,
+                "Memory":    524287
+        }`
+
+	res, body, _ := sockRequestRaw("POST", "/containers/"+containerID+"/start", strings.NewReader(config), "application/json")
+	b, err2 := readBody(body)
+	if err2 != nil {
+		c.Fatal(err2)
+	}
+
+	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)
+	c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true)
+}
+
+func (s *DockerSuite) TestContainerApiRename(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "--name", "TestContainerApiRename", "-d", "busybox", "sh")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	containerID := strings.TrimSpace(out)
+	newName := "TestContainerApiRenameNew"
+	statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil)
+
+	// 204 No Content is expected, not 200
+	c.Assert(statusCode, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	name, err := inspectField(containerID, "Name")
+	if name != "/"+newName {
+		c.Fatalf("Failed to rename container, expected %v, got %v. Container rename API failed", newName, name)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiKill(c *check.C) {
+	name := "test-api-kill"
+	runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
+	}
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	state, err := inspectField(name, "State.Running")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if state != "false" {
+		c.Fatalf("got wrong State from container %s: %q", name, state)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiRestart(c *check.C) {
+	name := "test-api-restart"
+	runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
+	}
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	if err := waitInspect(name, "{{ .State.Restarting  }} {{ .State.Running  }}", "false true", 5); err != nil {
+		c.Fatal(err)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiRestartNotimeoutParam(c *check.C) {
+	name := "test-api-restart-no-timeout-param"
+	runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
+	}
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	if err := waitInspect(name, "{{ .State.Restarting  }} {{ .State.Running  }}", "false true", 5); err != nil {
+		c.Fatal(err)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiStart(c *check.C) {
+	name := "testing-start"
+	config := map[string]interface{}{
+		"Image":     "busybox",
+		"Cmd":       []string{"/bin/sh", "-c", "/bin/top"},
+		"OpenStdin": true,
+	}
+
+	status, _, err := sockRequest("POST", "/containers/create?name="+name, config)
+	c.Assert(status, check.Equals, http.StatusCreated)
+	c.Assert(err, check.IsNil)
+
+	conf := make(map[string]interface{})
+	status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	// second call to start should give 304
+	status, _, err = sockRequest("POST", "/containers/"+name+"/start", conf)
+	c.Assert(status, check.Equals, http.StatusNotModified)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestContainerApiStop(c *check.C) {
+	name := "test-api-stop"
+	runCmd := exec.Command(dockerBinary, "run", "-di", "--name", name, "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
+	}
+
+	status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=1", nil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	if err := waitInspect(name, "{{ .State.Running  }}", "false", 5); err != nil {
+		c.Fatal(err)
+	}
+
+	// second call to start should give 304
+	status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=1", nil)
+	c.Assert(status, check.Equals, http.StatusNotModified)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestContainerApiWait(c *check.C) {
+	name := "test-api-wait"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sleep", "5")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("Error on container creation: %v, output: %q", err, out)
+	}
+
+	status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	if err := waitInspect(name, "{{ .State.Running  }}", "false", 5); err != nil {
+		c.Fatal(err)
+	}
+
+	var waitres types.ContainerWaitResponse
+	if err := json.Unmarshal(body, &waitres); err != nil {
+		c.Fatalf("unable to unmarshal response body: %v", err)
+	}
+
+	if waitres.StatusCode != 0 {
+		c.Fatalf("Expected wait response StatusCode to be 0, got %d", waitres.StatusCode)
+	}
+}
+
+func (s *DockerSuite) TestContainerApiCopy(c *check.C) {
+	name := "test-container-api-copy"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test.txt")
+	_, err := runCommand(runCmd)
+	c.Assert(err, check.IsNil)
+
+	postData := types.CopyConfig{
+		Resource: "/test.txt",
+	}
+
+	status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	found := false
+	for tarReader := tar.NewReader(bytes.NewReader(body)); ; {
+		h, err := tarReader.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			c.Fatal(err)
+		}
+		if h.Name == "test.txt" {
+			found = true
+			break
+		}
+	}
+	c.Assert(found, check.Equals, true)
+}
+
+func (s *DockerSuite) TestContainerApiCopyResourcePathEmpty(c *check.C) {
+	name := "test-container-api-copy-resource-empty"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test.txt")
+	_, err := runCommand(runCmd)
+	c.Assert(err, check.IsNil)
+
+	postData := types.CopyConfig{
+		Resource: "",
+	}
+
+	status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(string(body), check.Matches, "Path cannot be empty\n")
+}
+
+func (s *DockerSuite) TestContainerApiCopyResourcePathNotFound(c *check.C) {
+	name := "test-container-api-copy-resource-not-found"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox")
+	_, err := runCommand(runCmd)
+	c.Assert(err, check.IsNil)
+
+	postData := types.CopyConfig{
+		Resource: "/notexist",
+	}
+
+	status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(string(body), check.Matches, "Could not find the file /notexist in container "+name+"\n")
+}
+
+func (s *DockerSuite) TestContainerApiCopyContainerNotFound(c *check.C) {
+	postData := types.CopyConfig{
+		Resource: "/something",
+	}
+
+	status, _, err := sockRequest("POST", "/containers/notexists/copy", postData)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNotFound)
+}
+
+func (s *DockerSuite) TestContainerApiDelete(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	stopCmd := exec.Command(dockerBinary, "stop", id)
+	_, err = runCommand(stopCmd)
+	c.Assert(err, check.IsNil)
+
+	status, _, err := sockRequest("DELETE", "/containers/"+id, nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+}
+
+func (s *DockerSuite) TestContainerApiDeleteNotExist(c *check.C) {
+	status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNotFound)
+	c.Assert(string(body), check.Matches, "no such id: doesnotexist\n")
+}
+
+func (s *DockerSuite) TestContainerApiDeleteForce(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+}
+
+func (s *DockerSuite) TestContainerApiDeleteRemoveLinks(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "tlink1", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	runCmd = exec.Command(dockerBinary, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	id2 := strings.TrimSpace(out)
+	c.Assert(waitRun(id2), check.IsNil)
+
+	links, err := inspectFieldJSON(id2, "HostConfig.Links")
+	c.Assert(err, check.IsNil)
+
+	if links != "[\"/tlink1:/tlink2/tlink1\"]" {
+		c.Fatal("expected to have links between containers")
+	}
+
+	status, _, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+
+	linksPostRm, err := inspectFieldJSON(id2, "HostConfig.Links")
+	c.Assert(err, check.IsNil)
+
+	if linksPostRm != "null" {
+		c.Fatal("call to api deleteContainer links should have removed the specified links")
+	}
+}
+
+func (s *DockerSuite) TestContainerApiDeleteConflict(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	status, _, err := sockRequest("DELETE", "/containers/"+id, nil)
+	c.Assert(status, check.Equals, http.StatusConflict)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestContainerApiDeleteRemoveVolume(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/testvolume", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	vol, err := inspectFieldMap(id, "Volumes", "/testvolume")
+	c.Assert(err, check.IsNil)
+
+	_, err = os.Stat(vol)
+	c.Assert(err, check.IsNil)
+
+	status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil)
+	c.Assert(status, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	if _, err := os.Stat(vol); !os.IsNotExist(err) {
+		c.Fatalf("expected to get ErrNotExist error, got %v", err)
+	}
+}
+
+// Regression test for https://github.com/docker/docker/issues/6231
+func (s *DockerSuite) TestContainersApiChunkedEncoding(c *check.C) {
+	out, _ := dockerCmd(c, "create", "-v", "/foo", "busybox", "true")
+	id := strings.TrimSpace(out)
+
+	conn, err := sockConn(time.Duration(10 * time.Second))
+	if err != nil {
+		c.Fatal(err)
+	}
+	client := httputil.NewClientConn(conn, nil)
+	defer client.Close()
+
+	bindCfg := strings.NewReader(`{"Binds": ["/tmp:/foo"]}`)
+	req, err := http.NewRequest("POST", "/containers/"+id+"/start", bindCfg)
+	if err != nil {
+		c.Fatal(err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	// This is a cheat to make the http request do chunked encoding
+	// Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite
+	// https://golang.org/src/pkg/net/http/request.go?s=11980:12172
+	req.ContentLength = -1
+
+	resp, err := client.Do(req)
+	if err != nil {
+		c.Fatalf("error starting container with chunked encoding: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != 204 {
+		c.Fatalf("expected status code 204, got %d", resp.StatusCode)
+	}
+
+	out, err = inspectFieldJSON(id, "HostConfig.Binds")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	var binds []string
+	if err := json.NewDecoder(strings.NewReader(out)).Decode(&binds); err != nil {
+		c.Fatal(err)
+	}
+	if len(binds) != 1 {
+		c.Fatalf("got unexpected binds: %v", binds)
+	}
+
+	expected := "/tmp:/foo"
+	if binds[0] != expected {
+		c.Fatalf("got incorrect bind spec, wanted %s, got: %s", expected, binds[0])
+	}
+}
+
+func (s *DockerSuite) TestPostContainerStop(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	containerID := strings.TrimSpace(out)
+	c.Assert(waitRun(containerID), check.IsNil)
+
+	statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil)
+
+	// 204 No Content is expected, not 200
+	c.Assert(statusCode, check.Equals, http.StatusNoContent)
+	c.Assert(err, check.IsNil)
+
+	if err := waitInspect(containerID, "{{ .State.Running  }}", "false", 5); err != nil {
+		c.Fatal(err)
+	}
 }
diff --git a/integration-cli/docker_api_exec_resize_test.go b/integration-cli/docker_api_exec_resize_test.go
new file mode 100644
index 0000000..ab753d8
--- /dev/null
+++ b/integration-cli/docker_api_exec_resize_test.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+	"net/http"
+	"os/exec"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestExecResizeApiHeightWidthNoInt(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf(out, err)
+	}
+	cleanedContainerID := strings.TrimSpace(out)
+
+	endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar"
+	status, _, err := sockRequest("POST", endpoint, nil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
+}
diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go
index 1ed99a2..b795748 100644
--- a/integration-cli/docker_api_exec_test.go
+++ b/integration-cli/docker_api_exec_test.go
@@ -5,23 +5,25 @@
 import (
 	"bytes"
 	"fmt"
+	"net/http"
 	"os/exec"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // Regression test for #9414
-func TestExecApiCreateNoCmd(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecApiCreateNoCmd(c *check.C) {
 	name := "exec_test"
 	runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil})
-	if err == nil || !bytes.Contains(body, []byte("No exec command specified")) {
-		t.Fatalf("Expected error when creating exec command with no Cmd specified: %q", err)
-	}
+	status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil})
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
 
-	logDone("exec create API - returns error when missing Cmd")
+	if !bytes.Contains(body, []byte("No exec command specified")) {
+		c.Fatalf("Expected message when creating exec command with no Cmd specified")
+	}
 }
diff --git a/integration-cli/docker_api_images_test.go b/integration-cli/docker_api_images_test.go
new file mode 100644
index 0000000..573a890
--- /dev/null
+++ b/integration-cli/docker_api_images_test.go
@@ -0,0 +1,134 @@
+package main
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"os/exec"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestApiImagesFilter(c *check.C) {
+	name := "utest:tag1"
+	name2 := "utest/docker:tag2"
+	name3 := "utest:5000/docker:tag3"
+	for _, n := range []string{name, name2, name3} {
+		if out, err := exec.Command(dockerBinary, "tag", "busybox", n).CombinedOutput(); err != nil {
+			c.Fatal(err, out)
+		}
+	}
+	type image types.Image
+	getImages := func(filter string) []image {
+		v := url.Values{}
+		v.Set("filter", filter)
+		status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil)
+		c.Assert(status, check.Equals, http.StatusOK)
+		c.Assert(err, check.IsNil)
+
+		var images []image
+		if err := json.Unmarshal(b, &images); err != nil {
+			c.Fatal(err)
+		}
+
+		return images
+	}
+
+	errMsg := "incorrect number of matches returned"
+	if images := getImages("utest*/*"); len(images[0].RepoTags) != 2 {
+		c.Fatal(errMsg)
+	}
+	if images := getImages("utest"); len(images[0].RepoTags) != 1 {
+		c.Fatal(errMsg)
+	}
+	if images := getImages("utest*"); len(images[0].RepoTags) != 1 {
+		c.Fatal(errMsg)
+	}
+	if images := getImages("*5000*/*"); len(images[0].RepoTags) != 1 {
+		c.Fatal(errMsg)
+	}
+}
+
+func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) {
+	testRequires(c, Network)
+	out, err := buildImage("saveandload", "FROM hello-world\nENV FOO bar", false)
+	if err != nil {
+		c.Fatal(err)
+	}
+	id := strings.TrimSpace(out)
+
+	res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+
+	defer body.Close()
+
+	if out, err := exec.Command(dockerBinary, "rmi", id).CombinedOutput(); err != nil {
+		c.Fatal(err, out)
+	}
+
+	res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+
+	defer loadBody.Close()
+
+	inspectOut, err := exec.Command(dockerBinary, "inspect", "--format='{{ .Id }}'", id).CombinedOutput()
+	if err != nil {
+		c.Fatal(err, inspectOut)
+	}
+	if strings.TrimSpace(string(inspectOut)) != id {
+		c.Fatal("load did not work properly")
+	}
+}
+
+func (s *DockerSuite) TestApiImagesDelete(c *check.C) {
+	testRequires(c, Network)
+	name := "test-api-images-delete"
+	out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false)
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer deleteImages(name)
+	id := strings.TrimSpace(out)
+
+	if out, err := exec.Command(dockerBinary, "tag", name, "test:tag1").CombinedOutput(); err != nil {
+		c.Fatal(err, out)
+	}
+
+	status, _, err := sockRequest("DELETE", "/images/"+id, nil)
+	c.Assert(status, check.Equals, http.StatusConflict)
+	c.Assert(err, check.IsNil)
+
+	status, _, err = sockRequest("DELETE", "/images/test:noexist", nil)
+	c.Assert(status, check.Equals, http.StatusNotFound) //Status Codes:404 – no such image
+	c.Assert(err, check.IsNil)
+
+	status, _, err = sockRequest("DELETE", "/images/test:tag1", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestApiImagesHistory(c *check.C) {
+	testRequires(c, Network)
+	name := "test-api-images-history"
+	out, err := buildImage(name, "FROM hello-world\nENV FOO bar", false)
+	c.Assert(err, check.IsNil)
+
+	defer deleteImages(name)
+	id := strings.TrimSpace(out)
+
+	status, body, err := sockRequest("GET", "/images/"+id+"/history", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusOK)
+
+	var historydata []types.ImageHistory
+	if err = json.Unmarshal(body, &historydata); err != nil {
+		c.Fatalf("Error on unmarshal: %s", err)
+	}
+
+	c.Assert(len(historydata), check.Not(check.Equals), 0)
+	c.Assert(historydata[0].Tags[0], check.Equals, "test-api-images-history:latest")
+}
diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go
new file mode 100644
index 0000000..4084289
--- /dev/null
+++ b/integration-cli/docker_api_info_test.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestInfoApi(c *check.C) {
+	endpoint := "/info"
+
+	status, body, err := sockRequest("GET", endpoint, nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	// always shown fields
+	stringsToCheck := []string{
+		"ID",
+		"Containers",
+		"Images",
+		"ExecutionDriver",
+		"LoggingDriver",
+		"OperatingSystem",
+		"NCPU",
+		"MemTotal",
+		"KernelVersion",
+		"Driver"}
+
+	out := string(body)
+	for _, linePrefix := range stringsToCheck {
+		if !strings.Contains(out, linePrefix) {
+			c.Errorf("couldn't find string %v in output", linePrefix)
+		}
+	}
+}
diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go
index ed6f596..62f3235 100644
--- a/integration-cli/docker_api_inspect_test.go
+++ b/integration-cli/docker_api_inspect_test.go
@@ -2,58 +2,43 @@
 
 import (
 	"encoding/json"
+	"net/http"
 	"os/exec"
-	"testing"
+	"strings"
+
+	"github.com/go-check/check"
 )
 
-func TestInspectApiContainerResponse(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestInspectApiContainerResponse(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to create a container: %s, %v", out, err)
+		c.Fatalf("failed to create a container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	// test on json marshal version
-	// and latest version
-	testVersions := []string{"v1.11", "latest"}
+	endpoint := "/containers/" + cleanedContainerID + "/json"
+	status, body, err := sockRequest("GET", endpoint, nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
 
-	for _, testVersion := range testVersions {
-		endpoint := "/containers/" + cleanedContainerID + "/json"
-		if testVersion != "latest" {
-			endpoint = "/" + testVersion + endpoint
-		}
-		body, err := sockRequest("GET", endpoint, nil)
-		if err != nil {
-			t.Fatalf("sockRequest failed for %s version: %v", testVersion, err)
-		}
-
-		var inspectJSON map[string]interface{}
-		if err = json.Unmarshal(body, &inspectJSON); err != nil {
-			t.Fatalf("unable to unmarshal body for %s version: %v", testVersion, err)
-		}
-
-		keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"}
-
-		if testVersion == "v1.11" {
-			keys = append(keys, "ID")
-		} else {
-			keys = append(keys, "Id")
-		}
-
-		for _, key := range keys {
-			if _, ok := inspectJSON[key]; !ok {
-				t.Fatalf("%s does not exist in reponse for %s version", key, testVersion)
-			}
-		}
-		//Issue #6830: type not properly converted to JSON/back
-		if _, ok := inspectJSON["Path"].(bool); ok {
-			t.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling")
-		}
+	var inspectJSON map[string]interface{}
+	if err = json.Unmarshal(body, &inspectJSON); err != nil {
+		c.Fatalf("unable to unmarshal body for latest version: %v", err)
 	}
 
-	logDone("container json - check keys in container json response")
+	keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"}
+
+	keys = append(keys, "Id")
+
+	for _, key := range keys {
+		if _, ok := inspectJSON[key]; !ok {
+			c.Fatalf("%s does not exist in response for latest version", key)
+		}
+	}
+	//Issue #6830: type not properly converted to JSON/back
+	if _, ok := inspectJSON["Path"].(bool); ok {
+		c.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling")
+	}
 }
diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go
new file mode 100644
index 0000000..caef725
--- /dev/null
+++ b/integration-cli/docker_api_logs_test.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"net/http"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestLogsApiWithStdout(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done")
+	id := strings.TrimSpace(out)
+	if err := waitRun(id); err != nil {
+		c.Fatal(err)
+	}
+
+	type logOut struct {
+		out string
+		res *http.Response
+		err error
+	}
+	chLog := make(chan logOut)
+
+	go func() {
+		res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&timestamps=1", id), nil, "")
+		out, _ := bufio.NewReader(body).ReadString('\n')
+		chLog <- logOut{strings.TrimSpace(out), res, err}
+	}()
+
+	select {
+	case l := <-chLog:
+		c.Assert(l.err, check.IsNil)
+		c.Assert(l.res.StatusCode, check.Equals, http.StatusOK)
+		if !strings.HasSuffix(l.out, "hello") {
+			c.Fatalf("expected log output to container 'hello', but it does not")
+		}
+	case <-time.After(2 * time.Second):
+		c.Fatal("timeout waiting for logs to exit")
+	}
+}
+
+func (s *DockerSuite) TestLogsApiNoStdoutNorStderr(c *check.C) {
+	name := "logs_test"
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
+	if out, _, err := runCommandWithOutput(runCmd); err != nil {
+		c.Fatal(out, err)
+	}
+
+	status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil)
+	c.Assert(status, check.Equals, http.StatusBadRequest)
+	c.Assert(err, check.IsNil)
+
+	expected := "Bad parameters: you must choose at least one stream"
+	if !bytes.Contains(body, []byte(expected)) {
+		c.Fatalf("Expected %s, got %s", expected, string(body[:]))
+	}
+}
+
+// Regression test for #12704
+func (s *DockerSuite) TestLogsApiFollowEmptyOutput(c *check.C) {
+	name := "logs_test"
+	t0 := time.Now()
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10")
+	if out, _, err := runCommandWithOutput(runCmd); err != nil {
+		c.Fatal(out, err)
+	}
+
+	_, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "")
+	t1 := time.Now()
+	body.Close()
+	if err != nil {
+		c.Fatal(err)
+	}
+	elapsed := t1.Sub(t0).Seconds()
+	if elapsed > 5.0 {
+		c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed)
+	}
+}
diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go
index 6ba95c3..6d55280 100644
--- a/integration-cli/docker_api_resize_test.go
+++ b/integration-cli/docker_api_resize_test.go
@@ -1,53 +1,62 @@
 package main
 
 import (
+	"net/http"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestResizeApiResponse(t *testing.T) {
+func (s *DockerSuite) TestResizeApiResponse(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
-	defer deleteAllContainers()
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40"
-	_, err = sockRequest("POST", endpoint, nil)
-	if err != nil {
-		t.Fatalf("resize Request failed %v", err)
-	}
-
-	logDone("container resize - when started")
+	status, _, err := sockRequest("POST", endpoint, nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
 }
 
-func TestResizeApiResponseWhenContainerNotStarted(t *testing.T) {
+func (s *DockerSuite) TestResizeApiHeightWidthNoInt(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf(out, err)
+	}
+	cleanedContainerID := strings.TrimSpace(out)
+
+	endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar"
+	status, _, err := sockRequest("POST", endpoint, nil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestResizeApiResponseWhenContainerNotStarted(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
-	defer deleteAllContainers()
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	// make sure the exited cintainer is not running
+	// make sure the exited container is not running
 	runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
 	endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40"
-	body, err := sockRequest("POST", endpoint, nil)
-	if err == nil {
-		t.Fatalf("resize should fail when container is not started")
-	}
-	if !strings.Contains(string(body), "Cannot resize container") && !strings.Contains(string(body), cleanedContainerID) {
-		t.Fatalf("resize should fail with message 'Cannot resize container' but instead received %s", string(body))
-	}
+	status, body, err := sockRequest("POST", endpoint, nil)
+	c.Assert(status, check.Equals, http.StatusInternalServerError)
+	c.Assert(err, check.IsNil)
 
-	logDone("container resize - when not started should not resize")
+	if !strings.Contains(string(body), "Cannot resize container") && !strings.Contains(string(body), cleanedContainerID) {
+		c.Fatalf("resize should fail with message 'Cannot resize container' but instead received %s", string(body))
+	}
 }
diff --git a/integration-cli/docker_api_test.go b/integration-cli/docker_api_test.go
new file mode 100644
index 0000000..0bd4888
--- /dev/null
+++ b/integration-cli/docker_api_test.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"net/http"
+	"net/http/httputil"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestApiOptionsRoute(c *check.C) {
+	status, _, err := sockRequest("OPTIONS", "/", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestApiGetEnabledCors(c *check.C) {
+	res, body, err := sockRequestRaw("GET", "/version", nil, "")
+	body.Close()
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+	// TODO: @runcom incomplete tests, why old integration tests had this headers
+	// and here none of the headers below are in the response?
+	//c.Log(res.Header)
+	//c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*")
+	//c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
+}
+
+func (s *DockerSuite) TestVersionStatusCode(c *check.C) {
+	conn, err := sockConn(time.Duration(10 * time.Second))
+	c.Assert(err, check.IsNil)
+
+	client := httputil.NewClientConn(conn, nil)
+	defer client.Close()
+
+	req, err := http.NewRequest("GET", "/v999.0/version", nil)
+	c.Assert(err, check.IsNil)
+	req.Header.Set("User-Agent", "Docker-Client/999.0")
+
+	res, err := client.Do(req)
+	c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest)
+}
diff --git a/integration-cli/docker_api_version_test.go b/integration-cli/docker_api_version_test.go
new file mode 100644
index 0000000..b756794
--- /dev/null
+++ b/integration-cli/docker_api_version_test.go
@@ -0,0 +1,25 @@
+package main
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestGetVersion(c *check.C) {
+	status, body, err := sockRequest("GET", "/version", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
+
+	var v types.Version
+	if err := json.Unmarshal(body, &v); err != nil {
+		c.Fatal(err)
+	}
+
+	if v.Version != dockerversion.VERSION {
+		c.Fatal("Version mismatch")
+	}
+}
diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go
index cf21cda..fc2ea1a 100644
--- a/integration-cli/docker_cli_attach_test.go
+++ b/integration-cli/docker_cli_attach_test.go
@@ -1,18 +1,20 @@
 package main
 
 import (
+	"bufio"
+	"fmt"
 	"io"
 	"os/exec"
 	"strings"
 	"sync"
-	"testing"
 	"time"
+
+	"github.com/go-check/check"
 )
 
 const attachWait = 5 * time.Second
 
-func TestAttachMultipleAndRestart(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) {
 
 	endGroup := &sync.WaitGroup{}
 	startGroup := &sync.WaitGroup{}
@@ -20,7 +22,7 @@
 	startGroup.Add(3)
 
 	if err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	startDone := make(chan struct{})
@@ -38,32 +40,32 @@
 
 	for i := 0; i < 3; i++ {
 		go func() {
-			c := exec.Command(dockerBinary, "attach", "attacher")
+			cmd := exec.Command(dockerBinary, "attach", "attacher")
 
 			defer func() {
-				c.Wait()
+				cmd.Wait()
 				endGroup.Done()
 			}()
 
-			out, err := c.StdoutPipe()
+			out, err := cmd.StdoutPipe()
 			if err != nil {
-				t.Fatal(err)
+				c.Fatal(err)
 			}
 
-			if err := c.Start(); err != nil {
-				t.Fatal(err)
+			if err := cmd.Start(); err != nil {
+				c.Fatal(err)
 			}
 
 			buf := make([]byte, 1024)
 
 			if _, err := out.Read(buf); err != nil && err != io.EOF {
-				t.Fatal(err)
+				c.Fatal(err)
 			}
 
 			startGroup.Done()
 
 			if !strings.Contains(string(buf), "hello") {
-				t.Fatalf("unexpected output %s expected hello\n", string(buf))
+				c.Fatalf("unexpected output %s expected hello\n", string(buf))
 			}
 		}()
 	}
@@ -71,66 +73,111 @@
 	select {
 	case <-startDone:
 	case <-time.After(attachWait):
-		t.Fatalf("Attaches did not initialize properly")
+		c.Fatalf("Attaches did not initialize properly")
 	}
 
 	cmd := exec.Command(dockerBinary, "kill", "attacher")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	select {
 	case <-endDone:
 	case <-time.After(attachWait):
-		t.Fatalf("Attaches did not finish properly")
+		c.Fatalf("Attaches did not finish properly")
 	}
 
-	logDone("attach - multiple attach")
 }
 
-func TestAttachTtyWithoutStdin(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestAttachTtyWithoutStdin(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to start container: %v (%v)", out, err)
+		c.Fatalf("failed to start container: %v (%v)", out, err)
 	}
 
 	id := strings.TrimSpace(out)
 	if err := waitRun(id); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	defer func() {
 		cmd := exec.Command(dockerBinary, "kill", id)
 		if out, _, err := runCommandWithOutput(cmd); err != nil {
-			t.Fatalf("failed to kill container: %v (%v)", out, err)
+			c.Fatalf("failed to kill container: %v (%v)", out, err)
 		}
 	}()
 
-	done := make(chan struct{})
+	done := make(chan error)
 	go func() {
 		defer close(done)
 
 		cmd := exec.Command(dockerBinary, "attach", id)
 		if _, err := cmd.StdinPipe(); err != nil {
-			t.Fatal(err)
+			done <- err
+			return
 		}
 
 		expected := "cannot enable tty mode"
 		if out, _, err := runCommandWithOutput(cmd); err == nil {
-			t.Fatal("attach should have failed")
+			done <- fmt.Errorf("attach should have failed")
+			return
 		} else if !strings.Contains(out, expected) {
-			t.Fatalf("attach failed with error %q: expected %q", out, expected)
+			done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected)
+			return
 		}
 	}()
 
 	select {
-	case <-done:
+	case err := <-done:
+		c.Assert(err, check.IsNil)
 	case <-time.After(attachWait):
-		t.Fatal("attach is running but should have failed")
+		c.Fatal("attach is running but should have failed")
+	}
+}
+
+func (s *DockerSuite) TestAttachDisconnect(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat")
+	id := strings.TrimSpace(out)
+
+	cmd := exec.Command(dockerBinary, "attach", id)
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer stdin.Close()
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer stdout.Close()
+	if err := cmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	defer cmd.Process.Kill()
+
+	if _, err := stdin.Write([]byte("hello\n")); err != nil {
+		c.Fatal(err)
+	}
+	out, err = bufio.NewReader(stdout).ReadString('\n')
+	if err != nil {
+		c.Fatal(err)
+	}
+	if strings.TrimSpace(out) != "hello" {
+		c.Fatalf("expected 'hello', got %q", out)
 	}
 
-	logDone("attach - forbid piped stdin to tty enabled container")
+	if err := stdin.Close(); err != nil {
+		c.Fatal(err)
+	}
+
+	// Expect container to still be running after stdin is closed
+	running, err := inspectField(id, "State.Running")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if running != "true" {
+		c.Fatal("expected container to still be running")
+	}
+
 }
diff --git a/integration-cli/docker_cli_attach_unix_test.go b/integration-cli/docker_cli_attach_unix_test.go
index a3bfa5b..82808a5 100644
--- a/integration-cli/docker_cli_attach_unix_test.go
+++ b/integration-cli/docker_cli_attach_unix_test.go
@@ -3,37 +3,38 @@
 package main
 
 import (
+	"bufio"
 	"os/exec"
 	"strings"
-	"testing"
 	"time"
 
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/go-check/check"
 	"github.com/kr/pty"
 )
 
 // #9860
-func TestAttachClosedOnContainerStop(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-dti", "busybox", "sleep", "2")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to start container: %v (%v)", out, err)
+		c.Fatalf("failed to start container: %v (%v)", out, err)
 	}
 
-	id := stripTrailingCharacters(out)
+	id := strings.TrimSpace(out)
 	if err := waitRun(id); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	done := make(chan struct{})
-
+	errChan := make(chan error)
 	go func() {
-		defer close(done)
+		defer close(errChan)
 
 		_, tty, err := pty.Open()
 		if err != nil {
-			t.Fatalf("could not open pty: %v", err)
+			errChan <- err
+			return
 		}
 		attachCmd := exec.Command(dockerBinary, "attach", id)
 		attachCmd.Stdin = tty
@@ -41,58 +42,61 @@
 		attachCmd.Stderr = tty
 
 		if err := attachCmd.Run(); err != nil {
-			t.Fatalf("attach returned error %s", err)
+			errChan <- err
+			return
 		}
 	}()
 
 	waitCmd := exec.Command(dockerBinary, "wait", id)
 	if out, _, err = runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("error thrown while waiting for container: %s, %v", out, err)
+		c.Fatalf("error thrown while waiting for container: %s, %v", out, err)
 	}
 	select {
-	case <-done:
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
 	case <-time.After(attachWait):
-		t.Fatal("timed out without attach returning")
+		c.Fatal("timed out without attach returning")
 	}
 
-	logDone("attach - return after container finished")
 }
 
-func TestAttachAfterDetach(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestAttachAfterDetach(c *check.C) {
 
 	name := "detachtest"
 
 	cpty, tty, err := pty.Open()
 	if err != nil {
-		t.Fatalf("Could not open pty: %v", err)
+		c.Fatalf("Could not open pty: %v", err)
 	}
 	cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox")
 	cmd.Stdin = tty
 	cmd.Stdout = tty
 	cmd.Stderr = tty
 
-	detached := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
-		if err := cmd.Run(); err != nil {
-			t.Fatalf("attach returned error %s", err)
-		}
-		close(detached)
+		errChan <- cmd.Run()
+		close(errChan)
 	}()
 
 	time.Sleep(500 * time.Millisecond)
 	if err := waitRun(name); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	cpty.Write([]byte{16})
 	time.Sleep(100 * time.Millisecond)
 	cpty.Write([]byte{17})
 
-	<-detached
+	select {
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
+	case <-time.After(5 * time.Second):
+		c.Fatal("timeout while detaching")
+	}
 
 	cpty, tty, err = pty.Open()
 	if err != nil {
-		t.Fatalf("Could not open pty: %v", err)
+		c.Fatalf("Could not open pty: %v", err)
 	}
 
 	cmd = exec.Command(dockerBinary, "attach", name)
@@ -101,7 +105,7 @@
 	cmd.Stderr = tty
 
 	if err := cmd.Start(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	bytes := make([]byte, 10)
@@ -120,20 +124,162 @@
 
 	select {
 	case err := <-readErr:
-		if err != nil {
-			t.Fatal(err)
-		}
+		c.Assert(err, check.IsNil)
 	case <-time.After(2 * time.Second):
-		t.Fatal("timeout waiting for attach read")
+		c.Fatal("timeout waiting for attach read")
 	}
 
 	if err := cmd.Wait(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !strings.Contains(string(bytes[:nBytes]), "/ #") {
-		t.Fatalf("failed to get a new prompt. got %s", string(bytes[:nBytes]))
+		c.Fatalf("failed to get a new prompt. got %s", string(bytes[:nBytes]))
 	}
 
-	logDone("attach - reconnect after detaching")
+}
+
+// TestAttachDetach checks that attach in tty mode can be detached using the long container ID
+func (s *DockerSuite) TestAttachDetach(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat")
+	id := strings.TrimSpace(out)
+	if err := waitRun(id); err != nil {
+		c.Fatal(err)
+	}
+
+	cpty, tty, err := pty.Open()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer cpty.Close()
+
+	cmd := exec.Command(dockerBinary, "attach", id)
+	cmd.Stdin = tty
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer stdout.Close()
+	if err := cmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	if err := waitRun(id); err != nil {
+		c.Fatalf("error waiting for container to start: %v", err)
+	}
+
+	if _, err := cpty.Write([]byte("hello\n")); err != nil {
+		c.Fatal(err)
+	}
+	out, err = bufio.NewReader(stdout).ReadString('\n')
+	if err != nil {
+		c.Fatal(err)
+	}
+	if strings.TrimSpace(out) != "hello" {
+		c.Fatalf("expected 'hello', got %q", out)
+	}
+
+	// escape sequence
+	if _, err := cpty.Write([]byte{16}); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write([]byte{17}); err != nil {
+		c.Fatal(err)
+	}
+
+	ch := make(chan struct{})
+	go func() {
+		cmd.Wait()
+		ch <- struct{}{}
+	}()
+
+	running, err := inspectField(id, "State.Running")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if running != "true" {
+		c.Fatal("expected container to still be running")
+	}
+
+	go func() {
+		dockerCmd(c, "kill", id)
+	}()
+
+	select {
+	case <-ch:
+	case <-time.After(10 * time.Millisecond):
+		c.Fatal("timed out waiting for container to exit")
+	}
+
+}
+
+// TestAttachDetachTruncatedID checks that attach in tty mode can be detached
+func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat")
+	id := stringid.TruncateID(strings.TrimSpace(out))
+	if err := waitRun(id); err != nil {
+		c.Fatal(err)
+	}
+
+	cpty, tty, err := pty.Open()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer cpty.Close()
+
+	cmd := exec.Command(dockerBinary, "attach", id)
+	cmd.Stdin = tty
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer stdout.Close()
+	if err := cmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+
+	if _, err := cpty.Write([]byte("hello\n")); err != nil {
+		c.Fatal(err)
+	}
+	out, err = bufio.NewReader(stdout).ReadString('\n')
+	if err != nil {
+		c.Fatal(err)
+	}
+	if strings.TrimSpace(out) != "hello" {
+		c.Fatalf("expected 'hello', got %q", out)
+	}
+
+	// escape sequence
+	if _, err := cpty.Write([]byte{16}); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write([]byte{17}); err != nil {
+		c.Fatal(err)
+	}
+
+	ch := make(chan struct{})
+	go func() {
+		cmd.Wait()
+		ch <- struct{}{}
+	}()
+
+	running, err := inspectField(id, "State.Running")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if running != "true" {
+		c.Fatal("expected container to still be running")
+	}
+
+	go func() {
+		dockerCmd(c, "kill", id)
+	}()
+
+	select {
+	case <-ch:
+	case <-time.After(10 * time.Millisecond):
+		c.Fatal("timed out waiting for container to exit")
+	}
+
 }
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 6c96e2a..5e35d6e 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -15,18 +15,17 @@
 	"runtime"
 	"strconv"
 	"strings"
-	"sync"
-	"testing"
 	"text/template"
 	"time"
 
 	"github.com/docker/docker/builder/command"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/go-check/check"
 )
 
-func TestBuildJSONEmptyRun(t *testing.T) {
+func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) {
 	name := "testbuildjsonemptyrun"
-	defer deleteImages(name)
 
 	_, err := buildImage(
 		name,
@@ -37,15 +36,13 @@
 		true)
 
 	if err != nil {
-		t.Fatal("error when dealing with a RUN statement with empty JSON array")
+		c.Fatal("error when dealing with a RUN statement with empty JSON array")
 	}
 
-	logDone("build - RUN with an empty array should not panic")
 }
 
-func TestBuildEmptyWhitespace(t *testing.T) {
+func (s *DockerSuite) TestBuildEmptyWhitespace(c *check.C) {
 	name := "testbuildemptywhitespace"
-	defer deleteImages(name)
 
 	_, err := buildImage(
 		name,
@@ -58,15 +55,13 @@
 		true)
 
 	if err == nil {
-		t.Fatal("no error when dealing with a COPY statement with no content on the same line")
+		c.Fatal("no error when dealing with a COPY statement with no content on the same line")
 	}
 
-	logDone("build - statements with whitespace and no content should generate a parse error")
 }
 
-func TestBuildShCmdJSONEntrypoint(t *testing.T) {
+func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) {
 	name := "testbuildshcmdjsonentrypoint"
-	defer deleteImages(name)
 
 	_, err := buildImage(
 		name,
@@ -78,7 +73,7 @@
 		true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err := runCommandWithOutput(
@@ -89,19 +84,17 @@
 			name))
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if strings.TrimSpace(out) != "/bin/sh -c echo test" {
-		t.Fatal("CMD did not contain /bin/sh -c")
+		c.Fatal("CMD did not contain /bin/sh -c")
 	}
 
-	logDone("build - CMD should always contain /bin/sh -c when specified without JSON")
 }
 
-func TestBuildEnvironmentReplacementUser(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) {
 	name := "testbuildenvironmentreplacement"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
   FROM scratch
@@ -109,24 +102,22 @@
   USER ${user}
   `, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectFieldJSON(name, "Config.User")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if res != `"foo"` {
-		t.Fatal("User foo from environment not in Config.User on image")
+		c.Fatal("User foo from environment not in Config.User on image")
 	}
 
-	logDone("build - user environment replacement")
 }
 
-func TestBuildEnvironmentReplacementVolume(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) {
 	name := "testbuildenvironmentreplacement"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
   FROM scratch
@@ -134,30 +125,28 @@
   VOLUME ${volume}
   `, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectFieldJSON(name, "Config.Volumes")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	var volumes map[string]interface{}
 
 	if err := json.Unmarshal([]byte(res), &volumes); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, ok := volumes["/quux"]; !ok {
-		t.Fatal("Volume /quux from environment not in Config.Volumes on image")
+		c.Fatal("Volume /quux from environment not in Config.Volumes on image")
 	}
 
-	logDone("build - volume environment replacement")
 }
 
-func TestBuildEnvironmentReplacementExpose(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) {
 	name := "testbuildenvironmentreplacement"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
   FROM scratch
@@ -165,30 +154,28 @@
   EXPOSE ${port}
   `, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectFieldJSON(name, "Config.ExposedPorts")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	var exposedPorts map[string]interface{}
 
 	if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, ok := exposedPorts["80/tcp"]; !ok {
-		t.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image")
+		c.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image")
 	}
 
-	logDone("build - expose environment replacement")
 }
 
-func TestBuildEnvironmentReplacementWorkdir(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) {
 	name := "testbuildenvironmentreplacement"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
   FROM busybox
@@ -198,47 +185,48 @@
   `, true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("build - workdir environment replacement")
 }
 
-func TestBuildEnvironmentReplacementAddCopy(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) {
 	name := "testbuildenvironmentreplacement"
-	defer deleteImages(name)
 
 	ctx, err := fakeContext(`
   FROM scratch
   ENV baz foo
   ENV quux bar
   ENV dot .
+  ENV fee fff
+  ENV gee ggg
 
   ADD ${baz} ${dot}
   COPY ${quux} ${dot}
+  ADD ${zzz:-${fee}} ${dot}
+  COPY ${zzz:-${gee}} ${dot}
   `,
 		map[string]string{
 			"foo": "test1",
 			"bar": "test2",
+			"fff": "test3",
+			"ggg": "test4",
 		})
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("build - add/copy environment replacement")
 }
 
-func TestBuildEnvironmentReplacementEnv(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
 	name := "testbuildenvironmentreplacement"
 
-	defer deleteImages(name)
-
 	_, err := buildImage(name,
 		`
   FROM busybox
@@ -256,18 +244,18 @@
   `, true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectFieldJSON(name, "Config.Env")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	envResult := []string{}
 
 	if err = unmarshalJSON([]byte(res), &envResult); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	found := false
@@ -278,32 +266,34 @@
 		if parts[0] == "bar" {
 			found = true
 			if parts[1] != "zzz" {
-				t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
+				c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
 			}
 		} else if strings.HasPrefix(parts[0], "env") {
 			envCount++
 			if parts[1] != "zzz" {
-				t.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1])
+				c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1])
+			}
+		} else if strings.HasPrefix(parts[0], "env") {
+			envCount++
+			if parts[1] != "foo" {
+				c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1])
 			}
 		}
 	}
 
 	if !found {
-		t.Fatal("Never found the `bar` env variable")
+		c.Fatal("Never found the `bar` env variable")
 	}
 
 	if envCount != 4 {
-		t.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
+		c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
 	}
 
-	logDone("build - env environment replacement")
 }
 
-func TestBuildHandleEscapes(t *testing.T) {
+func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) {
 	name := "testbuildhandleescapes"
 
-	defer deleteImages(name)
-
 	_, err := buildImage(name,
 		`
   FROM scratch
@@ -312,22 +302,22 @@
   `, true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	var result map[string]map[string]struct{}
 
 	res, err := inspectFieldJSON(name, "Config.Volumes")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err = unmarshalJSON([]byte(res), &result); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, ok := result["bar"]; !ok {
-		t.Fatal("Could not find volume bar set from env foo in volumes table")
+		c.Fatal("Could not find volume bar set from env foo in volumes table")
 	}
 
 	deleteImages(name)
@@ -340,20 +330,20 @@
   `, true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err = inspectFieldJSON(name, "Config.Volumes")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err = unmarshalJSON([]byte(res), &result); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, ok := result["${FOO}"]; !ok {
-		t.Fatal("Could not find volume ${FOO} set from env foo in volumes table")
+		c.Fatal("Could not find volume ${FOO} set from env foo in volumes table")
 	}
 
 	deleteImages(name)
@@ -370,31 +360,28 @@
   `, true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err = inspectFieldJSON(name, "Config.Volumes")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err = unmarshalJSON([]byte(res), &result); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, ok := result[`\\\${FOO}`]; !ok {
-		t.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result)
+		c.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result)
 	}
 
-	logDone("build - handle escapes")
 }
 
-func TestBuildOnBuildLowercase(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) {
 	name := "testbuildonbuildlowercase"
 	name2 := "testbuildonbuildlowercase2"
 
-	defer deleteImages(name, name2)
-
 	_, err := buildImage(name,
 		`
   FROM busybox
@@ -402,7 +389,7 @@
   `, true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, out, err := buildImageWithOut(name2, fmt.Sprintf(`
@@ -410,24 +397,21 @@
   `, name), true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !strings.Contains(out, "quux") {
-		t.Fatalf("Did not receive the expected echo text, got %s", out)
+		c.Fatalf("Did not receive the expected echo text, got %s", out)
 	}
 
 	if strings.Contains(out, "ONBUILD ONBUILD") {
-		t.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out)
+		c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out)
 	}
 
-	logDone("build - handle case-insensitive onbuild statement")
 }
 
-func TestBuildEnvEscapes(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) {
 	name := "testbuildenvescapes"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 	_, err := buildImage(name,
 		`
     FROM busybox
@@ -439,20 +423,17 @@
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name))
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if strings.TrimSpace(out) != "$" {
-		t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
+		c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
 	}
 
-	logDone("build - env should handle \\$ properly")
 }
 
-func TestBuildEnvOverwrite(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) {
 	name := "testbuildenvoverwrite"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 
 	_, err := buildImage(name,
 		`
@@ -463,40 +444,36 @@
 		true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-e", "TEST=bar", "-t", name))
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if strings.TrimSpace(out) != "bar" {
-		t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
+		c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
 	}
 
-	logDone("build - env should overwrite builder ENV during run")
 }
 
-func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainerInSourceImage(c *check.C) {
 	name := "testbuildonbuildforbiddenmaintainerinsourceimage"
-	defer deleteImages("onbuild")
-	defer deleteImages(name)
-	defer deleteAllContainers()
 
 	createCmd := exec.Command(dockerBinary, "create", "busybox", "true")
 	out, _, _, err := runCommandWithStdoutStderr(createCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild")
 
 	if _, err := runCommand(commitCmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, err = buildImage(name,
@@ -504,33 +481,29 @@
 		true)
 	if err != nil {
 		if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") {
-			t.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err)
+			c.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - onbuild forbidden maintainer in source image")
 
 }
 
-func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildForbiddenFromInSourceImage(c *check.C) {
 	name := "testbuildonbuildforbiddenfrominsourceimage"
-	defer deleteImages("onbuild")
-	defer deleteImages(name)
-	defer deleteAllContainers()
 
 	createCmd := exec.Command(dockerBinary, "create", "busybox", "true")
 	out, _, _, err := runCommandWithStdoutStderr(createCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild")
 
 	if _, err := runCommand(commitCmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, err = buildImage(name,
@@ -538,33 +511,29 @@
 		true)
 	if err != nil {
 		if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") {
-			t.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err)
+			c.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - onbuild forbidden from in source image")
 
 }
 
-func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildForbiddenChainedInSourceImage(c *check.C) {
 	name := "testbuildonbuildforbiddenchainedinsourceimage"
-	defer deleteImages("onbuild")
-	defer deleteImages(name)
-	defer deleteAllContainers()
 
 	createCmd := exec.Command(dockerBinary, "create", "busybox", "true")
 	out, _, _, err := runCommandWithStdoutStderr(createCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild")
 
 	if _, err := runCommand(commitCmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, err = buildImage(name,
@@ -572,23 +541,18 @@
 		true)
 	if err != nil {
 		if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") {
-			t.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err)
+			c.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - onbuild forbidden chained in source image")
 
 }
 
-func TestBuildOnBuildCmdEntrypointJSON(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
 	name1 := "onbuildcmd"
 	name2 := "onbuildgenerated"
 
-	defer deleteImages(name2)
-	defer deleteImages(name1)
-	defer deleteAllContainers()
-
 	_, err := buildImage(name1, `
 FROM busybox
 ONBUILD CMD ["hello world"]
@@ -597,71 +561,64 @@
 		false)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
-		t.Fatal("did not get echo output from onbuild", out)
+		c.Fatal("did not get echo output from onbuild", out)
 	}
 
-	logDone("build - onbuild with json entrypoint/cmd")
 }
 
-func TestBuildOnBuildEntrypointJSON(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
 	name1 := "onbuildcmd"
 	name2 := "onbuildgenerated"
 
-	defer deleteImages(name2)
-	defer deleteImages(name1)
-	defer deleteAllContainers()
-
 	_, err := buildImage(name1, `
 FROM busybox
 ONBUILD ENTRYPOINT ["echo"]`,
 		false)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
-		t.Fatal("got malformed output from onbuild", out)
+		c.Fatal("got malformed output from onbuild", out)
 	}
 
-	logDone("build - onbuild with json entrypoint")
 }
 
-func TestBuildCacheADD(t *testing.T) {
+func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
 	name := "testbuildtwoimageswithadd"
-	defer deleteImages(name)
 	server, err := fakeStorage(map[string]string{
 		"robots.txt": "hello",
 		"index.html": "world",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -669,10 +626,10 @@
 		fmt.Sprintf(`FROM scratch
 		ADD %s/robots.txt /`, server.URL()),
 		true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	deleteImages(name)
 	_, out, err := buildImageWithOut(name,
@@ -680,24 +637,22 @@
 		ADD %s/index.html /`, server.URL()),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if strings.Contains(out, "Using cache") {
-		t.Fatal("2nd build used cache on ADD, it shouldn't")
+		c.Fatal("2nd build used cache on ADD, it shouldn't")
 	}
 
-	logDone("build - build two images with remote ADD")
 }
 
-func TestBuildLastModified(t *testing.T) {
+func (s *DockerSuite) TestBuildLastModified(c *check.C) {
 	name := "testbuildlastmodified"
-	defer deleteImages(name)
 
 	server, err := fakeStorage(map[string]string{
 		"file": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -710,13 +665,13 @@
 	dockerfile := fmt.Sprintf(dFmt, server.URL())
 
 	if _, out, err = buildImageWithOut(name, dockerfile, false); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out)
 	// Make sure our regexp is correct
 	if strings.Index(originMTime, "/file") < 0 {
-		t.Fatalf("Missing ls info on 'file':\n%s", out)
+		c.Fatalf("Missing ls info on 'file':\n%s", out)
 	}
 
 	// Build it again and make sure the mtime of the file didn't change.
@@ -724,12 +679,12 @@
 	time.Sleep(2 * time.Second)
 
 	if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2)
 	if newMTime != originMTime {
-		t.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime)
+		c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime)
 	}
 
 	// Now 'touch' the file and make sure the timestamp DID change this time
@@ -738,45 +693,41 @@
 		"file": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
 	dockerfile = fmt.Sprintf(dFmt, server.URL())
 
 	if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2)
 	if newMTime == originMTime {
-		t.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime)
+		c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime)
 	}
 
-	logDone("build - use Last-Modified header")
 }
 
-func TestBuildSixtySteps(t *testing.T) {
+func (s *DockerSuite) TestBuildSixtySteps(c *check.C) {
 	name := "foobuildsixtysteps"
-	defer deleteImages(name)
 	ctx, err := fakeContext("FROM scratch\n"+strings.Repeat("ADD foo /\n", 60),
 		map[string]string{
 			"foo": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - build an image with sixty build steps")
 }
 
-func TestBuildAddSingleFileToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) {
 	name := "testaddimg"
-	defer deleteImages(name)
 	ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -790,48 +741,44 @@
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add single file to root")
 }
 
 // Issue #3960: "ADD src ." hangs
-func TestBuildAddSingleFileToWorkdir(t *testing.T) {
+func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) {
 	name := "testaddsinglefiletoworkdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 ADD test_file .`,
 		map[string]string{
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
-	done := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
-		if _, err := buildImageFromContext(name, ctx, true); err != nil {
-			t.Fatal(err)
-		}
-		close(done)
+		_, err := buildImageFromContext(name, ctx, true)
+		errChan <- err
+		close(errChan)
 	}()
 	select {
 	case <-time.After(5 * time.Second):
-		t.Fatal("Build with adding to workdir timed out")
-	case <-done:
+		c.Fatal("Build with adding to workdir timed out")
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
 	}
-	logDone("build - add single file to workdir")
 }
 
-func TestBuildAddSingleFileToExistDir(t *testing.T) {
+func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) {
 	name := "testaddsinglefiletoexistdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -846,27 +793,25 @@
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add single file to existing dir")
 }
 
-func TestBuildCopyAddMultipleFiles(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) {
 	server, err := fakeStorage(map[string]string{
 		"robots.txt": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
 	name := "testcopymultiplefilestofile"
-	defer deleteImages(name)
 	ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -893,18 +838,16 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - multiple file copy/add tests")
 }
 
-func TestBuildAddMultipleFilesToFile(t *testing.T) {
+func (s *DockerSuite) TestBuildAddMultipleFilesToFile(c *check.C) {
 	name := "testaddmultiplefilestofile"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 	ADD file1.txt file2.txt test
 	`,
@@ -914,20 +857,18 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple add files to file")
 }
 
-func TestBuildJSONAddMultipleFilesToFile(t *testing.T) {
+func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFile(c *check.C) {
 	name := "testjsonaddmultiplefilestofile"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 	ADD ["file1.txt", "file2.txt", "test"]
 	`,
@@ -937,20 +878,18 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple add files to file json syntax")
 }
 
-func TestBuildAddMultipleFilesToFileWild(t *testing.T) {
+func (s *DockerSuite) TestBuildAddMultipleFilesToFileWild(c *check.C) {
 	name := "testaddmultiplefilestofilewild"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 	ADD file*.txt test
 	`,
@@ -960,20 +899,18 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple add files to file wild")
 }
 
-func TestBuildJSONAddMultipleFilesToFileWild(t *testing.T) {
+func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFileWild(c *check.C) {
 	name := "testjsonaddmultiplefilestofilewild"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 	ADD ["file*.txt", "test"]
 	`,
@@ -983,20 +920,18 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple add files to file wild json syntax")
 }
 
-func TestBuildCopyMultipleFilesToFile(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyMultipleFilesToFile(c *check.C) {
 	name := "testcopymultiplefilestofile"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 	COPY file1.txt file2.txt test
 	`,
@@ -1006,20 +941,18 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using COPY with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple copy files to file")
 }
 
-func TestBuildJSONCopyMultipleFilesToFile(t *testing.T) {
+func (s *DockerSuite) TestBuildJSONCopyMultipleFilesToFile(c *check.C) {
 	name := "testjsoncopymultiplefilestofile"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 	COPY ["file1.txt", "file2.txt", "test"]
 	`,
@@ -1029,20 +962,18 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using COPY with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple copy files to file json syntax")
 }
 
-func TestBuildAddFileWithWhitespace(t *testing.T) {
+func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) {
 	name := "testaddfilewithwhitespace"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN mkdir "/test dir"
 RUN mkdir "/test_dir"
@@ -1068,18 +999,16 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add file with whitespace")
 }
 
-func TestBuildCopyFileWithWhitespace(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) {
 	name := "testcopyfilewithwhitespace"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN mkdir "/test dir"
 RUN mkdir "/test_dir"
@@ -1105,18 +1034,16 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy file with whitespace")
 }
 
-func TestBuildAddMultipleFilesToFileWithWhitespace(t *testing.T) {
+func (s *DockerSuite) TestBuildAddMultipleFilesToFileWithWhitespace(c *check.C) {
 	name := "testaddmultiplefilestofilewithwhitespace"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 	ADD [ "test file1", "test file2", "test" ]
     `,
@@ -1126,20 +1053,18 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using ADD with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple add files to file with whitespace")
 }
 
-func TestBuildCopyMultipleFilesToFileWithWhitespace(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyMultipleFilesToFileWithWhitespace(c *check.C) {
 	name := "testcopymultiplefilestofilewithwhitespace"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 	COPY [ "test file1", "test file2", "test" ]
         `,
@@ -1149,26 +1074,24 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "When using COPY with more than one source file, the destination must be a directory and end with a /"
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err)
 	}
 
-	logDone("build - multiple copy files to file with whitespace")
 }
 
-func TestBuildCopyWildcard(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
 	name := "testcopywildcard"
-	defer deleteImages(name)
 	server, err := fakeStorage(map[string]string{
 		"robots.txt": "hello",
 		"index.html": "world",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -1189,67 +1112,88 @@
 			"dir/nested_dir/nest_nest_file": "2 times nested",
 			"dirt": "dirty",
 		})
-	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
+	defer ctx.Close()
 
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Now make sure we use a cache the 2nd time
 	id2, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if id1 != id2 {
-		t.Fatal("didn't use the cache")
+		c.Fatal("didn't use the cache")
 	}
 
-	logDone("build - copy wild card")
 }
 
-func TestBuildCopyWildcardNoFind(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyWildcardNoFind(c *check.C) {
 	name := "testcopywildcardnofind"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 	COPY file*.txt /tmp/
 	`, nil)
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, err = buildImageFromContext(name, ctx, true)
 	if err == nil {
-		t.Fatal("should have failed to find a file")
+		c.Fatal("should have failed to find a file")
 	}
 	if !strings.Contains(err.Error(), "No source files were specified") {
-		t.Fatalf("Wrong error %v, must be about no source files", err)
+		c.Fatalf("Wrong error %v, must be about no source files", err)
 	}
 
-	logDone("build - copy wild card no find")
 }
 
-func TestBuildCopyWildcardCache(t *testing.T) {
-	name := "testcopywildcardcache"
+func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
+	name := "testcopywildcardinname"
 	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
+	COPY *.txt /tmp/
+	RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ]
+	`, map[string]string{"*.txt": "hi there"})
+
+	if err != nil {
+		// Normally we would do c.Fatal(err) here but given that
+		// the odds of this failing are so rare, it must be because
+		// the OS we're running the client on doesn't support * in
+		// filenames (like windows).  So, instead of failing the test
+		// just let it pass. Then we don't need to explicitly
+		// say which OSs this works on or not.
+		return
+	}
+	defer ctx.Close()
+
+	_, err = buildImageFromContext(name, ctx, true)
+	if err != nil {
+		c.Fatalf("should have built: %q", err)
+	}
+}
+
+func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) {
+	name := "testcopywildcardcache"
+	ctx, err := fakeContext(`FROM busybox
 	COPY file1.txt /tmp/`,
 		map[string]string{
 			"file1.txt": "test1",
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Now make sure we use a cache the 2nd time even with wild cards.
@@ -1259,19 +1203,17 @@
 
 	id2, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if id1 != id2 {
-		t.Fatal("didn't use the cache")
+		c.Fatal("didn't use the cache")
 	}
 
-	logDone("build - copy wild card cache")
 }
 
-func TestBuildAddSingleFileToNonExistingDir(t *testing.T) {
+func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) {
 	name := "testaddsinglefiletononexistingdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1285,20 +1227,18 @@
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("build - add single file to non-existing dir")
 }
 
-func TestBuildAddDirContentToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) {
 	name := "testadddircontenttoroot"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1311,19 +1251,17 @@
 			"test_dir/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add directory contents to root")
 }
 
-func TestBuildAddDirContentToExistingDir(t *testing.T) {
+func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) {
 	name := "testadddircontenttoexistingdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1338,19 +1276,17 @@
 			"test_dir/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add directory contents to existing dir")
 }
 
-func TestBuildAddWholeDirToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) {
 	name := "testaddwholedirtoroot"
-	defer deleteImages(name)
 	ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1366,40 +1302,36 @@
 			"test_dir/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add whole directory to root")
 }
 
 // Testing #5941
-func TestBuildAddEtcToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) {
 	name := "testaddetctoroot"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 ADD . /`,
 		map[string]string{
 			"etc/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add etc directory to root")
 }
 
 // Testing #9401
-func TestBuildAddPreservesFilesSpecialBits(t *testing.T) {
+func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) {
 	name := "testaddpreservesfilesspecialbits"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 ADD suidbin /usr/bin/suidbin
 RUN chmod 4755 /usr/bin/suidbin
@@ -1411,19 +1343,17 @@
 			"/data/usr/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add preserves files special bits")
 }
 
-func TestBuildCopySingleFileToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) {
 	name := "testcopysinglefiletoroot"
-	defer deleteImages(name)
 	ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1437,48 +1367,44 @@
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy single file to root")
 }
 
 // Issue #3960: "ADD src ." hangs - adapted for COPY
-func TestBuildCopySingleFileToWorkdir(t *testing.T) {
+func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) {
 	name := "testcopysinglefiletoworkdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 COPY test_file .`,
 		map[string]string{
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
-	done := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
-		if _, err := buildImageFromContext(name, ctx, true); err != nil {
-			t.Fatal(err)
-		}
-		close(done)
+		_, err := buildImageFromContext(name, ctx, true)
+		errChan <- err
+		close(errChan)
 	}()
 	select {
 	case <-time.After(5 * time.Second):
-		t.Fatal("Build with adding to workdir timed out")
-	case <-done:
+		c.Fatal("Build with adding to workdir timed out")
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
 	}
-	logDone("build - copy single file to workdir")
 }
 
-func TestBuildCopySingleFileToExistDir(t *testing.T) {
+func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) {
 	name := "testcopysinglefiletoexistdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1493,19 +1419,17 @@
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy single file to existing dir")
 }
 
-func TestBuildCopySingleFileToNonExistDir(t *testing.T) {
+func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) {
 	name := "testcopysinglefiletononexistdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1519,19 +1443,17 @@
 			"test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy single file to non-existing dir")
 }
 
-func TestBuildCopyDirContentToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) {
 	name := "testcopydircontenttoroot"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1544,19 +1466,17 @@
 			"test_dir/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy directory contents to root")
 }
 
-func TestBuildCopyDirContentToExistDir(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) {
 	name := "testcopydircontenttoexistdir"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1571,19 +1491,17 @@
 			"test_dir/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy directory contents to existing dir")
 }
 
-func TestBuildCopyWholeDirToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) {
 	name := "testcopywholedirtoroot"
-	defer deleteImages(name)
 	ctx, err := fakeContext(fmt.Sprintf(`FROM busybox
 RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
 RUN echo 'dockerio:x:1001:' >> /etc/group
@@ -1599,48 +1517,43 @@
 			"test_dir/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy whole directory to root")
 }
 
-func TestBuildCopyEtcToRoot(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) {
 	name := "testcopyetctoroot"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
 COPY . /`,
 		map[string]string{
 			"etc/test_file": "test1",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - copy etc directory to root")
 }
 
-func TestBuildCopyDisallowRemote(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyDisallowRemote(c *check.C) {
 	name := "testcopydisallowremote"
-	defer deleteImages(name)
 	_, out, err := buildImageWithOut(name, `FROM scratch
 COPY https://index.docker.io/robots.txt /`,
 		true)
 	if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") {
-		t.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out)
+		c.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out)
 	}
-	logDone("build - copy - disallow copy from remote")
 }
 
-func TestBuildAddBadLinks(t *testing.T) {
+func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) {
 	const (
 		dockerfile = `
 			FROM scratch
@@ -1652,16 +1565,15 @@
 	var (
 		name = "test-link-absolute"
 	)
-	defer deleteImages(name)
 	ctx, err := fakeContext(dockerfile, nil)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
 	if err != nil {
-		t.Fatalf("failed to create temporary directory: %s", tempDir)
+		c.Fatalf("failed to create temporary directory: %s", tempDir)
 	}
 	defer os.RemoveAll(tempDir)
 
@@ -1669,7 +1581,7 @@
 	if runtime.GOOS == "windows" {
 		var driveLetter string
 		if abs, err := filepath.Abs(tempDir); err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		} else {
 			driveLetter = abs[:1]
 		}
@@ -1685,7 +1597,7 @@
 
 	tarOut, err := os.Create(tarPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	tarWriter := tar.NewWriter(tarOut)
@@ -1701,7 +1613,7 @@
 
 	err = tarWriter.WriteHeader(header)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	tarWriter.Close()
@@ -1709,26 +1621,25 @@
 
 	foo, err := os.Create(fooPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer foo.Close()
 
 	if _, err := foo.WriteString("test"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
-		t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
+		c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
 	}
 
-	logDone("build - ADD must add files in container")
 }
 
-func TestBuildAddBadLinksVolume(t *testing.T) {
+func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) {
 	const (
 		dockerfileTemplate = `
 		FROM busybox
@@ -1741,11 +1652,10 @@
 		name       = "test-link-absolute-volume"
 		dockerfile = ""
 	)
-	defer deleteImages(name)
 
 	tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
 	if err != nil {
-		t.Fatalf("failed to create temporary directory: %s", tempDir)
+		c.Fatalf("failed to create temporary directory: %s", tempDir)
 	}
 	defer os.RemoveAll(tempDir)
 
@@ -1754,76 +1664,73 @@
 
 	ctx, err := fakeContext(dockerfile, nil)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 	fooPath := filepath.Join(ctx.Dir, targetFile)
 
 	foo, err := os.Create(fooPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer foo.Close()
 
 	if _, err := foo.WriteString("test"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
-		t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
+		c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
 	}
 
-	logDone("build - ADD should add files in volume")
 }
 
 // Issue #5270 - ensure we throw a better error than "unexpected EOF"
 // when we can't access files in the context.
-func TestBuildWithInaccessibleFilesInContext(t *testing.T) {
-	testRequires(t, UnixCli) // test uses chown/chmod: not available on windows
+func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) {
+	testRequires(c, UnixCli) // test uses chown/chmod: not available on windows
 
 	{
 		name := "testbuildinaccessiblefiles"
-		defer deleteImages(name)
 		ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"})
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 		// This is used to ensure we detect inaccessible files early during build in the cli client
 		pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess")
 
 		if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil {
-			t.Fatalf("failed to chown file to root: %s", err)
+			c.Fatalf("failed to chown file to root: %s", err)
 		}
 		if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil {
-			t.Fatalf("failed to chmod file to 700: %s", err)
+			c.Fatalf("failed to chmod file to 700: %s", err)
 		}
 		buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
 		buildCmd.Dir = ctx.Dir
 		out, _, err := runCommandWithOutput(buildCmd)
 		if err == nil {
-			t.Fatalf("build should have failed: %s %s", err, out)
+			c.Fatalf("build should have failed: %s %s", err, out)
 		}
 
 		// check if we've detected the failure before we started building
 		if !strings.Contains(out, "no permission to read from ") {
-			t.Fatalf("output should've contained the string: no permission to read from but contained: %s", out)
+			c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out)
 		}
 
 		if !strings.Contains(out, "Error checking context is accessible") {
-			t.Fatalf("output should've contained the string: Error checking context is accessible")
+			c.Fatalf("output should've contained the string: Error checking context is accessible")
 		}
 	}
 	{
 		name := "testbuildinaccessibledirectory"
-		defer deleteImages(name)
 		ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"})
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 		// This is used to ensure we detect inaccessible directories early during build in the cli client
@@ -1831,118 +1738,111 @@
 		pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
 
 		if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
-			t.Fatalf("failed to chown directory to root: %s", err)
+			c.Fatalf("failed to chown directory to root: %s", err)
 		}
 		if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
-			t.Fatalf("failed to chmod directory to 444: %s", err)
+			c.Fatalf("failed to chmod directory to 444: %s", err)
 		}
 		if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
-			t.Fatalf("failed to chmod file to 700: %s", err)
+			c.Fatalf("failed to chmod file to 700: %s", err)
 		}
 
 		buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
 		buildCmd.Dir = ctx.Dir
 		out, _, err := runCommandWithOutput(buildCmd)
 		if err == nil {
-			t.Fatalf("build should have failed: %s %s", err, out)
+			c.Fatalf("build should have failed: %s %s", err, out)
 		}
 
 		// check if we've detected the failure before we started building
 		if !strings.Contains(out, "can't stat") {
-			t.Fatalf("output should've contained the string: can't access %s", out)
+			c.Fatalf("output should've contained the string: can't access %s", out)
 		}
 
 		if !strings.Contains(out, "Error checking context is accessible") {
-			t.Fatalf("output should've contained the string: Error checking context is accessible")
+			c.Fatalf("output should've contained the string: Error checking context is accessible")
 		}
 
 	}
 	{
 		name := "testlinksok"
-		defer deleteImages(name)
 		ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 
 		target := "../../../../../../../../../../../../../../../../../../../azA"
 		if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer os.Remove(target)
 		// This is used to ensure we don't follow links when checking if everything in the context is accessible
 		// This test doesn't require that we run commands as an unprivileged user
 		if _, err := buildImageFromContext(name, ctx, true); err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 	}
 	{
 		name := "testbuildignoredinaccessible"
-		defer deleteImages(name)
 		ctx, err := fakeContext("FROM scratch\nADD . /foo/",
 			map[string]string{
 				"directoryWeCantStat/bar": "foo",
 				".dockerignore":           "directoryWeCantStat",
 			})
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 		// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
 		pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
 		pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
 		if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
-			t.Fatalf("failed to chown directory to root: %s", err)
+			c.Fatalf("failed to chown directory to root: %s", err)
 		}
 		if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
-			t.Fatalf("failed to chmod directory to 755: %s", err)
+			c.Fatalf("failed to chmod directory to 755: %s", err)
 		}
 		if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
-			t.Fatalf("failed to chmod file to 444: %s", err)
+			c.Fatalf("failed to chmod file to 444: %s", err)
 		}
 
 		buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name))
 		buildCmd.Dir = ctx.Dir
 		if out, _, err := runCommandWithOutput(buildCmd); err != nil {
-			t.Fatalf("build should have worked: %s %s", err, out)
+			c.Fatalf("build should have worked: %s %s", err, out)
 		}
 
 	}
-	logDone("build - ADD from context with inaccessible files must not pass")
-	logDone("build - ADD from context with accessible links must work")
-	logDone("build - ADD from context with ignored inaccessible files must work")
 }
 
-func TestBuildForceRm(t *testing.T) {
+func (s *DockerSuite) TestBuildForceRm(c *check.C) {
 	containerCountBefore, err := getContainerCount()
 	if err != nil {
-		t.Fatalf("failed to get the container count: %s", err)
+		c.Fatalf("failed to get the container count: %s", err)
 	}
 	name := "testbuildforcerm"
-	defer deleteImages(name)
 	ctx, err := fakeContext("FROM scratch\nRUN true\nRUN thiswillfail", nil)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "--force-rm", ".")
 	buildCmd.Dir = ctx.Dir
 	if out, _, err := runCommandWithOutput(buildCmd); err == nil {
-		t.Fatalf("failed to build the image: %s, %v", out, err)
+		c.Fatalf("failed to build the image: %s, %v", out, err)
 	}
 
 	containerCountAfter, err := getContainerCount()
 	if err != nil {
-		t.Fatalf("failed to get the container count: %s", err)
+		c.Fatalf("failed to get the container count: %s", err)
 	}
 
 	if containerCountBefore != containerCountAfter {
-		t.Fatalf("--force-rm shouldn't have left containers behind")
+		c.Fatalf("--force-rm shouldn't have left containers behind")
 	}
 
-	logDone("build - ensure --force-rm doesn't leave containers behind")
 }
 
 // Test that an infinite sleep during a build is killed if the client disconnects.
@@ -1952,95 +1852,79 @@
 // * Run a 1-year-long sleep from a docker build.
 // * When docker events sees container start, close the "docker build" command
 // * Wait for docker events to emit a dying event.
-func TestBuildCancelationKillsSleep(t *testing.T) {
-	// TODO(jfrazelle): Make this work on Windows.
-	testRequires(t, SameHostDaemon)
-
+func (s *DockerSuite) TestBuildCancelationKillsSleep(c *check.C) {
 	name := "testbuildcancelation"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 
 	// (Note: one year, will never finish)
 	ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
-	var wg sync.WaitGroup
-	defer wg.Wait()
-
 	finish := make(chan struct{})
 	defer close(finish)
 
 	eventStart := make(chan struct{})
 	eventDie := make(chan struct{})
+	containerID := make(chan string)
 
-	// Start one second ago, to avoid rounding problems
-	startEpoch := time.Now().Add(-1 * time.Second)
+	startEpoch := daemonTime(c).Unix()
+	// Watch for events since epoch.
+	eventsCmd := exec.Command(
+		dockerBinary, "events",
+		"--since", strconv.FormatInt(startEpoch, 10))
+	stdout, err := eventsCmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	if err := eventsCmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	defer eventsCmd.Process.Kill()
 
 	// Goroutine responsible for watching start/die events from `docker events`
-	wg.Add(1)
 	go func() {
-		defer wg.Done()
+		cid := <-containerID
 
-		// Watch for events since epoch.
-		eventsCmd := exec.Command(dockerBinary, "events",
-			"-since", fmt.Sprint(startEpoch.Unix()))
-		stdout, err := eventsCmd.StdoutPipe()
-		err = eventsCmd.Start()
-		if err != nil {
-			t.Fatalf("failed to start 'docker events': %s", err)
-		}
-
-		go func() {
-			<-finish
-			eventsCmd.Process.Kill()
-		}()
-
-		var started, died bool
-		matchStart := regexp.MustCompile(" \\(from busybox\\:latest\\) start$")
-		matchDie := regexp.MustCompile(" \\(from busybox\\:latest\\) die$")
+		matchStart := regexp.MustCompile(cid + `(.*) start$`)
+		matchDie := regexp.MustCompile(cid + `(.*) die$`)
 
 		//
 		// Read lines of `docker events` looking for container start and stop.
 		//
 		scanner := bufio.NewScanner(stdout)
 		for scanner.Scan() {
-			if ok := matchStart.MatchString(scanner.Text()); ok {
-				if started {
-					t.Fatal("assertion fail: more than one container started")
-				}
+			switch {
+			case matchStart.MatchString(scanner.Text()):
 				close(eventStart)
-				started = true
-			}
-			if ok := matchDie.MatchString(scanner.Text()); ok {
-				if died {
-					t.Fatal("assertion fail: more than one container died")
-				}
+			case matchDie.MatchString(scanner.Text()):
 				close(eventDie)
-				died = true
 			}
 		}
-
-		err = eventsCmd.Wait()
-		if err != nil && !IsKilled(err) {
-			t.Fatalf("docker events had bad exit status: %s", err)
-		}
 	}()
 
 	buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".")
 	buildCmd.Dir = ctx.Dir
-	buildCmd.Stdout = os.Stdout
 
-	err = buildCmd.Start()
-	if err != nil {
-		t.Fatalf("failed to run build: %s", err)
+	stdoutBuild, err := buildCmd.StdoutPipe()
+	if err := buildCmd.Start(); err != nil {
+		c.Fatalf("failed to run build: %s", err)
+	}
+
+	matchCID := regexp.MustCompile("Running in ")
+	scanner := bufio.NewScanner(stdoutBuild)
+	for scanner.Scan() {
+		line := scanner.Text()
+		if ok := matchCID.MatchString(line); ok {
+			containerID <- line[len(line)-12:]
+			break
+		}
 	}
 
 	select {
-	case <-time.After(30 * time.Second):
-		t.Fatal("failed to observe build container start in timely fashion")
+	case <-time.After(5 * time.Second):
+		c.Fatal("failed to observe build container start in timely fashion")
 	case <-eventStart:
 		// Proceeds from here when we see the container fly past in the
 		// output of "docker events".
@@ -2049,56 +1933,52 @@
 
 	// Send a kill to the `docker build` command.
 	// Causes the underlying build to be cancelled due to socket close.
-	err = buildCmd.Process.Kill()
-	if err != nil {
-		t.Fatalf("error killing build command: %s", err)
+	if err := buildCmd.Process.Kill(); err != nil {
+		c.Fatalf("error killing build command: %s", err)
 	}
 
 	// Get the exit status of `docker build`, check it exited because killed.
-	err = buildCmd.Wait()
-	if err != nil && !IsKilled(err) {
-		t.Fatalf("wait failed during build run: %T %s", err, err)
+	if err := buildCmd.Wait(); err != nil && !IsKilled(err) {
+		c.Fatalf("wait failed during build run: %T %s", err, err)
 	}
 
 	select {
-	case <-time.After(30 * time.Second):
+	case <-time.After(5 * time.Second):
 		// If we don't get here in a timely fashion, it wasn't killed.
-		t.Fatal("container cancel did not succeed")
+		c.Fatal("container cancel did not succeed")
 	case <-eventDie:
 		// We saw the container shut down in the `docker events` stream,
 		// as expected.
 	}
 
-	logDone("build - ensure canceled job finishes immediately")
 }
 
-func TestBuildRm(t *testing.T) {
+func (s *DockerSuite) TestBuildRm(c *check.C) {
 	name := "testbuildrm"
-	defer deleteImages(name)
 	ctx, err := fakeContext("FROM scratch\nADD foo /\nADD foo /", map[string]string{"foo": "bar"})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 	{
 		containerCountBefore, err := getContainerCount()
 		if err != nil {
-			t.Fatalf("failed to get the container count: %s", err)
+			c.Fatalf("failed to get the container count: %s", err)
 		}
 
-		out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".")
+		out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".")
 
 		if err != nil {
-			t.Fatal("failed to build the image", out)
+			c.Fatal("failed to build the image", out)
 		}
 
 		containerCountAfter, err := getContainerCount()
 		if err != nil {
-			t.Fatalf("failed to get the container count: %s", err)
+			c.Fatalf("failed to get the container count: %s", err)
 		}
 
 		if containerCountBefore != containerCountAfter {
-			t.Fatalf("-rm shouldn't have left containers behind")
+			c.Fatalf("-rm shouldn't have left containers behind")
 		}
 		deleteImages(name)
 	}
@@ -2106,22 +1986,22 @@
 	{
 		containerCountBefore, err := getContainerCount()
 		if err != nil {
-			t.Fatalf("failed to get the container count: %s", err)
+			c.Fatalf("failed to get the container count: %s", err)
 		}
 
-		out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name, ".")
+		out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".")
 
 		if err != nil {
-			t.Fatal("failed to build the image", out)
+			c.Fatal("failed to build the image", out)
 		}
 
 		containerCountAfter, err := getContainerCount()
 		if err != nil {
-			t.Fatalf("failed to get the container count: %s", err)
+			c.Fatalf("failed to get the container count: %s", err)
 		}
 
 		if containerCountBefore != containerCountAfter {
-			t.Fatalf("--rm shouldn't have left containers behind")
+			c.Fatalf("--rm shouldn't have left containers behind")
 		}
 		deleteImages(name)
 	}
@@ -2129,33 +2009,30 @@
 	{
 		containerCountBefore, err := getContainerCount()
 		if err != nil {
-			t.Fatalf("failed to get the container count: %s", err)
+			c.Fatalf("failed to get the container count: %s", err)
 		}
 
-		out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".")
+		out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".")
 
 		if err != nil {
-			t.Fatal("failed to build the image", out)
+			c.Fatal("failed to build the image", out)
 		}
 
 		containerCountAfter, err := getContainerCount()
 		if err != nil {
-			t.Fatalf("failed to get the container count: %s", err)
+			c.Fatalf("failed to get the container count: %s", err)
 		}
 
 		if containerCountBefore == containerCountAfter {
-			t.Fatalf("--rm=false should have left containers behind")
+			c.Fatalf("--rm=false should have left containers behind")
 		}
-		deleteAllContainers()
 		deleteImages(name)
 
 	}
 
-	logDone("build - ensure --rm doesn't leave containers behind and that --rm=true is the default")
-	logDone("build - ensure --rm=false overrides the default")
 }
 
-func TestBuildWithVolumes(t *testing.T) {
+func (s *DockerSuite) TestBuildWithVolumes(c *check.C) {
 	var (
 		result   map[string]map[string]struct{}
 		name     = "testbuildvolumes"
@@ -2171,7 +2048,6 @@
 			"/test8]": emptyMap,
 		}
 	)
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM scratch
 		VOLUME /test1
@@ -2182,52 +2058,48 @@
     `,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectFieldJSON(name, "Config.Volumes")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	err = unmarshalJSON([]byte(res), &result)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	equal := reflect.DeepEqual(&result, &expected)
 
 	if !equal {
-		t.Fatalf("Volumes %s, expected %s", result, expected)
+		c.Fatalf("Volumes %s, expected %s", result, expected)
 	}
 
-	logDone("build - with volumes")
 }
 
-func TestBuildMaintainer(t *testing.T) {
+func (s *DockerSuite) TestBuildMaintainer(c *check.C) {
 	name := "testbuildmaintainer"
 	expected := "dockerio"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM scratch
         MAINTAINER dockerio`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Author")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Maintainer %s, expected %s", res, expected)
+		c.Fatalf("Maintainer %s, expected %s", res, expected)
 	}
-	logDone("build - maintainer")
 }
 
-func TestBuildUser(t *testing.T) {
+func (s *DockerSuite) TestBuildUser(c *check.C) {
 	name := "testbuilduser"
 	expected := "dockerio"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
@@ -2235,22 +2107,20 @@
 		RUN [ $(whoami) = 'dockerio' ]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.User")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("User %s, expected %s", res, expected)
+		c.Fatalf("User %s, expected %s", res, expected)
 	}
-	logDone("build - user")
 }
 
-func TestBuildRelativeWorkdir(t *testing.T) {
+func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) {
 	name := "testbuildrelativeworkdir"
 	expected := "/test2/test3"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		RUN [ "$PWD" = '/' ]
@@ -2262,22 +2132,20 @@
 		RUN [ "$PWD" = '/test2/test3' ]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.WorkingDir")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Workdir %s, expected %s", res, expected)
+		c.Fatalf("Workdir %s, expected %s", res, expected)
 	}
-	logDone("build - relative workdir")
 }
 
-func TestBuildWorkdirWithEnvVariables(t *testing.T) {
+func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) {
 	name := "testbuildworkdirwithenvvariables"
 	expected := "/test1/test2"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		ENV DIRPATH /test1
@@ -2286,21 +2154,19 @@
 		WORKDIR $SUBDIRNAME/$MISSING_VAR`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.WorkingDir")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Workdir %s, expected %s", res, expected)
+		c.Fatalf("Workdir %s, expected %s", res, expected)
 	}
-	logDone("build - workdir with env variables")
 }
 
-func TestBuildRelativeCopy(t *testing.T) {
+func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) {
 	name := "testbuildrelativecopy"
-	defer deleteImages(name)
 	dockerfile := `
 		FROM busybox
 			WORKDIR /test1
@@ -2329,19 +2195,17 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	_, err = buildImageFromContext(name, ctx, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - relative copy/add")
 }
 
-func TestBuildEnv(t *testing.T) {
+func (s *DockerSuite) TestBuildEnv(c *check.C) {
 	name := "testbuildenv"
 	expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		ENV PATH /test:$PATH
@@ -2349,116 +2213,106 @@
 		RUN [ $(env | grep PORT) = 'PORT=2375' ]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.Env")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Env %s, expected %s", res, expected)
+		c.Fatalf("Env %s, expected %s", res, expected)
 	}
-	logDone("build - env")
 }
 
-func TestBuildContextCleanup(t *testing.T) {
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestBuildContextCleanup(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	name := "testbuildcontextcleanup"
-	defer deleteImages(name)
 	entries, err := ioutil.ReadDir("/var/lib/docker/tmp")
 	if err != nil {
-		t.Fatalf("failed to list contents of tmp dir: %s", err)
+		c.Fatalf("failed to list contents of tmp dir: %s", err)
 	}
 	_, err = buildImage(name,
 		`FROM scratch
         ENTRYPOINT ["/bin/echo"]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp")
 	if err != nil {
-		t.Fatalf("failed to list contents of tmp dir: %s", err)
+		c.Fatalf("failed to list contents of tmp dir: %s", err)
 	}
 	if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
-		t.Fatalf("context should have been deleted, but wasn't")
+		c.Fatalf("context should have been deleted, but wasn't")
 	}
 
-	logDone("build - verify context cleanup works properly")
 }
 
-func TestBuildContextCleanupFailedBuild(t *testing.T) {
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	name := "testbuildcontextcleanup"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 	entries, err := ioutil.ReadDir("/var/lib/docker/tmp")
 	if err != nil {
-		t.Fatalf("failed to list contents of tmp dir: %s", err)
+		c.Fatalf("failed to list contents of tmp dir: %s", err)
 	}
 	_, err = buildImage(name,
 		`FROM scratch
 	RUN /non/existing/command`,
 		true)
 	if err == nil {
-		t.Fatalf("expected build to fail, but it didn't")
+		c.Fatalf("expected build to fail, but it didn't")
 	}
 	entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp")
 	if err != nil {
-		t.Fatalf("failed to list contents of tmp dir: %s", err)
+		c.Fatalf("failed to list contents of tmp dir: %s", err)
 	}
 	if err = compareDirectoryEntries(entries, entriesFinal); err != nil {
-		t.Fatalf("context should have been deleted, but wasn't")
+		c.Fatalf("context should have been deleted, but wasn't")
 	}
 
-	logDone("build - verify context cleanup works properly after an unsuccessful build")
 }
 
-func TestBuildCmd(t *testing.T) {
+func (s *DockerSuite) TestBuildCmd(c *check.C) {
 	name := "testbuildcmd"
-	expected := "[/bin/echo Hello World]"
-	defer deleteImages(name)
+	expected := "{[/bin/echo Hello World]}"
 	_, err := buildImage(name,
 		`FROM scratch
         CMD ["/bin/echo", "Hello World"]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.Cmd")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Cmd %s, expected %s", res, expected)
+		c.Fatalf("Cmd %s, expected %s", res, expected)
 	}
-	logDone("build - cmd")
 }
 
-func TestBuildExpose(t *testing.T) {
+func (s *DockerSuite) TestBuildExpose(c *check.C) {
 	name := "testbuildexpose"
-	expected := "map[2375/tcp:map[]]"
-	defer deleteImages(name)
+	expected := "map[2375/tcp:{}]"
 	_, err := buildImage(name,
 		`FROM scratch
         EXPOSE 2375`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.ExposedPorts")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Exposed ports %s, expected %s", res, expected)
+		c.Fatalf("Exposed ports %s, expected %s", res, expected)
 	}
-	logDone("build - expose")
 }
 
-func TestBuildExposeMorePorts(t *testing.T) {
+func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) {
 	// start building docker file with a large number of ports
 	portList := make([]string, 50)
 	line := make([]string, 100)
@@ -2484,127 +2338,118 @@
 	tmpl.Execute(buf, portList)
 
 	name := "testbuildexpose"
-	defer deleteImages(name)
 	_, err := buildImage(name, buf.String(), true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// check if all the ports are saved inside Config.ExposedPorts
 	res, err := inspectFieldJSON(name, "Config.ExposedPorts")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	var exposedPorts map[string]interface{}
 	if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	for _, p := range expectedPorts {
 		ep := fmt.Sprintf("%d/tcp", p)
 		if _, ok := exposedPorts[ep]; !ok {
-			t.Errorf("Port(%s) is not exposed", ep)
+			c.Errorf("Port(%s) is not exposed", ep)
 		} else {
 			delete(exposedPorts, ep)
 		}
 	}
 	if len(exposedPorts) != 0 {
-		t.Errorf("Unexpected extra exposed ports %v", exposedPorts)
+		c.Errorf("Unexpected extra exposed ports %v", exposedPorts)
 	}
-	logDone("build - expose large number of ports")
 }
 
-func TestBuildExposeOrder(t *testing.T) {
+func (s *DockerSuite) TestBuildExposeOrder(c *check.C) {
 	buildID := func(name, exposed string) string {
 		_, err := buildImage(name, fmt.Sprintf(`FROM scratch
 		EXPOSE %s`, exposed), true)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		id, err := inspectField(name, "Id")
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		return id
 	}
 
 	id1 := buildID("testbuildexpose1", "80 2375")
 	id2 := buildID("testbuildexpose2", "2375 80")
-	defer deleteImages("testbuildexpose1", "testbuildexpose2")
 	if id1 != id2 {
-		t.Errorf("EXPOSE should invalidate the cache only when ports actually changed")
+		c.Errorf("EXPOSE should invalidate the cache only when ports actually changed")
 	}
-	logDone("build - expose order")
 }
 
-func TestBuildExposeUpperCaseProto(t *testing.T) {
+func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) {
 	name := "testbuildexposeuppercaseproto"
-	expected := "map[5678/udp:map[]]"
-	defer deleteImages(name)
+	expected := "map[5678/udp:{}]"
 	_, err := buildImage(name,
 		`FROM scratch
         EXPOSE 5678/UDP`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.ExposedPorts")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Exposed ports %s, expected %s", res, expected)
+		c.Fatalf("Exposed ports %s, expected %s", res, expected)
 	}
-	logDone("build - expose port with upper case proto")
 }
 
-func TestBuildExposeHostPort(t *testing.T) {
+func (s *DockerSuite) TestBuildExposeHostPort(c *check.C) {
 	// start building docker file with ip:hostPort:containerPort
 	name := "testbuildexpose"
-	expected := "map[5678/tcp:map[]]"
-	defer deleteImages(name)
+	expected := "map[5678/tcp:{}]"
 	_, out, err := buildImageWithOut(name,
 		`FROM scratch
         EXPOSE 192.168.1.2:2375:5678`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !strings.Contains(out, "to map host ports to container ports (ip:hostPort:containerPort) is deprecated.") {
-		t.Fatal("Missing warning message")
+		c.Fatal("Missing warning message")
 	}
 
 	res, err := inspectField(name, "Config.ExposedPorts")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Exposed ports %s, expected %s", res, expected)
+		c.Fatalf("Exposed ports %s, expected %s", res, expected)
 	}
-	logDone("build - ignore exposing host's port")
 }
 
-func TestBuildEmptyEntrypointInheritance(t *testing.T) {
+func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) {
 	name := "testbuildentrypointinheritance"
 	name2 := "testbuildentrypointinheritance2"
-	defer deleteImages(name, name2)
 
 	_, err := buildImage(name,
 		`FROM busybox
         ENTRYPOINT ["/bin/echo"]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.Entrypoint")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	expected := "[/bin/echo]"
+	expected := "{[/bin/echo]}"
 	if res != expected {
-		t.Fatalf("Entrypoint %s, expected %s", res, expected)
+		c.Fatalf("Entrypoint %s, expected %s", res, expected)
 	}
 
 	_, err = buildImage(name2,
@@ -2612,69 +2457,64 @@
         ENTRYPOINT []`, name),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err = inspectField(name2, "Config.Entrypoint")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	expected = "[]"
+	expected = "{[]}"
 
 	if res != expected {
-		t.Fatalf("Entrypoint %s, expected %s", res, expected)
+		c.Fatalf("Entrypoint %s, expected %s", res, expected)
 	}
 
-	logDone("build - empty entrypoint inheritance")
 }
 
-func TestBuildEmptyEntrypoint(t *testing.T) {
+func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) {
 	name := "testbuildentrypoint"
-	defer deleteImages(name)
-	expected := "[]"
+	expected := "{[]}"
 
 	_, err := buildImage(name,
 		`FROM busybox
         ENTRYPOINT []`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.Entrypoint")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Entrypoint %s, expected %s", res, expected)
+		c.Fatalf("Entrypoint %s, expected %s", res, expected)
 	}
 
-	logDone("build - empty entrypoint")
 }
 
-func TestBuildEntrypoint(t *testing.T) {
+func (s *DockerSuite) TestBuildEntrypoint(c *check.C) {
 	name := "testbuildentrypoint"
-	expected := "[/bin/echo]"
-	defer deleteImages(name)
+	expected := "{[/bin/echo]}"
 	_, err := buildImage(name,
 		`FROM scratch
         ENTRYPOINT ["/bin/echo"]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.Entrypoint")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Entrypoint %s, expected %s", res, expected)
+		c.Fatalf("Entrypoint %s, expected %s", res, expected)
 	}
 
-	logDone("build - entrypoint")
 }
 
 // #6445 ensure ONBUILD triggers aren't committed to grandchildren
-func TestBuildOnBuildLimitedInheritence(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) {
 	var (
 		out2, out3 string
 	)
@@ -2687,15 +2527,14 @@
 		`
 		ctx, err := fakeContext(dockerfile1, nil)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 
-		out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".")
+		out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".")
 		if err != nil {
-			t.Fatalf("build failed to complete: %s, %v", out1, err)
+			c.Fatalf("build failed to complete: %s, %v", out1, err)
 		}
-		defer deleteImages(name1)
 	}
 	{
 		name2 := "testonbuildtrigger2"
@@ -2704,15 +2543,14 @@
 		`
 		ctx, err := fakeContext(dockerfile2, nil)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 
-		out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".")
+		out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".")
 		if err != nil {
-			t.Fatalf("build failed to complete: %s, %v", out2, err)
+			c.Fatalf("build failed to complete: %s, %v", out2, err)
 		}
-		defer deleteImages(name2)
 	}
 	{
 		name3 := "testonbuildtrigger3"
@@ -2721,34 +2559,31 @@
 		`
 		ctx, err := fakeContext(dockerfile3, nil)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 
-		out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".")
+		out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".")
 		if err != nil {
-			t.Fatalf("build failed to complete: %s, %v", out3, err)
+			c.Fatalf("build failed to complete: %s, %v", out3, err)
 		}
 
-		defer deleteImages(name3)
 	}
 
 	// ONBUILD should be run in second build.
 	if !strings.Contains(out2, "ONBUILD PARENT") {
-		t.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent")
+		c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent")
 	}
 
 	// ONBUILD should *not* be run in third build.
 	if strings.Contains(out3, "ONBUILD PARENT") {
-		t.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
+		c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
 	}
 
-	logDone("build - onbuild")
 }
 
-func TestBuildWithCache(t *testing.T) {
+func (s *DockerSuite) TestBuildWithCache(c *check.C) {
 	name := "testbuildwithcache"
-	defer deleteImages(name)
 	id1, err := buildImage(name,
 		`FROM scratch
 		MAINTAINER dockerio
@@ -2756,7 +2591,7 @@
         ENTRYPOINT ["/bin/echo"]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImage(name,
 		`FROM scratch
@@ -2765,18 +2600,16 @@
         ENTRYPOINT ["/bin/echo"]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 != id2 {
-		t.Fatal("The cache should have been used but hasn't.")
+		c.Fatal("The cache should have been used but hasn't.")
 	}
-	logDone("build - with cache")
 }
 
-func TestBuildWithoutCache(t *testing.T) {
+func (s *DockerSuite) TestBuildWithoutCache(c *check.C) {
 	name := "testbuildwithoutcache"
 	name2 := "testbuildwithoutcache2"
-	defer deleteImages(name, name2)
 	id1, err := buildImage(name,
 		`FROM scratch
 		MAINTAINER dockerio
@@ -2784,7 +2617,7 @@
         ENTRYPOINT ["/bin/echo"]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	id2, err := buildImage(name2,
@@ -2794,18 +2627,15 @@
         ENTRYPOINT ["/bin/echo"]`,
 		false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 == id2 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
-	logDone("build - without cache")
 }
 
-func TestBuildConditionalCache(t *testing.T) {
+func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
 	name := "testbuildconditionalcache"
-	name2 := "testbuildconditionalcache2"
-	defer deleteImages(name, name2)
 
 	dockerfile := `
 		FROM busybox
@@ -2814,42 +2644,39 @@
 		"foo": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatalf("Error building #1: %s", err)
+		c.Fatalf("Error building #1: %s", err)
 	}
 
 	if err := ctx.Add("foo", "bye"); err != nil {
-		t.Fatalf("Error modifying foo: %s", err)
+		c.Fatalf("Error modifying foo: %s", err)
 	}
 
 	id2, err := buildImageFromContext(name, ctx, false)
 	if err != nil {
-		t.Fatalf("Error building #2: %s", err)
+		c.Fatalf("Error building #2: %s", err)
 	}
 	if id2 == id1 {
-		t.Fatal("Should not have used the cache")
+		c.Fatal("Should not have used the cache")
 	}
 
 	id3, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatalf("Error building #3: %s", err)
+		c.Fatalf("Error building #3: %s", err)
 	}
 	if id3 != id2 {
-		t.Fatal("Should have used the cache")
+		c.Fatal("Should have used the cache")
 	}
-
-	logDone("build - conditional cache")
 }
 
-func TestBuildADDLocalFileWithCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) {
 	name := "testbuildaddlocalfilewithcache"
 	name2 := "testbuildaddlocalfilewithcache2"
-	defer deleteImages(name, name2)
 	dockerfile := `
 		FROM busybox
         MAINTAINER dockerio
@@ -2860,26 +2687,24 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name2, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 != id2 {
-		t.Fatal("The cache should have been used but hasn't.")
+		c.Fatal("The cache should have been used but hasn't.")
 	}
-	logDone("build - add local file with cache")
 }
 
-func TestBuildADDMultipleLocalFileWithCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) {
 	name := "testbuildaddmultiplelocalfilewithcache"
 	name2 := "testbuildaddmultiplelocalfilewithcache2"
-	defer deleteImages(name, name2)
 	dockerfile := `
 		FROM busybox
         MAINTAINER dockerio
@@ -2890,26 +2715,24 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name2, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 != id2 {
-		t.Fatal("The cache should have been used but hasn't.")
+		c.Fatal("The cache should have been used but hasn't.")
 	}
-	logDone("build - add multiple local files with cache")
 }
 
-func TestBuildADDLocalFileWithoutCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) {
 	name := "testbuildaddlocalfilewithoutcache"
 	name2 := "testbuildaddlocalfilewithoutcache2"
-	defer deleteImages(name, name2)
 	dockerfile := `
 		FROM busybox
         MAINTAINER dockerio
@@ -2920,26 +2743,24 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name2, ctx, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 == id2 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
-	logDone("build - add local file without cache")
 }
 
-func TestBuildCopyDirButNotFile(t *testing.T) {
+func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
 	name := "testbuildcopydirbutnotfile"
 	name2 := "testbuildcopydirbutnotfile2"
-	defer deleteImages(name, name2)
 	dockerfile := `
         FROM scratch
         COPY dir /tmp/`
@@ -2948,33 +2769,31 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	// Check that adding file with similar name doesn't mess with cache
 	if err := ctx.Add("dir_file", "hello2"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name2, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 != id2 {
-		t.Fatal("The cache should have been used but wasn't")
+		c.Fatal("The cache should have been used but wasn't")
 	}
-	logDone("build - add current directory but not file")
 }
 
-func TestBuildADDCurrentDirWithCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
 	name := "testbuildaddcurrentdirwithcache"
 	name2 := name + "2"
 	name3 := name + "3"
 	name4 := name + "4"
 	name5 := name + "5"
-	defer deleteImages(name, name2, name3, name4, name5)
 	dockerfile := `
         FROM scratch
         MAINTAINER dockerio
@@ -2984,60 +2803,58 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	// Check that adding file invalidate cache of "ADD ."
 	if err := ctx.Add("bar", "hello2"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name2, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 == id2 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
 	// Check that changing file invalidate cache of "ADD ."
 	if err := ctx.Add("foo", "hello1"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id3, err := buildImageFromContext(name3, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id2 == id3 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
 	// Check that changing file to same content invalidate cache of "ADD ."
 	time.Sleep(1 * time.Second) // wait second because of mtime precision
 	if err := ctx.Add("foo", "hello1"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id4, err := buildImageFromContext(name4, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id3 == id4 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
 	id5, err := buildImageFromContext(name5, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id4 != id5 {
-		t.Fatal("The cache should have been used but hasn't.")
+		c.Fatal("The cache should have been used but hasn't.")
 	}
-	logDone("build - add current directory with cache")
 }
 
-func TestBuildADDCurrentDirWithoutCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
 	name := "testbuildaddcurrentdirwithoutcache"
 	name2 := "testbuildaddcurrentdirwithoutcache2"
-	defer deleteImages(name, name2)
 	dockerfile := `
         FROM scratch
         MAINTAINER dockerio
@@ -3047,30 +2864,28 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name2, ctx, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 == id2 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
-	logDone("build - add current directory without cache")
 }
 
-func TestBuildADDRemoteFileWithCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) {
 	name := "testbuildaddremotefilewithcache"
-	defer deleteImages(name)
 	server, err := fakeStorage(map[string]string{
 		"baz": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -3080,7 +2895,7 @@
         ADD %s/baz /usr/lib/baz/quux`, server.URL()),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImage(name,
 		fmt.Sprintf(`FROM scratch
@@ -3088,23 +2903,21 @@
         ADD %s/baz /usr/lib/baz/quux`, server.URL()),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 != id2 {
-		t.Fatal("The cache should have been used but hasn't.")
+		c.Fatal("The cache should have been used but hasn't.")
 	}
-	logDone("build - add remote file with cache")
 }
 
-func TestBuildADDRemoteFileWithoutCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) {
 	name := "testbuildaddremotefilewithoutcache"
 	name2 := "testbuildaddremotefilewithoutcache2"
-	defer deleteImages(name, name2)
 	server, err := fakeStorage(map[string]string{
 		"baz": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -3114,7 +2927,7 @@
         ADD %s/baz /usr/lib/baz/quux`, server.URL()),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImage(name2,
 		fmt.Sprintf(`FROM scratch
@@ -3122,26 +2935,23 @@
         ADD %s/baz /usr/lib/baz/quux`, server.URL()),
 		false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 == id2 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
-	logDone("build - add remote file without cache")
 }
 
-func TestBuildADDRemoteFileMTime(t *testing.T) {
+func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
 	name := "testbuildaddremotefilemtime"
 	name2 := name + "2"
 	name3 := name + "3"
 	name4 := name + "4"
 
-	defer deleteImages(name, name2, name3, name4)
-
 	files := map[string]string{"baz": "hello"}
 	server, err := fakeStorage(files)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -3149,21 +2959,21 @@
         MAINTAINER dockerio
         ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	id2, err := buildImageFromContext(name2, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 != id2 {
-		t.Fatal("The cache should have been used but wasn't - #1")
+		c.Fatal("The cache should have been used but wasn't - #1")
 	}
 
 	// Now create a different server withsame contents (causes different mtim)
@@ -3174,7 +2984,7 @@
 
 	server2, err := fakeStorage(files)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server2.Close()
 
@@ -3182,36 +2992,34 @@
         MAINTAINER dockerio
         ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx2.Close()
 	id3, err := buildImageFromContext(name3, ctx2, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 == id3 {
-		t.Fatal("The cache should not have been used but was")
+		c.Fatal("The cache should not have been used but was")
 	}
 
 	// And for good measure do it again and make sure cache is used this time
 	id4, err := buildImageFromContext(name4, ctx2, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id3 != id4 {
-		t.Fatal("The cache should have been used but wasn't - #2")
+		c.Fatal("The cache should have been used but wasn't - #2")
 	}
-	logDone("build - add remote file testing mtime")
 }
 
-func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) {
 	name := "testbuildaddlocalandremotefilewithcache"
-	defer deleteImages(name)
 	server, err := fakeStorage(map[string]string{
 		"baz": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -3223,24 +3031,23 @@
 			"foo": "hello world",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 != id2 {
-		t.Fatal("The cache should have been used but hasn't.")
+		c.Fatal("The cache should have been used but hasn't.")
 	}
-	logDone("build - add local and remote file with cache")
 }
 
-func testContextTar(t *testing.T, compression archive.Compression) {
+func testContextTar(c *check.C, compression archive.Compression) {
 	ctx, err := fakeContext(
 		`FROM busybox
 ADD foo /foo
@@ -3251,57 +3058,51 @@
 	)
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	context, err := archive.Tar(ctx.Dir, compression)
 	if err != nil {
-		t.Fatalf("failed to build context tar: %v", err)
+		c.Fatalf("failed to build context tar: %v", err)
 	}
 	name := "contexttar"
 	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
-	defer deleteImages(name)
 	buildCmd.Stdin = context
 
 	if out, _, err := runCommandWithOutput(buildCmd); err != nil {
-		t.Fatalf("build failed to complete: %v %v", out, err)
+		c.Fatalf("build failed to complete: %v %v", out, err)
 	}
-	logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression))
 }
 
-func TestBuildContextTarGzip(t *testing.T) {
-	testContextTar(t, archive.Gzip)
+func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) {
+	testContextTar(c, archive.Gzip)
 }
 
-func TestBuildContextTarNoCompression(t *testing.T) {
-	testContextTar(t, archive.Uncompressed)
+func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) {
+	testContextTar(c, archive.Uncompressed)
 }
 
-func TestBuildNoContext(t *testing.T) {
+func (s *DockerSuite) TestBuildNoContext(c *check.C) {
 	buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-")
 	buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n")
 
 	if out, _, err := runCommandWithOutput(buildCmd); err != nil {
-		t.Fatalf("build failed to complete: %v %v", out, err)
+		c.Fatalf("build failed to complete: %v %v", out, err)
 	}
 
-	if out, _, err := dockerCmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil {
-		t.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
+	if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" {
+		c.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
 	}
-
-	deleteImages("nocontext")
-	logDone("build - build an image with no context")
 }
 
 // TODO: TestCaching
-func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) {
+func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) {
 	name := "testbuildaddlocalandremotefilewithoutcache"
 	name2 := "testbuildaddlocalandremotefilewithoutcache2"
-	defer deleteImages(name, name2)
 	server, err := fakeStorage(map[string]string{
 		"baz": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -3313,26 +3114,24 @@
 			"foo": "hello world",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 	id1, err := buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id2, err := buildImageFromContext(name2, ctx, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if id1 == id2 {
-		t.Fatal("The cache should have been invalided but hasn't.")
+		c.Fatal("The cache should have been invalided but hasn't.")
 	}
-	logDone("build - add local and remote file without cache")
 }
 
-func TestBuildWithVolumeOwnership(t *testing.T) {
+func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) {
 	name := "testbuildimg"
-	defer deleteImages(name)
 
 	_, err := buildImage(name,
 		`FROM busybox:latest
@@ -3341,36 +3140,34 @@
 		true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if expected := "drw-------"; !strings.Contains(out, expected) {
-		t.Fatalf("expected %s received %s", expected, out)
+		c.Fatalf("expected %s received %s", expected, out)
 	}
 
 	if expected := "daemon   daemon"; !strings.Contains(out, expected) {
-		t.Fatalf("expected %s received %s", expected, out)
+		c.Fatalf("expected %s received %s", expected, out)
 	}
 
-	logDone("build - volume ownership")
 }
 
 // testing #1405 - config.Cmd does not get cleaned up if
 // utilizing cache
-func TestBuildEntrypointRunCleanup(t *testing.T) {
+func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) {
 	name := "testbuildcmdcleanup"
-	defer deleteImages(name)
 	if _, err := buildImage(name,
 		`FROM busybox
         RUN echo "hello"`,
 		true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	ctx, err := fakeContext(`FROM busybox
@@ -3382,25 +3179,23 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.Cmd")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	// Cmd must be cleaned up
-	if expected := "<no value>"; res != expected {
-		t.Fatalf("Cmd %s, expected %s", res, expected)
+	if res != "<nil>" {
+		c.Fatalf("Cmd %s, expected nil", res)
 	}
-	logDone("build - cleanup cmd after RUN")
 }
 
-func TestBuildForbiddenContextPath(t *testing.T) {
+func (s *DockerSuite) TestBuildForbiddenContextPath(c *check.C) {
 	name := "testbuildforbidpath"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
         ADD ../../ test/
         `,
@@ -3410,51 +3205,47 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "Forbidden path outside the build context: ../../ "
 	if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) {
-		t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err)
+		c.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err)
 	}
 
-	logDone("build - forbidden context path")
 }
 
-func TestBuildADDFileNotFound(t *testing.T) {
+func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) {
 	name := "testbuildaddnotfound"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM scratch
         ADD foo /usr/local/bar`,
 		map[string]string{"bar": "hello"})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
 		if !strings.Contains(err.Error(), "foo: no such file or directory") {
-			t.Fatalf("Wrong error %v, must be about missing foo file or directory", err)
+			c.Fatalf("Wrong error %v, must be about missing foo file or directory", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - add file not found")
 }
 
-func TestBuildInheritance(t *testing.T) {
+func (s *DockerSuite) TestBuildInheritance(c *check.C) {
 	name := "testbuildinheritance"
-	defer deleteImages(name)
 
 	_, err := buildImage(name,
 		`FROM scratch
 		EXPOSE 2375`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	ports1, err := inspectField(name, "Config.ExposedPorts")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	_, err = buildImage(name,
@@ -3462,133 +3253,118 @@
 		ENTRYPOINT ["/bin/echo"]`, name),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectField(name, "Config.Entrypoint")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	if expected := "[/bin/echo]"; res != expected {
-		t.Fatalf("Entrypoint %s, expected %s", res, expected)
+	if expected := "{[/bin/echo]}"; res != expected {
+		c.Fatalf("Entrypoint %s, expected %s", res, expected)
 	}
 	ports2, err := inspectField(name, "Config.ExposedPorts")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if ports1 != ports2 {
-		t.Fatalf("Ports must be same: %s != %s", ports1, ports2)
+		c.Fatalf("Ports must be same: %s != %s", ports1, ports2)
 	}
-	logDone("build - inheritance")
 }
 
-func TestBuildFails(t *testing.T) {
+func (s *DockerSuite) TestBuildFails(c *check.C) {
 	name := "testbuildfails"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 	_, err := buildImage(name,
 		`FROM busybox
 		RUN sh -c "exit 23"`,
 		true)
 	if err != nil {
 		if !strings.Contains(err.Error(), "returned a non-zero code: 23") {
-			t.Fatalf("Wrong error %v, must be about non-zero code 23", err)
+			c.Fatalf("Wrong error %v, must be about non-zero code 23", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - unsuccessful")
 }
 
-func TestBuildFailsDockerfileEmpty(t *testing.T) {
+func (s *DockerSuite) TestBuildFailsDockerfileEmpty(c *check.C) {
 	name := "testbuildfails"
-	defer deleteImages(name)
 	_, err := buildImage(name, ``, true)
 	if err != nil {
-		if !strings.Contains(err.Error(), "Dockerfile cannot be empty") {
-			t.Fatalf("Wrong error %v, must be about empty Dockerfile", err)
+		if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") {
+			c.Fatalf("Wrong error %v, must be about empty Dockerfile", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - unsuccessful with empty dockerfile")
 }
 
-func TestBuildOnBuild(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
 	name := "testbuildonbuild"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		ONBUILD RUN touch foobar`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	_, err = buildImage(name,
 		fmt.Sprintf(`FROM %s
 		RUN [ -f foobar ]`, name),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - onbuild")
 }
 
-func TestBuildOnBuildForbiddenChained(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildForbiddenChained(c *check.C) {
 	name := "testbuildonbuildforbiddenchained"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		ONBUILD ONBUILD RUN touch foobar`,
 		true)
 	if err != nil {
 		if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") {
-			t.Fatalf("Wrong error %v, must be about chaining ONBUILD", err)
+			c.Fatalf("Wrong error %v, must be about chaining ONBUILD", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - onbuild forbidden chained")
 }
 
-func TestBuildOnBuildForbiddenFrom(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildForbiddenFrom(c *check.C) {
 	name := "testbuildonbuildforbiddenfrom"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		ONBUILD FROM scratch`,
 		true)
 	if err != nil {
 		if !strings.Contains(err.Error(), "FROM isn't allowed as an ONBUILD trigger") {
-			t.Fatalf("Wrong error %v, must be about FROM forbidden", err)
+			c.Fatalf("Wrong error %v, must be about FROM forbidden", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - onbuild forbidden from")
 }
 
-func TestBuildOnBuildForbiddenMaintainer(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainer(c *check.C) {
 	name := "testbuildonbuildforbiddenmaintainer"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		ONBUILD MAINTAINER docker.io`,
 		true)
 	if err != nil {
 		if !strings.Contains(err.Error(), "MAINTAINER isn't allowed as an ONBUILD trigger") {
-			t.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err)
+			c.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err)
 		}
 	} else {
-		t.Fatal("Error must not be nil")
+		c.Fatal("Error must not be nil")
 	}
-	logDone("build - onbuild forbidden maintainer")
 }
 
 // gh #2446
-func TestBuildAddToSymlinkDest(t *testing.T) {
+func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) {
 	name := "testbuildaddtosymlinkdest"
-	defer deleteImages(name)
 	ctx, err := fakeContext(`FROM busybox
         RUN mkdir /foo
         RUN ln -s /foo /bar
@@ -3599,18 +3375,16 @@
 			"foo": "hello",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add to symlink destination")
 }
 
-func TestBuildEscapeWhitespace(t *testing.T) {
+func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) {
 	name := "testbuildescaping"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
   FROM busybox
@@ -3622,20 +3396,18 @@
 	res, err := inspectField(name, "Author")
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if res != "\"Docker IO <io@docker.com>\"" {
-		t.Fatalf("Parsed string did not match the escaped string. Got: %q", res)
+		c.Fatalf("Parsed string did not match the escaped string. Got: %q", res)
 	}
 
-	logDone("build - validate escaping whitespace")
 }
 
-func TestBuildVerifyIntString(t *testing.T) {
+func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) {
 	// Verify that strings that look like ints are still passed as strings
 	name := "testbuildstringing"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
   FROM busybox
@@ -3644,19 +3416,17 @@
 
 	out, rc, err := runCommandWithOutput(exec.Command(dockerBinary, "inspect", name))
 	if rc != 0 || err != nil {
-		t.Fatalf("Unexcepted error from inspect: rc: %v  err: %v", rc, err)
+		c.Fatalf("Unexpected error from inspect: rc: %v  err: %v", rc, err)
 	}
 
 	if !strings.Contains(out, "\"123\"") {
-		t.Fatalf("Output does not contain the int as a string:\n%s", out)
+		c.Fatalf("Output does not contain the int as a string:\n%s", out)
 	}
 
-	logDone("build - verify int/strings as strings")
 }
 
-func TestBuildDockerignore(t *testing.T) {
+func (s *DockerSuite) TestBuildDockerignore(c *check.C) {
 	name := "testbuilddockerignore"
-	defer deleteImages(name)
 	dockerfile := `
         FROM busybox
         ADD . /bla
@@ -3665,29 +3435,36 @@
 		RUN [[ ! -e /bla/src/_vendor ]]
 		RUN [[ ! -e /bla/.gitignore ]]
 		RUN [[ ! -e /bla/README.md ]]
+		RUN [[ ! -e /bla/dir/foo ]]
+		RUN [[ ! -e /bla/foo ]]
 		RUN [[ ! -e /bla/.git ]]`
 	ctx, err := fakeContext(dockerfile, map[string]string{
 		"Makefile":         "all:",
 		".git/HEAD":        "ref: foo",
 		"src/x.go":         "package main",
 		"src/_vendor/v.go": "package main",
+		"dir/foo":          "",
 		".gitignore":       "",
 		"README.md":        "readme",
-		".dockerignore":    ".git\npkg\n.gitignore\nsrc/_vendor\n*.md",
+		".dockerignore": `
+.git
+pkg
+.gitignore
+src/_vendor
+*.md
+dir`,
 	})
-	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
+	defer ctx.Close()
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - test .dockerignore")
 }
 
-func TestBuildDockerignoreCleanPaths(t *testing.T) {
+func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) {
 	name := "testbuilddockerignorecleanpaths"
-	defer deleteImages(name)
 	dockerfile := `
         FROM busybox
         ADD . /tmp/
@@ -3699,20 +3476,67 @@
 		".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - test .dockerignore with clean paths")
 }
 
-func TestBuildDockerignoringDockerfile(t *testing.T) {
-	name := "testbuilddockerignoredockerfile"
+func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) {
+	name := "testbuilddockerignoreexceptions"
 	defer deleteImages(name)
 	dockerfile := `
         FROM busybox
+        ADD . /bla
+		RUN [[ -f /bla/src/x.go ]]
+		RUN [[ -f /bla/Makefile ]]
+		RUN [[ ! -e /bla/src/_vendor ]]
+		RUN [[ ! -e /bla/.gitignore ]]
+		RUN [[ ! -e /bla/README.md ]]
+		RUN [[  -e /bla/dir/dir/foo ]]
+		RUN [[ ! -e /bla/dir/foo1 ]]
+		RUN [[ -f /bla/dir/e ]]
+		RUN [[ -f /bla/dir/e-dir/foo ]]
+		RUN [[ ! -e /bla/foo ]]
+		RUN [[ ! -e /bla/.git ]]`
+	ctx, err := fakeContext(dockerfile, map[string]string{
+		"Makefile":         "all:",
+		".git/HEAD":        "ref: foo",
+		"src/x.go":         "package main",
+		"src/_vendor/v.go": "package main",
+		"dir/foo":          "",
+		"dir/foo1":         "",
+		"dir/dir/f1":       "",
+		"dir/dir/foo":      "",
+		"dir/e":            "",
+		"dir/e-dir/foo":    "",
+		".gitignore":       "",
+		"README.md":        "readme",
+		".dockerignore": `
+.git
+pkg
+.gitignore
+src/_vendor
+*.md
+dir
+!dir/e*
+!dir/dir/foo`,
+	})
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer ctx.Close()
+	if _, err := buildImageFromContext(name, ctx, true); err != nil {
+		c.Fatal(err)
+	}
+}
+
+func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) {
+	name := "testbuilddockerignoredockerfile"
+	dockerfile := `
+        FROM busybox
 		ADD . /tmp/
 		RUN ! ls /tmp/Dockerfile
 		RUN ls /tmp/.dockerignore`
@@ -3721,26 +3545,24 @@
 		".dockerignore": "Dockerfile\n",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't ignore Dockerfile correctly:%s", err)
+		c.Fatalf("Didn't ignore Dockerfile correctly:%s", err)
 	}
 
 	// now try it with ./Dockerfile
 	ctx.Add(".dockerignore", "./Dockerfile\n")
 	if _, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err)
+		c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err)
 	}
 
-	logDone("build - test .dockerignore of Dockerfile")
 }
 
-func TestBuildDockerignoringRenamedDockerfile(t *testing.T) {
+func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) {
 	name := "testbuilddockerignoredockerfile"
-	defer deleteImages(name)
 	dockerfile := `
         FROM busybox
 		ADD . /tmp/
@@ -3753,26 +3575,24 @@
 		".dockerignore": "MyDockerfile\n",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't ignore MyDockerfile correctly:%s", err)
+		c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err)
 	}
 
 	// now try it with ./MyDockerfile
 	ctx.Add(".dockerignore", "./MyDockerfile\n")
 	if _, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err)
+		c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err)
 	}
 
-	logDone("build - test .dockerignore of renamed Dockerfile")
 }
 
-func TestBuildDockerignoringDockerignore(t *testing.T) {
+func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) {
 	name := "testbuilddockerignoredockerignore"
-	defer deleteImages(name)
 	dockerfile := `
         FROM busybox
 		ADD . /tmp/
@@ -3784,20 +3604,18 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't ignore .dockerignore correctly:%s", err)
+		c.Fatalf("Didn't ignore .dockerignore correctly:%s", err)
 	}
-	logDone("build - test .dockerignore of .dockerignore")
 }
 
-func TestBuildDockerignoreTouchDockerfile(t *testing.T) {
+func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) {
 	var id1 string
 	var id2 string
 
 	name := "testbuilddockerignoretouchdockerfile"
-	defer deleteImages(name)
 	dockerfile := `
         FROM busybox
 		ADD . /tmp/`
@@ -3807,48 +3625,46 @@
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if id1, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't build it correctly:%s", err)
+		c.Fatalf("Didn't build it correctly:%s", err)
 	}
 
 	if id2, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't build it correctly:%s", err)
+		c.Fatalf("Didn't build it correctly:%s", err)
 	}
 	if id1 != id2 {
-		t.Fatalf("Didn't use the cache - 1")
+		c.Fatalf("Didn't use the cache - 1")
 	}
 
 	// Now make sure touching Dockerfile doesn't invalidate the cache
 	if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
-		t.Fatalf("Didn't add Dockerfile: %s", err)
+		c.Fatalf("Didn't add Dockerfile: %s", err)
 	}
 	if id2, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't build it correctly:%s", err)
+		c.Fatalf("Didn't build it correctly:%s", err)
 	}
 	if id1 != id2 {
-		t.Fatalf("Didn't use the cache - 2")
+		c.Fatalf("Didn't use the cache - 2")
 	}
 
 	// One more time but just 'touch' it instead of changing the content
 	if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
-		t.Fatalf("Didn't add Dockerfile: %s", err)
+		c.Fatalf("Didn't add Dockerfile: %s", err)
 	}
 	if id2, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("Didn't build it correctly:%s", err)
+		c.Fatalf("Didn't build it correctly:%s", err)
 	}
 	if id1 != id2 {
-		t.Fatalf("Didn't use the cache - 3")
+		c.Fatalf("Didn't use the cache - 3")
 	}
 
-	logDone("build - test .dockerignore touch dockerfile")
 }
 
-func TestBuildDockerignoringWholeDir(t *testing.T) {
+func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) {
 	name := "testbuilddockerignorewholedir"
-	defer deleteImages(name)
 	dockerfile := `
         FROM busybox
 		COPY . /
@@ -3857,21 +3673,20 @@
 	ctx, err := fakeContext(dockerfile, map[string]string{
 		"Dockerfile":    "FROM scratch",
 		"Makefile":      "all:",
+		".gitignore":    "",
 		".dockerignore": ".*\n",
 	})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err = buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - test .dockerignore whole dir with .*")
 }
 
-func TestBuildLineBreak(t *testing.T) {
+func (s *DockerSuite) TestBuildLineBreak(c *check.C) {
 	name := "testbuildlinebreak"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM  busybox
 RUN    sh -c 'echo root:testpass \
@@ -3881,14 +3696,12 @@
 RUN    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - line break with \\")
 }
 
-func TestBuildEOLInLine(t *testing.T) {
+func (s *DockerSuite) TestBuildEOLInLine(c *check.C) {
 	name := "testbuildeolinline"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM   busybox
 RUN    sh -c 'echo root:testpass > /tmp/passwd'
@@ -3898,14 +3711,12 @@
 RUN    [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - end of line in dockerfile instruction")
 }
 
-func TestBuildCommentsShebangs(t *testing.T) {
+func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) {
 	name := "testbuildcomments"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 # This is an ordinary comment.
@@ -3918,14 +3729,12 @@
 RUN [ "$(/hello.sh)" = "hello world" ]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - comments and shebangs")
 }
 
-func TestBuildUsersAndGroups(t *testing.T) {
+func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) {
 	name := "testbuildusers"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 
@@ -3982,14 +3791,12 @@
 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - users and groups")
 }
 
-func TestBuildEnvUsage(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvUsage(c *check.C) {
 	name := "testbuildenvusage"
-	defer deleteImages(name)
 	dockerfile := `FROM busybox
 ENV    HOME /root
 ENV    PATH $HOME/bin:$PATH
@@ -4013,20 +3820,18 @@
 		"hello/docker/world": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	_, err = buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - environment variables usage")
 }
 
-func TestBuildEnvUsage2(t *testing.T) {
+func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) {
 	name := "testbuildenvusage2"
-	defer deleteImages(name)
 	dockerfile := `FROM busybox
 ENV    abc=def
 RUN    [ "$abc" = "def" ]
@@ -4071,6 +3876,27 @@
 ENV    abc \"foo\"
 RUN    [ "$abc" = '"foo"' ]
 
+ENV    abc=ABC
+RUN    [ "$abc" = "ABC" ]
+ENV    def=${abc:-DEF}
+RUN    [ "$def" = "ABC" ]
+ENV    def=${ccc:-DEF}
+RUN    [ "$def" = "DEF" ]
+ENV    def=${ccc:-${def}xx}
+RUN    [ "$def" = "DEFxx" ]
+ENV    def=${def:+ALT}
+RUN    [ "$def" = "ALT" ]
+ENV    def=${def:+${abc}:}
+RUN    [ "$def" = "ABC:" ]
+ENV    def=${ccc:-\$abc:}
+RUN    [ "$def" = '$abc:' ]
+ENV    def=${ccc:-\${abc}:}
+RUN    [ "$def" = '${abc:}' ]
+ENV    mypath=${mypath:+$mypath:}/home
+RUN    [ "$mypath" = '/home' ]
+ENV    mypath=${mypath:+$mypath:}/away
+RUN    [ "$mypath" = '/home:/away' ]
+
 ENV    e1=bar
 ENV    e2=$e1
 ENV    e3=$e11
@@ -4096,20 +3922,18 @@
 		"hello/docker/world": "hello",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	_, err = buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - environment variables usage2")
 }
 
-func TestBuildAddScript(t *testing.T) {
+func (s *DockerSuite) TestBuildAddScript(c *check.C) {
 	name := "testbuildaddscript"
-	defer deleteImages(name)
 	dockerfile := `
 FROM busybox
 ADD test /test
@@ -4120,20 +3944,18 @@
 		"test": "#!/bin/sh\necho 'test!' > /testfile",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	_, err = buildImageFromContext(name, ctx, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	logDone("build - add and run script")
 }
 
-func TestBuildAddTar(t *testing.T) {
+func (s *DockerSuite) TestBuildAddTar(c *check.C) {
 	name := "testbuildaddtar"
-	defer deleteImages(name)
 
 	ctx := func() *FakeContext {
 		dockerfile := `
@@ -4154,7 +3976,7 @@
 		tmpDir, err := ioutil.TempDir("", "fake-context")
 		testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
 		if err != nil {
-			t.Fatalf("failed to create test.tar archive: %v", err)
+			c.Fatalf("failed to create test.tar archive: %v", err)
 		}
 		defer testTar.Close()
 
@@ -4164,32 +3986,30 @@
 			Name: "test/foo",
 			Size: 2,
 		}); err != nil {
-			t.Fatalf("failed to write tar file header: %v", err)
+			c.Fatalf("failed to write tar file header: %v", err)
 		}
 		if _, err := tw.Write([]byte("Hi")); err != nil {
-			t.Fatalf("failed to write tar file content: %v", err)
+			c.Fatalf("failed to write tar file content: %v", err)
 		}
 		if err := tw.Close(); err != nil {
-			t.Fatalf("failed to close tar archive: %v", err)
+			c.Fatalf("failed to close tar archive: %v", err)
 		}
 
 		if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
-			t.Fatalf("failed to open destination dockerfile: %v", err)
+			c.Fatalf("failed to open destination dockerfile: %v", err)
 		}
 		return fakeContextFromDir(tmpDir)
 	}()
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("build failed to complete for TestBuildAddTar: %v", err)
+		c.Fatalf("build failed to complete for TestBuildAddTar: %v", err)
 	}
 
-	logDone("build - ADD tar")
 }
 
-func TestBuildAddTarXz(t *testing.T) {
+func (s *DockerSuite) TestBuildAddTarXz(c *check.C) {
 	name := "testbuildaddtarxz"
-	defer deleteImages(name)
 
 	ctx := func() *FakeContext {
 		dockerfile := `
@@ -4199,7 +4019,7 @@
 		tmpDir, err := ioutil.TempDir("", "fake-context")
 		testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
 		if err != nil {
-			t.Fatalf("failed to create test.tar archive: %v", err)
+			c.Fatalf("failed to create test.tar archive: %v", err)
 		}
 		defer testTar.Close()
 
@@ -4209,23 +4029,23 @@
 			Name: "test/foo",
 			Size: 2,
 		}); err != nil {
-			t.Fatalf("failed to write tar file header: %v", err)
+			c.Fatalf("failed to write tar file header: %v", err)
 		}
 		if _, err := tw.Write([]byte("Hi")); err != nil {
-			t.Fatalf("failed to write tar file content: %v", err)
+			c.Fatalf("failed to write tar file content: %v", err)
 		}
 		if err := tw.Close(); err != nil {
-			t.Fatalf("failed to close tar archive: %v", err)
+			c.Fatalf("failed to close tar archive: %v", err)
 		}
 		xzCompressCmd := exec.Command("xz", "-k", "test.tar")
 		xzCompressCmd.Dir = tmpDir
 		out, _, err := runCommandWithOutput(xzCompressCmd)
 		if err != nil {
-			t.Fatal(err, out)
+			c.Fatal(err, out)
 		}
 
 		if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
-			t.Fatalf("failed to open destination dockerfile: %v", err)
+			c.Fatalf("failed to open destination dockerfile: %v", err)
 		}
 		return fakeContextFromDir(tmpDir)
 	}()
@@ -4233,15 +4053,13 @@
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
+		c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
 	}
 
-	logDone("build - ADD tar.xz")
 }
 
-func TestBuildAddTarXzGz(t *testing.T) {
+func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) {
 	name := "testbuildaddtarxzgz"
-	defer deleteImages(name)
 
 	ctx := func() *FakeContext {
 		dockerfile := `
@@ -4251,7 +4069,7 @@
 		tmpDir, err := ioutil.TempDir("", "fake-context")
 		testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
 		if err != nil {
-			t.Fatalf("failed to create test.tar archive: %v", err)
+			c.Fatalf("failed to create test.tar archive: %v", err)
 		}
 		defer testTar.Close()
 
@@ -4261,31 +4079,31 @@
 			Name: "test/foo",
 			Size: 2,
 		}); err != nil {
-			t.Fatalf("failed to write tar file header: %v", err)
+			c.Fatalf("failed to write tar file header: %v", err)
 		}
 		if _, err := tw.Write([]byte("Hi")); err != nil {
-			t.Fatalf("failed to write tar file content: %v", err)
+			c.Fatalf("failed to write tar file content: %v", err)
 		}
 		if err := tw.Close(); err != nil {
-			t.Fatalf("failed to close tar archive: %v", err)
+			c.Fatalf("failed to close tar archive: %v", err)
 		}
 
 		xzCompressCmd := exec.Command("xz", "-k", "test.tar")
 		xzCompressCmd.Dir = tmpDir
 		out, _, err := runCommandWithOutput(xzCompressCmd)
 		if err != nil {
-			t.Fatal(err, out)
+			c.Fatal(err, out)
 		}
 
 		gzipCompressCmd := exec.Command("gzip", "test.tar.xz")
 		gzipCompressCmd.Dir = tmpDir
 		out, _, err = runCommandWithOutput(gzipCompressCmd)
 		if err != nil {
-			t.Fatal(err, out)
+			c.Fatal(err, out)
 		}
 
 		if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
-			t.Fatalf("failed to open destination dockerfile: %v", err)
+			c.Fatalf("failed to open destination dockerfile: %v", err)
 		}
 		return fakeContextFromDir(tmpDir)
 	}()
@@ -4293,15 +4111,13 @@
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
+		c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
 	}
 
-	logDone("build - ADD tar.xz.gz")
 }
 
-func TestBuildFromGIT(t *testing.T) {
+func (s *DockerSuite) TestBuildFromGIT(c *check.C) {
 	name := "testbuildfromgit"
-	defer deleteImages(name)
 	git, err := fakeGIT("repo", map[string]string{
 		"Dockerfile": `FROM busybox
 					ADD first /first
@@ -4310,317 +4126,315 @@
 		"first": "test git data",
 	}, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer git.Close()
 
 	_, err = buildImageFromPath(name, git.RepoURL, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Author")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != "docker" {
-		t.Fatalf("Maintainer should be docker, got %s", res)
+		c.Fatalf("Maintainer should be docker, got %s", res)
 	}
-	logDone("build - build from GIT")
 }
 
-func TestBuildCleanupCmdOnEntrypoint(t *testing.T) {
-	name := "testbuildcmdcleanuponentrypoint"
+func (s *DockerSuite) TestBuildFromGITWithContext(c *check.C) {
+	name := "testbuildfromgit"
 	defer deleteImages(name)
+	git, err := fakeGIT("repo", map[string]string{
+		"docker/Dockerfile": `FROM busybox
+					ADD first /first
+					RUN [ -f /first ]
+					MAINTAINER docker`,
+		"docker/first": "test git data",
+	}, true)
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer git.Close()
+
+	u := fmt.Sprintf("%s#master:docker", git.RepoURL)
+	_, err = buildImageFromPath(name, u, true)
+	if err != nil {
+		c.Fatal(err)
+	}
+	res, err := inspectField(name, "Author")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if res != "docker" {
+		c.Fatalf("Maintainer should be docker, got %s", res)
+	}
+}
+
+func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
+	name := "testbuildcmdcleanuponentrypoint"
 	if _, err := buildImage(name,
 		`FROM scratch
         CMD ["test"]
 		ENTRYPOINT ["echo"]`,
 		true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err := buildImage(name,
 		fmt.Sprintf(`FROM %s
 		ENTRYPOINT ["cat"]`, name),
 		true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectField(name, "Config.Cmd")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	if expected := "<no value>"; res != expected {
-		t.Fatalf("Cmd %s, expected %s", res, expected)
+	if res != "<nil>" {
+		c.Fatalf("Cmd %s, expected nil", res)
 	}
+
 	res, err = inspectField(name, "Config.Entrypoint")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	if expected := "[cat]"; res != expected {
-		t.Fatalf("Entrypoint %s, expected %s", res, expected)
+	if expected := "{[cat]}"; res != expected {
+		c.Fatalf("Entrypoint %s, expected %s", res, expected)
 	}
-	logDone("build - cleanup cmd on ENTRYPOINT")
 }
 
-func TestBuildClearCmd(t *testing.T) {
+func (s *DockerSuite) TestBuildClearCmd(c *check.C) {
 	name := "testbuildclearcmd"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`From scratch
    ENTRYPOINT ["/bin/bash"]
    CMD []`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectFieldJSON(name, "Config.Cmd")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != "[]" {
-		t.Fatalf("Cmd %s, expected %s", res, "[]")
+		c.Fatalf("Cmd %s, expected %s", res, "[]")
 	}
-	logDone("build - clearcmd")
 }
 
-func TestBuildEmptyCmd(t *testing.T) {
+func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) {
 	name := "testbuildemptycmd"
-	defer deleteImages(name)
 	if _, err := buildImage(name, "FROM scratch\nMAINTAINER quux\n", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectFieldJSON(name, "Config.Cmd")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != "null" {
-		t.Fatalf("Cmd %s, expected %s", res, "null")
+		c.Fatalf("Cmd %s, expected %s", res, "null")
 	}
-	logDone("build - empty cmd")
 }
 
-func TestBuildOnBuildOutput(t *testing.T) {
+func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) {
 	name := "testbuildonbuildparent"
-	defer deleteImages(name)
 	if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	childname := "testbuildonbuildchild"
-	defer deleteImages(childname)
-
 	_, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !strings.Contains(out, "Trigger 0, RUN echo foo") {
-		t.Fatal("failed to find the ONBUILD output", out)
+		c.Fatal("failed to find the ONBUILD output", out)
 	}
 
-	logDone("build - onbuild output")
 }
 
-func TestBuildInvalidTag(t *testing.T) {
-	name := "abcd:" + makeRandomString(200)
-	defer deleteImages(name)
+func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
+	name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
 	_, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true)
 	// if the error doesnt check for illegal tag name, or the image is built
 	// then this should fail
 	if !strings.Contains(out, "Illegal tag name") || strings.Contains(out, "Sending build context to Docker daemon") {
-		t.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out)
+		c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out)
 	}
-	logDone("build - invalid tag")
 }
 
-func TestBuildCmdShDashC(t *testing.T) {
+func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) {
 	name := "testbuildcmdshc"
-	defer deleteImages(name)
 	if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectFieldJSON(name, "Config.Cmd")
 	if err != nil {
-		t.Fatal(err, res)
+		c.Fatal(err, res)
 	}
 
 	expected := `["/bin/sh","-c","echo cmd"]`
 
 	if res != expected {
-		t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
+		c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
 	}
 
-	logDone("build - cmd should have sh -c for non-json")
 }
 
-func TestBuildCmdSpaces(t *testing.T) {
+func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) {
 	// Test to make sure that when we strcat arrays we take into account
 	// the arg separator to make sure ["echo","hi"] and ["echo hi"] don't
 	// look the same
 	name := "testbuildcmdspaces"
-	defer deleteImages(name)
 	var id1 string
 	var id2 string
 	var err error
 
 	if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if id1 == id2 {
-		t.Fatal("Should not have resulted in the same CMD")
+		c.Fatal("Should not have resulted in the same CMD")
 	}
 
 	// Now do the same with ENTRYPOINT
 	if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if id1 == id2 {
-		t.Fatal("Should not have resulted in the same ENTRYPOINT")
+		c.Fatal("Should not have resulted in the same ENTRYPOINT")
 	}
 
-	logDone("build - cmd with spaces")
 }
 
-func TestBuildCmdJSONNoShDashC(t *testing.T) {
+func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) {
 	name := "testbuildcmdjson"
-	defer deleteImages(name)
 	if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectFieldJSON(name, "Config.Cmd")
 	if err != nil {
-		t.Fatal(err, res)
+		c.Fatal(err, res)
 	}
 
 	expected := `["echo","cmd"]`
 
 	if res != expected {
-		t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
+		c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
 	}
 
-	logDone("build - cmd should not have /bin/sh -c for json")
 }
 
-func TestBuildErrorInvalidInstruction(t *testing.T) {
+func (s *DockerSuite) TestBuildErrorInvalidInstruction(c *check.C) {
 	name := "testbuildignoreinvalidinstruction"
-	defer deleteImages(name)
 
 	out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true)
 	if err == nil {
-		t.Fatalf("Should have failed: %s", out)
+		c.Fatalf("Should have failed: %s", out)
 	}
 
-	logDone("build - error invalid Dockerfile instruction")
 }
 
-func TestBuildEntrypointInheritance(t *testing.T) {
-	defer deleteImages("parent", "child")
-	defer deleteAllContainers()
+func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) {
 
 	if _, err := buildImage("parent", `
     FROM busybox
     ENTRYPOINT exit 130
     `, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	status, _ := runCommand(exec.Command(dockerBinary, "run", "parent"))
 
 	if status != 130 {
-		t.Fatalf("expected exit code 130 but received %d", status)
+		c.Fatalf("expected exit code 130 but received %d", status)
 	}
 
 	if _, err := buildImage("child", `
     FROM parent
     ENTRYPOINT exit 5
     `, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	status, _ = runCommand(exec.Command(dockerBinary, "run", "child"))
 
 	if status != 5 {
-		t.Fatalf("expected exit code 5 but received %d", status)
+		c.Fatalf("expected exit code 5 but received %d", status)
 	}
 
-	logDone("build - clear entrypoint")
 }
 
-func TestBuildEntrypointInheritanceInspect(t *testing.T) {
+func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) {
 	var (
 		name     = "testbuildepinherit"
 		name2    = "testbuildepinherit2"
 		expected = `["/bin/sh","-c","echo quux"]`
 	)
 
-	defer deleteImages(name, name2)
-	defer deleteAllContainers()
-
 	if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	res, err := inspectFieldJSON(name2, "Config.Entrypoint")
 	if err != nil {
-		t.Fatal(err, res)
+		c.Fatal(err, res)
 	}
 
 	if res != expected {
-		t.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res)
+		c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2))
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	expected = "quux"
 
 	if strings.TrimSpace(out) != expected {
-		t.Fatalf("Expected output is %s, got %s", expected, out)
+		c.Fatalf("Expected output is %s, got %s", expected, out)
 	}
 
-	logDone("build - entrypoint override inheritance properly")
 }
 
-func TestBuildRunShEntrypoint(t *testing.T) {
+func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) {
 	name := "testbuildentrypoint"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
                                 ENTRYPOINT /bin/echo`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name))
 
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
-	logDone("build - entrypoint with /bin/echo running successfully")
 }
 
-func TestBuildExoticShellInterpolation(t *testing.T) {
+func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) {
 	name := "testbuildexoticshellinterpolation"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
 		FROM busybox
@@ -4642,21 +4456,18 @@
 		RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ]
 	`, false)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("build - exotic shell interpolation")
 }
 
-func TestBuildVerifySingleQuoteFails(t *testing.T) {
+func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
 	// This testcase is supposed to generate an error because the
 	// JSON array we're passing in on the CMD uses single quotes instead
 	// of double quotes (per the JSON spec). This means we interpret it
 	// as a "string" insead of "JSON array" and pass it on to "sh -c" and
 	// it should barf on it.
 	name := "testbuildsinglequotefails"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 
 	_, err := buildImage(name,
 		`FROM busybox
@@ -4665,15 +4476,13 @@
 	_, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name))
 
 	if err == nil {
-		t.Fatal("The image was not supposed to be able to run")
+		c.Fatal("The image was not supposed to be able to run")
 	}
 
-	logDone("build - verify single quotes break the build")
 }
 
-func TestBuildVerboseOut(t *testing.T) {
+func (s *DockerSuite) TestBuildVerboseOut(c *check.C) {
 	name := "testbuildverboseout"
-	defer deleteImages(name)
 
 	_, out, err := buildImageWithOut(name,
 		`FROM busybox
@@ -4681,87 +4490,81 @@
 		false)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !strings.Contains(out, "\n123\n") {
-		t.Fatalf("Output should contain %q: %q", "123", out)
+		c.Fatalf("Output should contain %q: %q", "123", out)
 	}
 
-	logDone("build - verbose output from commands")
 }
 
-func TestBuildWithTabs(t *testing.T) {
+func (s *DockerSuite) TestBuildWithTabs(c *check.C) {
 	name := "testbuildwithtabs"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		"FROM busybox\nRUN echo\tone\t\ttwo", true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectFieldJSON(name, "ContainerConfig.Cmd")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]`
 	expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
 	if res != expected1 && res != expected2 {
-		t.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
+		c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
 	}
-	logDone("build - with tabs")
 }
 
-func TestBuildLabels(t *testing.T) {
+func (s *DockerSuite) TestBuildLabels(c *check.C) {
 	name := "testbuildlabel"
 	expected := `{"License":"GPL","Vendor":"Acme"}`
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM busybox
 		LABEL Vendor=Acme
                 LABEL License GPL`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	res, err := inspectFieldJSON(name, "Config.Labels")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res != expected {
-		t.Fatalf("Labels %s, expected %s", res, expected)
+		c.Fatalf("Labels %s, expected %s", res, expected)
 	}
-	logDone("build - label")
 }
 
-func TestBuildLabelsCache(t *testing.T) {
+func (s *DockerSuite) TestBuildLabelsCache(c *check.C) {
 	name := "testbuildlabelcache"
-	defer deleteImages(name)
 
 	id1, err := buildImage(name,
 		`FROM busybox
 		LABEL Vendor=Acme`, false)
 	if err != nil {
-		t.Fatalf("Build 1 should have worked: %v", err)
+		c.Fatalf("Build 1 should have worked: %v", err)
 	}
 
 	id2, err := buildImage(name,
 		`FROM busybox
 		LABEL Vendor=Acme`, true)
 	if err != nil || id1 != id2 {
-		t.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err)
+		c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err)
 	}
 
 	id2, err = buildImage(name,
 		`FROM busybox
 		LABEL Vendor=Acme1`, true)
 	if err != nil || id1 == id2 {
-		t.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err)
+		c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err)
 	}
 
 	id2, err = buildImage(name,
 		`FROM busybox
 		LABEL Vendor Acme`, true) // Note: " " and "=" should be same
 	if err != nil || id1 != id2 {
-		t.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err)
+		c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err)
 	}
 
 	// Now make sure the cache isn't used by mistake
@@ -4769,28 +4572,26 @@
 		`FROM busybox
        LABEL f1=b1 f2=b2`, false)
 	if err != nil {
-		t.Fatalf("Build 5 should have worked: %q", err)
+		c.Fatalf("Build 5 should have worked: %q", err)
 	}
 
 	id2, err = buildImage(name,
 		`FROM busybox
        LABEL f1="b1 f2=b2"`, true)
 	if err != nil || id1 == id2 {
-		t.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err)
+		c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err)
 	}
 
-	logDone("build - label cache")
 }
 
-func TestBuildStderr(t *testing.T) {
+func (s *DockerSuite) TestBuildStderr(c *check.C) {
 	// This test just makes sure that no non-error output goes
 	// to stderr
 	name := "testbuildstderr"
-	defer deleteImages(name)
 	_, _, stderr, err := buildImageWithStdoutStderr(name,
 		"FROM busybox\nRUN echo one", true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if runtime.GOOS == "windows" {
@@ -4798,22 +4599,20 @@
 		lines := strings.Split(stderr, "\n")
 		for _, v := range lines {
 			if v != "" && !strings.Contains(v, "SECURITY WARNING:") {
-				t.Fatalf("Stderr contains unexpected output line: %q", v)
+				c.Fatalf("Stderr contains unexpected output line: %q", v)
 			}
 		}
 	} else {
 		if stderr != "" {
-			t.Fatalf("Stderr should have been empty, instead its: %q", stderr)
+			c.Fatalf("Stderr should have been empty, instead its: %q", stderr)
 		}
 	}
-	logDone("build - testing stderr")
 }
 
-func TestBuildChownSingleFile(t *testing.T) {
-	testRequires(t, UnixCli) // test uses chown: not available on windows
+func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) {
+	testRequires(c, UnixCli) // test uses chown: not available on windows
 
 	name := "testbuildchownsinglefile"
-	defer deleteImages(name)
 
 	ctx, err := fakeContext(`
 FROM busybox
@@ -4824,46 +4623,45 @@
 		"test": "test",
 	})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("build - change permission on single file")
 }
 
-func TestBuildSymlinkBreakout(t *testing.T) {
+func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) {
 	name := "testbuildsymlinkbreakout"
 	tmpdir, err := ioutil.TempDir("", name)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpdir)
 	ctx := filepath.Join(tmpdir, "context")
 	if err := os.MkdirAll(ctx, 0755); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
 	from busybox
 	add symlink.tar /
 	add inject /symlink/
 	`), 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	inject := filepath.Join(ctx, "inject")
 	if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	w := tar.NewWriter(f)
 	w.WriteHeader(&tar.Header{
@@ -4883,19 +4681,17 @@
 	w.Close()
 	f.Close()
 	if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
-		t.Fatal("symlink breakout - inject")
+		c.Fatal("symlink breakout - inject")
 	} else if !os.IsNotExist(err) {
-		t.Fatalf("unexpected error: %v", err)
+		c.Fatalf("unexpected error: %v", err)
 	}
-	logDone("build - symlink breakout")
 }
 
-func TestBuildXZHost(t *testing.T) {
+func (s *DockerSuite) TestBuildXZHost(c *check.C) {
 	name := "testbuildxzhost"
-	defer deleteImages(name)
 
 	ctx, err := fakeContext(`
 FROM busybox
@@ -4911,23 +4707,21 @@
 		})
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("build - xz host is being used")
 }
 
-func TestBuildVolumesRetainContents(t *testing.T) {
+func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) {
 	var (
 		name     = "testbuildvolumescontent"
 		expected = "some text"
 	)
-	defer deleteImages(name)
 	ctx, err := fakeContext(`
 FROM busybox
 COPY content /foo/file
@@ -4937,27 +4731,25 @@
 			"content": expected,
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err := buildImageFromContext(name, ctx, false); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if out != expected {
-		t.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
+		c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
 	}
 
-	logDone("build - volumes retain contents in build")
 }
 
-func TestBuildRenamedDockerfile(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
 
 	ctx, err := fakeContext(`FROM busybox
 	RUN echo from Dockerfile`,
@@ -4970,100 +4762,98 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".")
+	out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
 	if err != nil {
-		t.Fatalf("Failed to build: %s\n%s", out, err)
+		c.Fatalf("Failed to build: %s\n%s", out, err)
 	}
 	if !strings.Contains(out, "from Dockerfile") {
-		t.Fatalf("test1 should have used Dockerfile, output:%s", out)
+		c.Fatalf("test1 should have used Dockerfile, output:%s", out)
 	}
 
-	out, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".")
+	out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !strings.Contains(out, "from files/Dockerfile") {
-		t.Fatalf("test2 should have used files/Dockerfile, output:%s", out)
+		c.Fatalf("test2 should have used files/Dockerfile, output:%s", out)
 	}
 
-	out, _, err = dockerCmdInDir(t, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".")
+	out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !strings.Contains(out, "from files/dFile") {
-		t.Fatalf("test3 should have used files/dFile, output:%s", out)
+		c.Fatalf("test3 should have used files/dFile, output:%s", out)
 	}
 
-	out, _, err = dockerCmdInDir(t, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".")
+	out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !strings.Contains(out, "from dFile") {
-		t.Fatalf("test4 should have used dFile, output:%s", out)
+		c.Fatalf("test4 should have used dFile, output:%s", out)
 	}
 
 	dirWithNoDockerfile, _ := ioutil.TempDir(os.TempDir(), "test5")
 	nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile")
 	if _, err = os.Create(nonDockerfileFile); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	out, _, err = dockerCmdInDir(t, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".")
+	out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".")
 
 	if err == nil {
-		t.Fatalf("test5 was supposed to fail to find passwd")
+		c.Fatalf("test5 was supposed to fail to find passwd")
 	}
 
-	if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", strings.Replace(nonDockerfileFile, `\`, `\\`, -1)); !strings.Contains(out, expected) {
-		t.Fatalf("wrong error messsage:%v\nexpected to contain=%v", out, expected)
+	if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) {
+		c.Fatalf("wrong error messsage:%v\nexpected to contain=%v", out, expected)
 	}
 
-	out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..")
+	out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..")
 	if err != nil {
-		t.Fatalf("test6 failed: %s", err)
+		c.Fatalf("test6 failed: %s", err)
 	}
 	if !strings.Contains(out, "from Dockerfile") {
-		t.Fatalf("test6 should have used root Dockerfile, output:%s", out)
+		c.Fatalf("test6 should have used root Dockerfile, output:%s", out)
 	}
 
-	out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..")
+	out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..")
 	if err != nil {
-		t.Fatalf("test7 failed: %s", err)
+		c.Fatalf("test7 failed: %s", err)
 	}
 	if !strings.Contains(out, "from files/Dockerfile") {
-		t.Fatalf("test7 should have used files Dockerfile, output:%s", out)
+		c.Fatalf("test7 should have used files Dockerfile, output:%s", out)
 	}
 
-	out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".")
+	out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".")
 	if err == nil || !strings.Contains(out, "must be within the build context") {
-		t.Fatalf("test8 should have failed with Dockerfile out of context: %s", err)
+		c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err)
 	}
 
 	tmpDir := os.TempDir()
-	out, _, err = dockerCmdInDir(t, tmpDir, "build", "-t", "test9", ctx.Dir)
+	out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir)
 	if err != nil {
-		t.Fatalf("test9 - failed: %s", err)
+		c.Fatalf("test9 - failed: %s", err)
 	}
 	if !strings.Contains(out, "from Dockerfile") {
-		t.Fatalf("test9 should have used root Dockerfile, output:%s", out)
+		c.Fatalf("test9 should have used root Dockerfile, output:%s", out)
 	}
 
-	out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".")
+	out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".")
 	if err != nil {
-		t.Fatalf("test10 should have worked: %s", err)
+		c.Fatalf("test10 should have worked: %s", err)
 	}
 	if !strings.Contains(out, "from files/dFile2") {
-		t.Fatalf("test10 should have used files/dFile2, output:%s", out)
+		c.Fatalf("test10 should have used files/dFile2, output:%s", out)
 	}
 
-	logDone("build - rename dockerfile")
 }
 
-func TestBuildFromMixedcaseDockerfile(t *testing.T) {
-	testRequires(t, UnixCli) // Dockerfile overwrites dockerfile on windows
-	defer deleteImages("test1")
+func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) {
+	testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
 
 	ctx, err := fakeContext(`FROM busybox
 	RUN echo from dockerfile`,
@@ -5072,24 +4862,22 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".")
+	out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
 	if err != nil {
-		t.Fatalf("Failed to build: %s\n%s", out, err)
+		c.Fatalf("Failed to build: %s\n%s", out, err)
 	}
 
 	if !strings.Contains(out, "from dockerfile") {
-		t.Fatalf("Missing proper output: %s", out)
+		c.Fatalf("Missing proper output: %s", out)
 	}
 
-	logDone("build - mixedcase Dockerfile")
 }
 
-func TestBuildWithTwoDockerfiles(t *testing.T) {
-	testRequires(t, UnixCli) // Dockerfile overwrites dockerfile on windows
-	defer deleteImages("test1")
+func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) {
+	testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
 
 	ctx, err := fakeContext(`FROM busybox
 RUN echo from Dockerfile`,
@@ -5098,30 +4886,28 @@
 		})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".")
+	out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".")
 	if err != nil {
-		t.Fatalf("Failed to build: %s\n%s", out, err)
+		c.Fatalf("Failed to build: %s\n%s", out, err)
 	}
 
 	if !strings.Contains(out, "from Dockerfile") {
-		t.Fatalf("Missing proper output: %s", out)
+		c.Fatalf("Missing proper output: %s", out)
 	}
 
-	logDone("build - two Dockerfiles")
 }
 
-func TestBuildFromURLWithF(t *testing.T) {
-	defer deleteImages("test1")
+func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) {
 
 	server, err := fakeStorage(map[string]string{"baz": `FROM busybox
 RUN echo from baz
 COPY * /tmp/
 RUN find /tmp/`})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer server.Close()
 
@@ -5130,34 +4916,32 @@
 		map[string]string{})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Make sure that -f is ignored and that we don't use the Dockerfile
 	// that's in the current dir
-	out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz")
+	out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz")
 	if err != nil {
-		t.Fatalf("Failed to build: %s\n%s", out, err)
+		c.Fatalf("Failed to build: %s\n%s", out, err)
 	}
 
 	if !strings.Contains(out, "from baz") ||
 		strings.Contains(out, "/tmp/baz") ||
 		!strings.Contains(out, "/tmp/Dockerfile") {
-		t.Fatalf("Missing proper output: %s", out)
+		c.Fatalf("Missing proper output: %s", out)
 	}
 
-	logDone("build - from URL with -f")
 }
 
-func TestBuildFromStdinWithF(t *testing.T) {
-	defer deleteImages("test1")
+func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) {
 
 	ctx, err := fakeContext(`FROM busybox
 RUN echo from Dockerfile`,
 		map[string]string{})
 	defer ctx.Close()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Make sure that -f is ignored and that we don't use the Dockerfile
@@ -5170,19 +4954,18 @@
 RUN find /tmp/`)
 	out, status, err := runCommandWithOutput(dockerCommand)
 	if err != nil || status != 0 {
-		t.Fatalf("Error building: %s", err)
+		c.Fatalf("Error building: %s", err)
 	}
 
 	if !strings.Contains(out, "from baz") ||
 		strings.Contains(out, "/tmp/baz") ||
 		!strings.Contains(out, "/tmp/Dockerfile") {
-		t.Fatalf("Missing proper output: %s", out)
+		c.Fatalf("Missing proper output: %s", out)
 	}
 
-	logDone("build - from stdin with -f")
 }
 
-func TestBuildFromOfficialNames(t *testing.T) {
+func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) {
 	name := "testbuildfromofficial"
 	fromNames := []string{
 		"busybox",
@@ -5196,45 +4979,44 @@
 		imgName := fmt.Sprintf("%s%d", name, idx)
 		_, err := buildImage(imgName, "FROM "+fromName, true)
 		if err != nil {
-			t.Errorf("Build failed using FROM %s: %s", fromName, err)
+			c.Errorf("Build failed using FROM %s: %s", fromName, err)
 		}
 		deleteImages(imgName)
 	}
-	logDone("build - from official names")
 }
 
-func TestBuildDockerfileOutsideContext(t *testing.T) {
-	testRequires(t, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2)
+func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) {
+	testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2)
 
 	name := "testbuilddockerfileoutsidecontext"
 	tmpdir, err := ioutil.TempDir("", name)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpdir)
 	ctx := filepath.Join(tmpdir, "context")
 	if err := os.MkdirAll(ctx, 0755); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	wd, err := os.Getwd()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.Chdir(wd)
 	if err := os.Chdir(ctx); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	for _, dockerfilePath := range []string{
@@ -5244,10 +5026,10 @@
 	} {
 		out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "-t", name, "--no-cache", "-f", dockerfilePath, "."))
 		if err == nil {
-			t.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out)
+			c.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out)
 		}
 		if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") {
-			t.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out)
+			c.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out)
 		}
 		deleteImages(name)
 	}
@@ -5258,14 +5040,11 @@
 	// There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx))
 	if err == nil {
-		t.Fatalf("Expected error. Out: %s", out)
+		c.Fatalf("Expected error. Out: %s", out)
 	}
-	deleteImages(name)
-
-	logDone("build - Dockerfile outside context")
 }
 
-func TestBuildSpaces(t *testing.T) {
+func (s *DockerSuite) TestBuildSpaces(c *check.C) {
 	// Test to make sure that leading/trailing spaces on a command
 	// doesn't change the error msg we get
 	var (
@@ -5274,23 +5053,22 @@
 	)
 
 	name := "testspaces"
-	defer deleteImages(name)
 	ctx, err := fakeContext("FROM busybox\nCOPY\n",
 		map[string]string{
 			"Dockerfile": "FROM busybox\nCOPY\n",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil {
-		t.Fatal("Build 1 was supposed to fail, but didn't")
+		c.Fatal("Build 1 was supposed to fail, but didn't")
 	}
 
 	ctx.Add("Dockerfile", "FROM busybox\nCOPY    ")
 	if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
-		t.Fatal("Build 2 was supposed to fail, but didn't")
+		c.Fatal("Build 2 was supposed to fail, but didn't")
 	}
 
 	removeLogTimestamps := func(s string) string {
@@ -5303,12 +5081,12 @@
 
 	// Ignore whitespace since that's what were verifying doesn't change stuff
 	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
-		t.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2)
+		c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2)
 	}
 
 	ctx.Add("Dockerfile", "FROM busybox\n   COPY")
 	if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
-		t.Fatal("Build 3 was supposed to fail, but didn't")
+		c.Fatal("Build 3 was supposed to fail, but didn't")
 	}
 
 	// Skip over the times
@@ -5317,12 +5095,12 @@
 
 	// Ignore whitespace since that's what were verifying doesn't change stuff
 	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
-		t.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2)
+		c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2)
 	}
 
 	ctx.Add("Dockerfile", "FROM busybox\n   COPY    ")
 	if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil {
-		t.Fatal("Build 4 was supposed to fail, but didn't")
+		c.Fatal("Build 4 was supposed to fail, but didn't")
 	}
 
 	// Skip over the times
@@ -5331,16 +5109,14 @@
 
 	// Ignore whitespace since that's what were verifying doesn't change stuff
 	if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
-		t.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2)
+		c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2)
 	}
 
-	logDone("build - test spaces")
 }
 
-func TestBuildSpacesWithQuotes(t *testing.T) {
+func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
 	// Test to make sure that spaces in quotes aren't lost
 	name := "testspacesquotes"
-	defer deleteImages(name)
 
 	dockerfile := `FROM busybox
 RUN echo "  \
@@ -5348,19 +5124,18 @@
 
 	_, out, err := buildImageWithOut(name, dockerfile, false)
 	if err != nil {
-		t.Fatal("Build failed:", err)
+		c.Fatal("Build failed:", err)
 	}
 
 	expecting := "\n    foo  \n"
 	if !strings.Contains(out, expecting) {
-		t.Fatalf("Bad output: %q expecting to contian %q", out, expecting)
+		c.Fatalf("Bad output: %q expecting to contain %q", out, expecting)
 	}
 
-	logDone("build - test spaces with quotes")
 }
 
 // #4393
-func TestBuildVolumeFileExistsinContainer(t *testing.T) {
+func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) {
 	buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-")
 	buildCmd.Stdin = strings.NewReader(`
 	FROM busybox
@@ -5370,13 +5145,12 @@
 
 	out, _, err := runCommandWithOutput(buildCmd)
 	if err == nil || !strings.Contains(out, "file exists") {
-		t.Fatalf("expected build to fail when file exists in container at requested volume path")
+		c.Fatalf("expected build to fail when file exists in container at requested volume path")
 	}
 
-	logDone("build - errors when volume is specified where a file exists")
 }
 
-func TestBuildMissingArgs(t *testing.T) {
+func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
 	// Test to make sure that all Dockerfile commands (except the ones listed
 	// in skipCmds) will generate an error if no args are provided.
 	// Note: INSERT is deprecated so we exclude it because of that.
@@ -5387,8 +5161,6 @@
 		"INSERT":     {},
 	}
 
-	defer deleteAllContainers()
-
 	for cmd := range command.Commands {
 		cmd = strings.ToUpper(cmd)
 		if _, ok := skipCmds[cmd]; ok {
@@ -5405,57 +5177,50 @@
 
 		ctx, err := fakeContext(dockerfile, map[string]string{})
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		defer ctx.Close()
 		var out string
 		if out, err = buildImageFromContext("args", ctx, true); err == nil {
-			t.Fatalf("%s was supposed to fail. Out:%s", cmd, out)
+			c.Fatalf("%s was supposed to fail. Out:%s", cmd, out)
 		}
 		if !strings.Contains(err.Error(), cmd+" requires") {
-			t.Fatalf("%s returned the wrong type of error:%s", cmd, err)
+			c.Fatalf("%s returned the wrong type of error:%s", cmd, err)
 		}
 	}
 
-	logDone("build - verify missing args")
 }
 
-func TestBuildEmptyScratch(t *testing.T) {
-	defer deleteImages("sc")
+func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
 	_, out, err := buildImageWithOut("sc", "FROM scratch", true)
 	if err == nil {
-		t.Fatalf("Build was supposed to fail")
+		c.Fatalf("Build was supposed to fail")
 	}
 	if !strings.Contains(out, "No image was generated") {
-		t.Fatalf("Wrong error message: %v", out)
+		c.Fatalf("Wrong error message: %v", out)
 	}
-	logDone("build - empty scratch Dockerfile")
 }
 
-func TestBuildDotDotFile(t *testing.T) {
-	defer deleteImages("sc")
+func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
 	ctx, err := fakeContext("FROM busybox\n",
 		map[string]string{
 			"..gitme": "",
 		})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
 	if _, err = buildImageFromContext("sc", ctx, false); err != nil {
-		t.Fatalf("Build was supposed to work: %s", err)
+		c.Fatalf("Build was supposed to work: %s", err)
 	}
-	logDone("build - ..file")
 }
 
-func TestBuildNotVerbose(t *testing.T) {
-	defer deleteAllContainers()
-	defer deleteImages("verbose")
+func (s *DockerSuite) TestBuildNotVerbose(c *check.C) {
 
 	ctx, err := fakeContext("FROM busybox\nENV abc=hi\nRUN echo $abc there", map[string]string{})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
@@ -5464,10 +5229,10 @@
 	buildCmd.Dir = ctx.Dir
 	out, _, err := runCommandWithOutput(buildCmd)
 	if err != nil {
-		t.Fatalf("failed to build the image w/o -q: %s, %v", out, err)
+		c.Fatalf("failed to build the image w/o -q: %s, %v", out, err)
 	}
 	if !strings.Contains(out, "hi there") {
-		t.Fatalf("missing output:%s\n", out)
+		c.Fatalf("missing output:%s\n", out)
 	}
 
 	// Now do it w/o verbose
@@ -5475,25 +5240,21 @@
 	buildCmd.Dir = ctx.Dir
 	out, _, err = runCommandWithOutput(buildCmd)
 	if err != nil {
-		t.Fatalf("failed to build the image w/ -q: %s, %v", out, err)
+		c.Fatalf("failed to build the image w/ -q: %s, %v", out, err)
 	}
 	if strings.Contains(out, "hi there") {
-		t.Fatalf("Bad output, should not contain 'hi there':%s", out)
+		c.Fatalf("Bad output, should not contain 'hi there':%s", out)
 	}
 
-	logDone("build - not verbose")
 }
 
-func TestBuildRUNoneJSON(t *testing.T) {
+func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
 	name := "testbuildrunonejson"
 
-	defer deleteAllContainers()
-	defer deleteImages(name)
-
 	ctx, err := fakeContext(`FROM hello-world:frozen
 RUN [ "/hello" ]`, map[string]string{})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer ctx.Close()
 
@@ -5501,90 +5262,80 @@
 	buildCmd.Dir = ctx.Dir
 	out, _, err := runCommandWithOutput(buildCmd)
 	if err != nil {
-		t.Fatalf("failed to build the image: %s, %v", out, err)
+		c.Fatalf("failed to build the image: %s, %v", out, err)
 	}
 
 	if !strings.Contains(out, "Hello from Docker") {
-		t.Fatalf("bad output: %s", out)
+		c.Fatalf("bad output: %s", out)
 	}
 
-	logDone("build - RUN with one JSON arg")
 }
 
-func TestBuildResourceConstraintsAreUsed(t *testing.T) {
+func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) {
 	name := "testbuildresourceconstraints"
-	defer deleteAllContainers()
-	defer deleteImages(name)
 
 	ctx, err := fakeContext(`
 	FROM hello-world:frozen
 	RUN ["/hello"]
 	`, map[string]string{})
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	cmd := exec.Command(dockerBinary, "build", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=1", "--cpu-shares=100", "-t", name, ".")
+	cmd := exec.Command(dockerBinary, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "-t", name, ".")
 	cmd.Dir = ctx.Dir
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-	out, _, err = dockerCmd(t, "ps", "-lq")
-	if err != nil {
-		t.Fatal(err, out)
-	}
+	out, _ = dockerCmd(c, "ps", "-lq")
 
-	cID := stripTrailingCharacters(out)
+	cID := strings.TrimSpace(out)
 
 	type hostConfig struct {
-		Memory     float64 // Use float64 here since the json decoder sees it that way
-		MemorySwap int
+		Memory     int64
+		MemorySwap int64
 		CpusetCpus string
-		CpuShares  int
+		CpusetMems string
+		CpuShares  int64
+		CpuQuota   int64
 	}
 
 	cfg, err := inspectFieldJSON(cID, "HostConfig")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	var c1 hostConfig
 	if err := json.Unmarshal([]byte(cfg), &c1); err != nil {
-		t.Fatal(err, cfg)
+		c.Fatal(err, cfg)
 	}
-	mem := int64(c1.Memory)
-	if mem != 67108864 || c1.MemorySwap != -1 || c1.CpusetCpus != "1" || c1.CpuShares != 100 {
-		t.Fatalf("resource constraints not set properly:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpuShares: %d",
-			mem, c1.MemorySwap, c1.CpusetCpus, c1.CpuShares)
+	if c1.Memory != 67108864 || c1.MemorySwap != -1 || c1.CpusetCpus != "0" || c1.CpusetMems != "0" || c1.CpuShares != 100 || c1.CpuQuota != 8000 {
+		c.Fatalf("resource constraints not set properly:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpusetMems: %s, CpuShares: %d, CpuQuota: %d",
+			c1.Memory, c1.MemorySwap, c1.CpusetCpus, c1.CpusetMems, c1.CpuShares, c1.CpuQuota)
 	}
 
 	// Make sure constraints aren't saved to image
-	_, _, err = dockerCmd(t, "run", "--name=test", name)
-	if err != nil {
-		t.Fatal(err)
-	}
+	_, _ = dockerCmd(c, "run", "--name=test", name)
+
 	cfg, err = inspectFieldJSON("test", "HostConfig")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	var c2 hostConfig
 	if err := json.Unmarshal([]byte(cfg), &c2); err != nil {
-		t.Fatal(err, cfg)
+		c.Fatal(err, cfg)
 	}
-	mem = int64(c2.Memory)
-	if mem == 67108864 || c2.MemorySwap == -1 || c2.CpusetCpus == "1" || c2.CpuShares == 100 {
-		t.Fatalf("resource constraints leaked from build:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpuShares: %d",
-			mem, c2.MemorySwap, c2.CpusetCpus, c2.CpuShares)
+	if c2.Memory == 67108864 || c2.MemorySwap == -1 || c2.CpusetCpus == "0" || c2.CpusetMems == "0" || c2.CpuShares == 100 || c2.CpuQuota == 8000 {
+		c.Fatalf("resource constraints leaked from build:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpusetMems: %s, CpuShares: %d, CpuQuota: %d",
+			c2.Memory, c2.MemorySwap, c2.CpusetCpus, c2.CpusetMems, c2.CpuShares, c2.CpuQuota)
 	}
 
-	logDone("build - resource constraints applied")
 }
 
-func TestBuildEmptyStringVolume(t *testing.T) {
+func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
 	name := "testbuildemptystringvolume"
-	defer deleteImages(name)
 
 	_, err := buildImage(name, `
   FROM busybox
@@ -5592,8 +5343,86 @@
   VOLUME $foo
   `, false)
 	if err == nil {
-		t.Fatal("Should have failed to build")
+		c.Fatal("Should have failed to build")
 	}
 
-	logDone("build - empty string volume")
+}
+
+func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
+	testRequires(c, NativeExecDriver)
+	testRequires(c, SameHostDaemon)
+	defer deleteImages()
+
+	cgroupParent := "test"
+	data, err := ioutil.ReadFile("/proc/self/cgroup")
+	if err != nil {
+		c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
+	}
+	selfCgroupPaths := parseCgroupPaths(string(data))
+	_, found := selfCgroupPaths["memory"]
+	if !found {
+		c.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
+	}
+	cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-")
+	cmd.Stdin = strings.NewReader(`
+FROM busybox
+RUN cat /proc/self/cgroup
+`)
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
+	}
+}
+
+func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
+	// Check to make sure our build output prints the Dockerfile cmd
+	// property - there was a bug that caused it to be duplicated on the
+	// Step X  line
+	name := "testbuildnodupoutput"
+
+	_, out, err := buildImageWithOut(name, `
+  FROM busybox
+  RUN env`, false)
+	if err != nil {
+		c.Fatalf("Build should have worked: %q", err)
+	}
+
+	exp := "\nStep 1 : RUN env\n"
+	if !strings.Contains(out, exp) {
+		c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
+	}
+}
+
+func (s *DockerSuite) TestBuildBadCmdFlag(c *check.C) {
+	name := "testbuildbadcmdflag"
+
+	_, out, err := buildImageWithOut(name, `
+  FROM busybox
+  MAINTAINER --boo joe@example.com`, false)
+	if err == nil {
+		c.Fatal("Build should have failed")
+	}
+
+	exp := "\nUnknown flag: boo\n"
+	if !strings.Contains(out, exp) {
+		c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp)
+	}
+}
+
+func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
+	// Test to make sure the bad command is quoted with just "s and
+	// not as a Go []string
+	name := "testbuildbadrunerrmsg"
+	_, out, err := buildImageWithOut(name, `
+  FROM busybox
+  RUN badEXE a1 \& a2	a3`, false) // tab between a2 and a3
+	if err == nil {
+		c.Fatal("Should have failed to build")
+	}
+
+	exp := `The command '/bin/sh -c badEXE a1 \& a2	a3' returned a non-zero code: 127`
+	if !strings.Contains(out, exp) {
+		c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp)
+	}
 }
diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go
index 24ebf0c..d4d4949 100644
--- a/integration-cli/docker_cli_by_digest_test.go
+++ b/integration-cli/docker_cli_by_digest_test.go
@@ -5,9 +5,9 @@
 	"os/exec"
 	"regexp"
 	"strings"
-	"testing"
 
 	"github.com/docker/docker/utils"
+	"github.com/go-check/check"
 )
 
 var (
@@ -22,18 +22,17 @@
 func setupImageWithTag(tag string) (string, error) {
 	containerName := "busyboxbydigest"
 
-	c := exec.Command(dockerBinary, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox")
-	if _, err := runCommand(c); err != nil {
+	cmd := exec.Command(dockerBinary, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox")
+	if _, err := runCommand(cmd); err != nil {
 		return "", err
 	}
 
 	// tag the image to upload it to the private registry
 	repoAndTag := utils.ImageReference(repoName, tag)
-	c = exec.Command(dockerBinary, "commit", containerName, repoAndTag)
-	if out, _, err := runCommandWithOutput(c); err != nil {
+	cmd = exec.Command(dockerBinary, "commit", containerName, repoAndTag)
+	if out, _, err := runCommandWithOutput(cmd); err != nil {
 		return "", fmt.Errorf("image tagging failed: %s, %v", out, err)
 	}
-	defer deleteImages(repoAndTag)
 
 	// delete the container as we don't need it any more
 	if err := deleteContainer(containerName); err != nil {
@@ -41,15 +40,15 @@
 	}
 
 	// push the image
-	c = exec.Command(dockerBinary, "push", repoAndTag)
-	out, _, err := runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "push", repoAndTag)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
 		return "", fmt.Errorf("pushing the image to the private registry has failed: %s, %v", out, err)
 	}
 
 	// delete our local repo that we previously tagged
-	c = exec.Command(dockerBinary, "rmi", repoAndTag)
-	if out, _, err := runCommandWithOutput(c); err != nil {
+	cmd = exec.Command(dockerBinary, "rmi", repoAndTag)
+	if out, _, err := runCommandWithOutput(cmd); err != nil {
 		return "", fmt.Errorf("error deleting images prior to real test: %s, %v", out, err)
 	}
 
@@ -63,473 +62,436 @@
 	return pushDigest, nil
 }
 
-func TestPullByTagDisplaysDigest(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
 	pushDigest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	// pull from the registry using the tag
-	c := exec.Command(dockerBinary, "pull", repoName)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", repoName)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by tag: %s, %v", out, err)
+		c.Fatalf("error pulling by tag: %s, %v", out, err)
 	}
-	defer deleteImages(repoName)
 
 	// the pull output includes "Digest: <digest>", so find that
 	matches := digestRegex.FindStringSubmatch(out)
 	if len(matches) != 2 {
-		t.Fatalf("unable to parse digest from pull output: %s", out)
+		c.Fatalf("unable to parse digest from pull output: %s", out)
 	}
 	pullDigest := matches[1]
 
 	// make sure the pushed and pull digests match
 	if pushDigest != pullDigest {
-		t.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
+		c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
 	}
-
-	logDone("by_digest - pull by tag displays digest")
 }
 
-func TestPullByDigest(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
 	pushDigest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	// pull from the registry using the <name>@<digest> reference
 	imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
-	c := exec.Command(dockerBinary, "pull", imageReference)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", imageReference)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
-	defer deleteImages(imageReference)
 
 	// the pull output includes "Digest: <digest>", so find that
 	matches := digestRegex.FindStringSubmatch(out)
 	if len(matches) != 2 {
-		t.Fatalf("unable to parse digest from pull output: %s", out)
+		c.Fatalf("unable to parse digest from pull output: %s", out)
 	}
 	pullDigest := matches[1]
 
 	// make sure the pushed and pull digests match
 	if pushDigest != pullDigest {
-		t.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
+		c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
 	}
-
-	logDone("by_digest - pull by digest")
 }
 
-func TestCreateByDigest(t *testing.T) {
-	defer setupRegistry(t)()
+func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
+	// pull from the registry using the <name>@<digest> reference
+	imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName)
+	cmd := exec.Command(dockerBinary, "pull", imageReference)
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil || !strings.Contains(out, "pulling with digest reference failed from v2 registry") {
+		c.Fatalf("expected non-zero exit status and correct error message when pulling non-existing image: %s", out)
+	}
+}
 
+func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
 	pushDigest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
 
 	containerName := "createByDigest"
-	c := exec.Command(dockerBinary, "create", "--name", containerName, imageReference)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "create", "--name", containerName, imageReference)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error creating by digest: %s, %v", out, err)
+		c.Fatalf("error creating by digest: %s, %v", out, err)
 	}
-	defer deleteContainer(containerName)
 
 	res, err := inspectField(containerName, "Config.Image")
 	if err != nil {
-		t.Fatalf("failed to get Config.Image: %s, %v", out, err)
+		c.Fatalf("failed to get Config.Image: %s, %v", out, err)
 	}
 	if res != imageReference {
-		t.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
+		c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
 	}
-
-	logDone("by_digest - create by digest")
 }
 
-func TestRunByDigest(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) {
 	pushDigest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
 
 	containerName := "runByDigest"
-	c := exec.Command(dockerBinary, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest")
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest")
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error run by digest: %s, %v", out, err)
+		c.Fatalf("error run by digest: %s, %v", out, err)
 	}
-	defer deleteContainer(containerName)
 
 	foundRegex := regexp.MustCompile("found=([^\n]+)")
 	matches := foundRegex.FindStringSubmatch(out)
 	if len(matches) != 2 {
-		t.Fatalf("error locating expected 'found=1' output: %s", out)
+		c.Fatalf("error locating expected 'found=1' output: %s", out)
 	}
 	if matches[1] != "1" {
-		t.Fatalf("Expected %q, got %q", "1", matches[1])
+		c.Fatalf("Expected %q, got %q", "1", matches[1])
 	}
 
 	res, err := inspectField(containerName, "Config.Image")
 	if err != nil {
-		t.Fatalf("failed to get Config.Image: %s, %v", out, err)
+		c.Fatalf("failed to get Config.Image: %s, %v", out, err)
 	}
 	if res != imageReference {
-		t.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
+		c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference)
 	}
-
-	logDone("by_digest - run by digest")
 }
 
-func TestRemoveImageByDigest(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) {
 	digest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	imageReference := fmt.Sprintf("%s@%s", repoName, digest)
 
 	// pull from the registry using the <name>@<digest> reference
-	c := exec.Command(dockerBinary, "pull", imageReference)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", imageReference)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 
 	// make sure inspect runs ok
 	if _, err := inspectField(imageReference, "Id"); err != nil {
-		t.Fatalf("failed to inspect image: %v", err)
+		c.Fatalf("failed to inspect image: %v", err)
 	}
 
 	// do the delete
 	if err := deleteImages(imageReference); err != nil {
-		t.Fatalf("unexpected error deleting image: %v", err)
+		c.Fatalf("unexpected error deleting image: %v", err)
 	}
 
 	// try to inspect again - it should error this time
 	if _, err := inspectField(imageReference, "Id"); err == nil {
-		t.Fatalf("unexpected nil err trying to inspect what should be a non-existent image")
+		c.Fatalf("unexpected nil err trying to inspect what should be a non-existent image")
 	} else if !strings.Contains(err.Error(), "No such image") {
-		t.Fatalf("expected 'No such image' output, got %v", err)
+		c.Fatalf("expected 'No such image' output, got %v", err)
 	}
-
-	logDone("by_digest - remove image by digest")
 }
 
-func TestBuildByDigest(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) {
 	digest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	imageReference := fmt.Sprintf("%s@%s", repoName, digest)
 
 	// pull from the registry using the <name>@<digest> reference
-	c := exec.Command(dockerBinary, "pull", imageReference)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", imageReference)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 
 	// get the image id
 	imageID, err := inspectField(imageReference, "Id")
 	if err != nil {
-		t.Fatalf("error getting image id: %v", err)
+		c.Fatalf("error getting image id: %v", err)
 	}
 
 	// do the build
 	name := "buildbydigest"
-	defer deleteImages(name)
 	_, err = buildImage(name, fmt.Sprintf(
 		`FROM %s
      CMD ["/bin/echo", "Hello World"]`, imageReference),
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// get the build's image id
 	res, err := inspectField(name, "Config.Image")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	// make sure they match
 	if res != imageID {
-		t.Fatalf("Image %s, expected %s", res, imageID)
+		c.Fatalf("Image %s, expected %s", res, imageID)
 	}
-
-	logDone("by_digest - build by digest")
 }
 
-func TestTagByDigest(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) {
 	digest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	imageReference := fmt.Sprintf("%s@%s", repoName, digest)
 
 	// pull from the registry using the <name>@<digest> reference
-	c := exec.Command(dockerBinary, "pull", imageReference)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", imageReference)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 
 	// tag it
 	tag := "tagbydigest"
-	c = exec.Command(dockerBinary, "tag", imageReference, tag)
-	if _, err := runCommand(c); err != nil {
-		t.Fatalf("unexpected error tagging: %v", err)
+	cmd = exec.Command(dockerBinary, "tag", imageReference, tag)
+	if _, err := runCommand(cmd); err != nil {
+		c.Fatalf("unexpected error tagging: %v", err)
 	}
 
 	expectedID, err := inspectField(imageReference, "Id")
 	if err != nil {
-		t.Fatalf("error getting original image id: %v", err)
+		c.Fatalf("error getting original image id: %v", err)
 	}
 
 	tagID, err := inspectField(tag, "Id")
 	if err != nil {
-		t.Fatalf("error getting tagged image id: %v", err)
+		c.Fatalf("error getting tagged image id: %v", err)
 	}
 
 	if tagID != expectedID {
-		t.Fatalf("expected image id %q, got %q", expectedID, tagID)
+		c.Fatalf("expected image id %q, got %q", expectedID, tagID)
 	}
-
-	logDone("by_digest - tag by digest")
 }
 
-func TestListImagesWithoutDigests(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) {
 	digest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	imageReference := fmt.Sprintf("%s@%s", repoName, digest)
 
 	// pull from the registry using the <name>@<digest> reference
-	c := exec.Command(dockerBinary, "pull", imageReference)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", imageReference)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 
-	c = exec.Command(dockerBinary, "images")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "images")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error listing images: %s, %v", out, err)
+		c.Fatalf("error listing images: %s, %v", out, err)
 	}
 
 	if strings.Contains(out, "DIGEST") {
-		t.Fatalf("list output should not have contained DIGEST header: %s", out)
+		c.Fatalf("list output should not have contained DIGEST header: %s", out)
 	}
 
-	logDone("by_digest - list images - digest header not displayed by default")
 }
 
-func TestListImagesWithDigests(t *testing.T) {
-	defer setupRegistry(t)()
-	defer deleteImages(repoName+":tag1", repoName+":tag2")
+func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) {
 
 	// setup image1
 	digest1, err := setupImageWithTag("tag1")
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 	imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1)
-	defer deleteImages(imageReference1)
-	t.Logf("imageReference1 = %s", imageReference1)
+	c.Logf("imageReference1 = %s", imageReference1)
 
 	// pull image1 by digest
-	c := exec.Command(dockerBinary, "pull", imageReference1)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", imageReference1)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 
 	// list images
-	c = exec.Command(dockerBinary, "images", "--digests")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "images", "--digests")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error listing images: %s, %v", out, err)
+		c.Fatalf("error listing images: %s, %v", out, err)
 	}
 
 	// make sure repo shown, tag=<none>, digest = $digest1
 	re1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1 + `\s`)
 	if !re1.MatchString(out) {
-		t.Fatalf("expected %q: %s", re1.String(), out)
+		c.Fatalf("expected %q: %s", re1.String(), out)
 	}
 
 	// setup image2
 	digest2, err := setupImageWithTag("tag2")
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 	imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2)
-	defer deleteImages(imageReference2)
-	t.Logf("imageReference2 = %s", imageReference2)
+	c.Logf("imageReference2 = %s", imageReference2)
 
 	// pull image1 by digest
-	c = exec.Command(dockerBinary, "pull", imageReference1)
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "pull", imageReference1)
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 
 	// pull image2 by digest
-	c = exec.Command(dockerBinary, "pull", imageReference2)
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "pull", imageReference2)
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 
 	// list images
-	c = exec.Command(dockerBinary, "images", "--digests")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "images", "--digests")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error listing images: %s, %v", out, err)
+		c.Fatalf("error listing images: %s, %v", out, err)
 	}
 
 	// make sure repo shown, tag=<none>, digest = $digest1
 	if !re1.MatchString(out) {
-		t.Fatalf("expected %q: %s", re1.String(), out)
+		c.Fatalf("expected %q: %s", re1.String(), out)
 	}
 
 	// make sure repo shown, tag=<none>, digest = $digest2
 	re2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2 + `\s`)
 	if !re2.MatchString(out) {
-		t.Fatalf("expected %q: %s", re2.String(), out)
+		c.Fatalf("expected %q: %s", re2.String(), out)
 	}
 
 	// pull tag1
-	c = exec.Command(dockerBinary, "pull", repoName+":tag1")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "pull", repoName+":tag1")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling tag1: %s, %v", out, err)
+		c.Fatalf("error pulling tag1: %s, %v", out, err)
 	}
 
 	// list images
-	c = exec.Command(dockerBinary, "images", "--digests")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "images", "--digests")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error listing images: %s, %v", out, err)
+		c.Fatalf("error listing images: %s, %v", out, err)
 	}
 
 	// make sure image 1 has repo, tag, <none> AND repo, <none>, digest
 	reWithTag1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*<none>\s`)
 	reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1 + `\s`)
 	if !reWithTag1.MatchString(out) {
-		t.Fatalf("expected %q: %s", reWithTag1.String(), out)
+		c.Fatalf("expected %q: %s", reWithTag1.String(), out)
 	}
 	if !reWithDigest1.MatchString(out) {
-		t.Fatalf("expected %q: %s", reWithDigest1.String(), out)
+		c.Fatalf("expected %q: %s", reWithDigest1.String(), out)
 	}
 	// make sure image 2 has repo, <none>, digest
 	if !re2.MatchString(out) {
-		t.Fatalf("expected %q: %s", re2.String(), out)
+		c.Fatalf("expected %q: %s", re2.String(), out)
 	}
 
 	// pull tag 2
-	c = exec.Command(dockerBinary, "pull", repoName+":tag2")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "pull", repoName+":tag2")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling tag2: %s, %v", out, err)
+		c.Fatalf("error pulling tag2: %s, %v", out, err)
 	}
 
 	// list images
-	c = exec.Command(dockerBinary, "images", "--digests")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "images", "--digests")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error listing images: %s, %v", out, err)
+		c.Fatalf("error listing images: %s, %v", out, err)
 	}
 
 	// make sure image 1 has repo, tag, digest
 	if !reWithTag1.MatchString(out) {
-		t.Fatalf("expected %q: %s", re1.String(), out)
+		c.Fatalf("expected %q: %s", re1.String(), out)
 	}
 
 	// make sure image 2 has repo, tag, digest
 	reWithTag2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*<none>\s`)
 	reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2 + `\s`)
 	if !reWithTag2.MatchString(out) {
-		t.Fatalf("expected %q: %s", reWithTag2.String(), out)
+		c.Fatalf("expected %q: %s", reWithTag2.String(), out)
 	}
 	if !reWithDigest2.MatchString(out) {
-		t.Fatalf("expected %q: %s", reWithDigest2.String(), out)
+		c.Fatalf("expected %q: %s", reWithDigest2.String(), out)
 	}
 
 	// list images
-	c = exec.Command(dockerBinary, "images", "--digests")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "images", "--digests")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error listing images: %s, %v", out, err)
+		c.Fatalf("error listing images: %s, %v", out, err)
 	}
 
 	// make sure image 1 has repo, tag, digest
 	if !reWithTag1.MatchString(out) {
-		t.Fatalf("expected %q: %s", re1.String(), out)
+		c.Fatalf("expected %q: %s", re1.String(), out)
 	}
 	// make sure image 2 has repo, tag, digest
 	if !reWithTag2.MatchString(out) {
-		t.Fatalf("expected %q: %s", re2.String(), out)
+		c.Fatalf("expected %q: %s", re2.String(), out)
 	}
 	// make sure busybox has tag, but not digest
 	busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*<none>\s`)
 	if !busyboxRe.MatchString(out) {
-		t.Fatalf("expected %q: %s", busyboxRe.String(), out)
+		c.Fatalf("expected %q: %s", busyboxRe.String(), out)
 	}
-
-	logDone("by_digest - list images with digests")
 }
 
-func TestDeleteImageByIDOnlyPulledByDigest(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) {
 	pushDigest, err := setupImage()
 	if err != nil {
-		t.Fatalf("error setting up image: %v", err)
+		c.Fatalf("error setting up image: %v", err)
 	}
 
 	// pull from the registry using the <name>@<digest> reference
 	imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
-	c := exec.Command(dockerBinary, "pull", imageReference)
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "pull", imageReference)
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error pulling by digest: %s, %v", out, err)
+		c.Fatalf("error pulling by digest: %s, %v", out, err)
 	}
 	// just in case...
-	defer deleteImages(imageReference)
 
 	imageID, err := inspectField(imageReference, ".Id")
 	if err != nil {
-		t.Fatalf("error inspecting image id: %v", err)
+		c.Fatalf("error inspecting image id: %v", err)
 	}
 
-	c = exec.Command(dockerBinary, "rmi", imageID)
-	if _, err := runCommand(c); err != nil {
-		t.Fatalf("error deleting image by id: %v", err)
+	cmd = exec.Command(dockerBinary, "rmi", imageID)
+	if _, err := runCommand(cmd); err != nil {
+		c.Fatalf("error deleting image by id: %v", err)
 	}
-
-	logDone("by_digest - delete image by id only pulled by digest")
 }
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
index 8d596bd..ef157c6 100644
--- a/integration-cli/docker_cli_commit_test.go
+++ b/integration-cli/docker_cli_commit_test.go
@@ -3,149 +3,127 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestCommitAfterContainerIsDone(t *testing.T) {
+func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %s, %v", out, err)
+		c.Fatalf("failed to run container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID)
 	if _, _, err = runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("error thrown while waiting for container: %s, %v", out, err)
+		c.Fatalf("error thrown while waiting for container: %s, %v", out, err)
 	}
 
 	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID)
 	out, _, err = runCommandWithOutput(commitCmd)
 	if err != nil {
-		t.Fatalf("failed to commit container to image: %s, %v", out, err)
+		c.Fatalf("failed to commit container to image: %s, %v", out, err)
 	}
 
-	cleanedImageID := stripTrailingCharacters(out)
+	cleanedImageID := strings.TrimSpace(out)
 
 	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID)
 	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("failed to inspect image: %s, %v", out, err)
+		c.Fatalf("failed to inspect image: %s, %v", out, err)
 	}
-
-	deleteContainer(cleanedContainerID)
-	deleteImages(cleanedImageID)
-
-	logDone("commit - echo foo and commit the image")
 }
 
-func TestCommitWithoutPause(t *testing.T) {
+func (s *DockerSuite) TestCommitWithoutPause(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %s, %v", out, err)
+		c.Fatalf("failed to run container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID)
 	if _, _, err = runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("error thrown while waiting for container: %s, %v", out, err)
+		c.Fatalf("error thrown while waiting for container: %s, %v", out, err)
 	}
 
 	commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID)
 	out, _, err = runCommandWithOutput(commitCmd)
 	if err != nil {
-		t.Fatalf("failed to commit container to image: %s, %v", out, err)
+		c.Fatalf("failed to commit container to image: %s, %v", out, err)
 	}
 
-	cleanedImageID := stripTrailingCharacters(out)
+	cleanedImageID := strings.TrimSpace(out)
 
 	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID)
 	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("failed to inspect image: %s, %v", out, err)
+		c.Fatalf("failed to inspect image: %s, %v", out, err)
 	}
-
-	deleteContainer(cleanedContainerID)
-	deleteImages(cleanedImageID)
-
-	logDone("commit - echo foo and commit the image with --pause=false")
 }
 
 //test commit a paused container should not unpause it after commit
-func TestCommitPausedContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCommitPausedContainer(c *check.C) {
 	defer unpauseAllContainers()
 	cmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox")
 	out, _, _, err := runCommandWithStdoutStderr(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	cmd = exec.Command(dockerBinary, "pause", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(cmd)
 	if err != nil {
-		t.Fatalf("failed to pause container: %v, output: %q", err, out)
+		c.Fatalf("failed to pause container: %v, output: %q", err, out)
 	}
 
 	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID)
 	out, _, err = runCommandWithOutput(commitCmd)
 	if err != nil {
-		t.Fatalf("failed to commit container to image: %s, %v", out, err)
-	}
-	cleanedImageID := stripTrailingCharacters(out)
-	defer deleteImages(cleanedImageID)
-
-	cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Paused}}", cleanedContainerID)
-	out, _, _, err = runCommandWithStdoutStderr(cmd)
-	if err != nil {
-		t.Fatalf("failed to inspect container: %v, output: %q", err, out)
+		c.Fatalf("failed to commit container to image: %s, %v", out, err)
 	}
 
+	out, err = inspectField(cleanedContainerID, "State.Paused")
+	c.Assert(err, check.IsNil)
 	if !strings.Contains(out, "true") {
-		t.Fatalf("commit should not unpause a paused container")
+		c.Fatalf("commit should not unpause a paused container")
 	}
-
-	logDone("commit - commit a paused container will not unpause it")
 }
 
-func TestCommitNewFile(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCommitNewFile(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "commit", "commiter")
 	imageID, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	imageID = strings.Trim(imageID, "\r\n")
-	defer deleteImages(imageID)
 
 	cmd = exec.Command(dockerBinary, "run", imageID, "cat", "/foo")
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if actual := strings.Trim(out, "\r\n"); actual != "koye" {
-		t.Fatalf("expected output koye received %q", actual)
+		c.Fatalf("expected output koye received %q", actual)
 	}
 
-	logDone("commit - commit file and read")
 }
 
-func TestCommitHardlink(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCommitHardlink(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2")
 	firstOuput, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	chunks := strings.Split(strings.TrimSpace(firstOuput), " ")
@@ -158,21 +136,20 @@
 		}
 	}
 	if !found {
-		t.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])
+		c.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])
 	}
 
 	cmd = exec.Command(dockerBinary, "commit", "hardlinks", "hardlinks")
 	imageID, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(imageID, err)
+		c.Fatal(imageID, err)
 	}
 	imageID = strings.Trim(imageID, "\r\n")
-	defer deleteImages(imageID)
 
 	cmd = exec.Command(dockerBinary, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2")
 	secondOuput, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	chunks = strings.Split(strings.TrimSpace(secondOuput), " ")
@@ -185,68 +162,60 @@
 		}
 	}
 	if !found {
-		t.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])
+		c.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])
 	}
 
-	logDone("commit - commit hardlinks")
 }
 
-func TestCommitTTY(t *testing.T) {
-	defer deleteImages("ttytest")
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCommitTTY(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "commit", "tty", "ttytest")
 	imageID, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	imageID = strings.Trim(imageID, "\r\n")
 
 	cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("commit - commit tty")
 }
 
-func TestCommitWithHostBindMount(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest")
 	imageID, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(imageID, err)
+		c.Fatal(imageID, err)
 	}
 
 	imageID = strings.Trim(imageID, "\r\n")
-	defer deleteImages(imageID)
 
 	cmd = exec.Command(dockerBinary, "run", "bindtest", "true")
 
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("commit - commit bind mounted file")
 }
 
-func TestCommitChange(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCommitChange(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "--name", "test", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "commit",
@@ -257,25 +226,68 @@
 		"test", "test-commit")
 	imageId, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(imageId, err)
+		c.Fatal(imageId, err)
 	}
 	imageId = strings.Trim(imageId, "\r\n")
-	defer deleteImages(imageId)
 
 	expected := map[string]string{
-		"Config.ExposedPorts": "map[8080/tcp:map[]]",
+		"Config.ExposedPorts": "map[8080/tcp:{}]",
 		"Config.Env":          "[DEBUG=true test=1 PATH=/foo]",
 	}
 
 	for conf, value := range expected {
 		res, err := inspectField(imageId, conf)
-		if err != nil {
-			t.Errorf("failed to get value %s, error: %s", conf, err)
-		}
+		c.Assert(err, check.IsNil)
 		if res != value {
-			t.Errorf("%s('%s'), expected %s", conf, res, value)
+			c.Errorf("%s('%s'), expected %s", conf, res, value)
 		}
 	}
 
-	logDone("commit - commit --change")
+}
+
+// TODO: commit --run is deprecated, remove this once --run is removed
+func (s *DockerSuite) TestCommitMergeConfigRun(c *check.C) {
+	name := "commit-test"
+	out, _ := dockerCmd(c, "run", "-d", "-e=FOO=bar", "busybox", "/bin/sh", "-c", "echo testing > /tmp/foo")
+	id := strings.TrimSpace(out)
+
+	dockerCmd(c, "commit", `--run={"Cmd": ["cat", "/tmp/foo"]}`, id, "commit-test")
+
+	out, _ = dockerCmd(c, "run", "--name", name, "commit-test")
+	if strings.TrimSpace(out) != "testing" {
+		c.Fatal("run config in committed container was not merged")
+	}
+
+	type cfg struct {
+		Env []string
+		Cmd []string
+	}
+	config1 := cfg{}
+	if err := inspectFieldAndMarshall(id, "Config", &config1); err != nil {
+		c.Fatal(err)
+	}
+	config2 := cfg{}
+	if err := inspectFieldAndMarshall(name, "Config", &config2); err != nil {
+		c.Fatal(err)
+	}
+
+	// Env has at least PATH loaded as well here, so let's just grab the FOO one
+	var env1, env2 string
+	for _, e := range config1.Env {
+		if strings.HasPrefix(e, "FOO") {
+			env1 = e
+			break
+		}
+	}
+	for _, e := range config2.Env {
+		if strings.HasPrefix(e, "FOO") {
+			env2 = e
+			break
+		}
+	}
+
+	if len(config1.Env) != len(config2.Env) || env1 != env2 && env2 != "" {
+		c.Fatalf("expected envs to match: %v - %v", config1.Env, config2.Env)
+	}
+
 }
diff --git a/integration-cli/docker_cli_config_test.go b/integration-cli/docker_cli_config_test.go
new file mode 100644
index 0000000..5ccd7af
--- /dev/null
+++ b/integration-cli/docker_cli_config_test.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestConfigHttpHeader(c *check.C) {
+	testRequires(c, UnixCli) // Can't set/unset HOME on windows right now
+	// We either need a level of Go that supports Unsetenv (for cases
+	// when HOME/USERPROFILE isn't set), or we need to be able to use
+	// os/user but user.Current() only works if we aren't statically compiling
+
+	var headers map[string][]string
+
+	server := httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			headers = r.Header
+		}))
+	defer server.Close()
+
+	homeKey := homedir.Key()
+	homeVal := homedir.Get()
+	tmpDir, _ := ioutil.TempDir("", "fake-home")
+	defer os.RemoveAll(tmpDir)
+
+	dotDocker := filepath.Join(tmpDir, ".docker")
+	os.Mkdir(dotDocker, 0600)
+	tmpCfg := filepath.Join(dotDocker, "config.json")
+
+	defer func() { os.Setenv(homeKey, homeVal) }()
+	os.Setenv(homeKey, tmpDir)
+
+	data := `{
+		"HttpHeaders": { "MyHeader": "MyValue" }
+	}`
+
+	err := ioutil.WriteFile(tmpCfg, []byte(data), 0600)
+	if err != nil {
+		c.Fatalf("Err creating file(%s): %v", tmpCfg, err)
+	}
+
+	cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps")
+	out, _, _ := runCommandWithOutput(cmd)
+
+	if headers["Myheader"] == nil || headers["Myheader"][0] != "MyValue" {
+		c.Fatalf("Missing/bad header: %q\nout:%v", headers, out)
+	}
+}
diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go
index db5f363..fd63bdb 100644
--- a/integration-cli/docker_cli_cp_test.go
+++ b/integration-cli/docker_cli_cp_test.go
@@ -9,7 +9,8 @@
 	"path"
 	"path/filepath"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 const (
@@ -24,27 +25,26 @@
 
 // Test for #5656
 // Check that garbage paths don't escape the container's rootfs
-func TestCpGarbagePath(t *testing.T) {
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+func (s *DockerSuite) TestCpGarbagePath(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	hostFile, err := os.Create(cpFullPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer hostFile.Close()
 	defer os.RemoveAll(cpTestPathParent)
@@ -53,7 +53,7 @@
 
 	tmpdir, err := ioutil.TempDir("", "docker-integration")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	tmpname := filepath.Join(tmpdir, cpTestName)
@@ -61,52 +61,47 @@
 
 	path := path.Join("../../../../../../../../../../../../", cpFullPath)
 
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
-	if err != nil {
-		t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir)
 
 	file, _ := os.Open(tmpname)
 	defer file.Close()
 
 	test, err := ioutil.ReadAll(file)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if string(test) == cpHostContents {
-		t.Errorf("output matched host file -- garbage path can escape container rootfs")
+		c.Errorf("output matched host file -- garbage path can escape container rootfs")
 	}
 
 	if string(test) != cpContainerContents {
-		t.Errorf("output doesn't match the input for garbage path")
+		c.Errorf("output doesn't match the input for garbage path")
 	}
 
-	logDone("cp - garbage paths relative to container's rootfs")
 }
 
 // Check that relative paths are relative to the container's rootfs
-func TestCpRelativePath(t *testing.T) {
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+func (s *DockerSuite) TestCpRelativePath(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	hostFile, err := os.Create(cpFullPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer hostFile.Close()
 	defer os.RemoveAll(cpTestPathParent)
@@ -116,7 +111,7 @@
 	tmpdir, err := ioutil.TempDir("", "docker-integration")
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	tmpname := filepath.Join(tmpdir, cpTestName)
@@ -128,55 +123,50 @@
 		// get this unix-path manipulation on windows with filepath.
 		relPath = cpFullPath[1:]
 	} else {
-		t.Fatalf("path %s was assumed to be an absolute path", cpFullPath)
+		c.Fatalf("path %s was assumed to be an absolute path", cpFullPath)
 	}
 
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+relPath, tmpdir)
-	if err != nil {
-		t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, relPath, err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":"+relPath, tmpdir)
 
 	file, _ := os.Open(tmpname)
 	defer file.Close()
 
 	test, err := ioutil.ReadAll(file)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if string(test) == cpHostContents {
-		t.Errorf("output matched host file -- relative path can escape container rootfs")
+		c.Errorf("output matched host file -- relative path can escape container rootfs")
 	}
 
 	if string(test) != cpContainerContents {
-		t.Errorf("output doesn't match the input for relative path")
+		c.Errorf("output doesn't match the input for relative path")
 	}
 
-	logDone("cp - relative paths relative to container's rootfs")
 }
 
 // Check that absolute paths are relative to the container's rootfs
-func TestCpAbsolutePath(t *testing.T) {
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+func (s *DockerSuite) TestCpAbsolutePath(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath)
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	hostFile, err := os.Create(cpFullPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer hostFile.Close()
 	defer os.RemoveAll(cpTestPathParent)
@@ -186,7 +176,7 @@
 	tmpdir, err := ioutil.TempDir("", "docker-integration")
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	tmpname := filepath.Join(tmpdir, cpTestName)
@@ -194,53 +184,48 @@
 
 	path := cpFullPath
 
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
-	if err != nil {
-		t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir)
 
 	file, _ := os.Open(tmpname)
 	defer file.Close()
 
 	test, err := ioutil.ReadAll(file)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if string(test) == cpHostContents {
-		t.Errorf("output matched host file -- absolute path can escape container rootfs")
+		c.Errorf("output matched host file -- absolute path can escape container rootfs")
 	}
 
 	if string(test) != cpContainerContents {
-		t.Errorf("output doesn't match the input for absolute path")
+		c.Errorf("output doesn't match the input for absolute path")
 	}
 
-	logDone("cp - absolute paths relative to container's rootfs")
 }
 
 // Test for #5619
 // Check that absolute symlinks are still relative to the container's rootfs
-func TestCpAbsoluteSymlink(t *testing.T) {
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path")
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	hostFile, err := os.Create(cpFullPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer hostFile.Close()
 	defer os.RemoveAll(cpTestPathParent)
@@ -250,7 +235,7 @@
 	tmpdir, err := ioutil.TempDir("", "docker-integration")
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	tmpname := filepath.Join(tmpdir, cpTestName)
@@ -258,53 +243,48 @@
 
 	path := path.Join("/", "container_path")
 
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
-	if err != nil {
-		t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir)
 
 	file, _ := os.Open(tmpname)
 	defer file.Close()
 
 	test, err := ioutil.ReadAll(file)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if string(test) == cpHostContents {
-		t.Errorf("output matched host file -- absolute symlink can escape container rootfs")
+		c.Errorf("output matched host file -- absolute symlink can escape container rootfs")
 	}
 
 	if string(test) != cpContainerContents {
-		t.Errorf("output doesn't match the input for absolute symlink")
+		c.Errorf("output doesn't match the input for absolute symlink")
 	}
 
-	logDone("cp - absolute symlink relative to container's rootfs")
 }
 
 // Test for #5619
 // Check that symlinks which are part of the resource path are still relative to the container's rootfs
-func TestCpSymlinkComponent(t *testing.T) {
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path")
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	hostFile, err := os.Create(cpFullPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer hostFile.Close()
 	defer os.RemoveAll(cpTestPathParent)
@@ -314,7 +294,7 @@
 	tmpdir, err := ioutil.TempDir("", "docker-integration")
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	tmpname := filepath.Join(tmpdir, cpTestName)
@@ -322,237 +302,333 @@
 
 	path := path.Join("/", "container_path", cpTestName)
 
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
-	if err != nil {
-		t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir)
 
 	file, _ := os.Open(tmpname)
 	defer file.Close()
 
 	test, err := ioutil.ReadAll(file)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if string(test) == cpHostContents {
-		t.Errorf("output matched host file -- symlink path component can escape container rootfs")
+		c.Errorf("output matched host file -- symlink path component can escape container rootfs")
 	}
 
 	if string(test) != cpContainerContents {
-		t.Errorf("output doesn't match the input for symlink path component")
+		c.Errorf("output doesn't match the input for symlink path component")
 	}
 
-	logDone("cp - symlink path components relative to container's rootfs")
 }
 
 // Check that cp with unprivileged user doesn't return any error
-func TestCpUnprivilegedUser(t *testing.T) {
-	testRequires(t, UnixCli) // uses chmod/su: not available on windows
+func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) {
+	testRequires(c, UnixCli) // uses chmod/su: not available on windows
 
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName)
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName)
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	tmpdir, err := ioutil.TempDir("", "docker-integration")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	defer os.RemoveAll(tmpdir)
 
 	if err = os.Chmod(tmpdir, 0777); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	path := cpTestName
 
 	_, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir))
 	if err != nil {
-		t.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err)
+		c.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err)
 	}
 
-	logDone("cp - unprivileged user")
 }
 
-func TestCpVolumePath(t *testing.T) {
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestCpSpecialFiles(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	outDir, err := ioutil.TempDir("", "cp-test-special-files")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(outDir)
+
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
+	}
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
+	}
+
+	// Copy actual /etc/resolv.conf
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/etc/resolv.conf", outDir)
+
+	expected, err := ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/resolv.conf")
+	actual, err := ioutil.ReadFile(outDir + "/resolv.conf")
+
+	if !bytes.Equal(actual, expected) {
+		c.Fatalf("Expected copied file to be duplicate of the container resolvconf")
+	}
+
+	// Copy actual /etc/hosts
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/etc/hosts", outDir)
+
+	expected, err = ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/hosts")
+	actual, err = ioutil.ReadFile(outDir + "/hosts")
+
+	if !bytes.Equal(actual, expected) {
+		c.Fatalf("Expected copied file to be duplicate of the container hosts")
+	}
+
+	// Copy actual /etc/resolv.conf
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/etc/hostname", outDir)
+
+	expected, err = ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/hostname")
+	actual, err = ioutil.ReadFile(outDir + "/hostname")
+
+	if !bytes.Equal(actual, expected) {
+		c.Fatalf("Expected copied file to be duplicate of the container resolvconf")
+	}
+
+}
+
+func (s *DockerSuite) TestCpVolumePath(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	tmpDir, err := ioutil.TempDir("", "cp-test-volumepath")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpDir)
 	outDir, err := ioutil.TempDir("", "cp-test-volumepath-out")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(outDir)
 	_, err = os.Create(tmpDir + "/test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	out, exitCode, err := dockerCmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar")
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+	out, exitCode := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	// Copy actual volume path
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo", outDir)
-	if err != nil {
-		t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/foo", outDir)
+
 	stat, err := os.Stat(outDir + "/foo")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !stat.IsDir() {
-		t.Fatal("expected copied content to be dir")
+		c.Fatal("expected copied content to be dir")
 	}
 	stat, err = os.Stat(outDir + "/foo/bar")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if stat.IsDir() {
-		t.Fatal("Expected file `bar` to be a file")
+		c.Fatal("Expected file `bar` to be a file")
 	}
 
 	// Copy file nested in volume
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo/bar", outDir)
-	if err != nil {
-		t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/foo/bar", outDir)
+
 	stat, err = os.Stat(outDir + "/bar")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if stat.IsDir() {
-		t.Fatal("Expected file `bar` to be a file")
+		c.Fatal("Expected file `bar` to be a file")
 	}
 
 	// Copy Bind-mounted dir
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz", outDir)
-	if err != nil {
-		t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/baz", outDir)
 	stat, err = os.Stat(outDir + "/baz")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !stat.IsDir() {
-		t.Fatal("Expected `baz` to be a dir")
+		c.Fatal("Expected `baz` to be a dir")
 	}
 
 	// Copy file nested in bind-mounted dir
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz/test", outDir)
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/baz/test", outDir)
 	fb, err := ioutil.ReadFile(outDir + "/baz/test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	fb2, err := ioutil.ReadFile(tmpDir + "/test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !bytes.Equal(fb, fb2) {
-		t.Fatalf("Expected copied file to be duplicate of bind-mounted file")
+		c.Fatalf("Expected copied file to be duplicate of bind-mounted file")
 	}
 
 	// Copy bind-mounted file
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", outDir)
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/test", outDir)
 	fb, err = ioutil.ReadFile(outDir + "/test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	fb2, err = ioutil.ReadFile(tmpDir + "/test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !bytes.Equal(fb, fb2) {
-		t.Fatalf("Expected copied file to be duplicate of bind-mounted file")
+		c.Fatalf("Expected copied file to be duplicate of bind-mounted file")
 	}
 
-	logDone("cp - volume path")
 }
 
-func TestCpToDot(t *testing.T) {
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
-	if err != nil || exitCode != 0 {
-		t.Fatal("failed to create a container", out, err)
+func (s *DockerSuite) TestCpToDot(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cleanedContainerID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatal("failed to set up container", out, err)
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
 	}
 
 	tmpdir, err := ioutil.TempDir("", "docker-integration")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpdir)
 	cwd, err := os.Getwd()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.Chdir(cwd)
 	if err := os.Chdir(tmpdir); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	_, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", ".")
-	if err != nil {
-		t.Fatalf("couldn't docker cp to \".\" path: %s", err)
-	}
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/test", ".")
 	content, err := ioutil.ReadFile("./test")
 	if string(content) != "lololol\n" {
-		t.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n")
+		c.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n")
 	}
-	logDone("cp - to dot path")
 }
 
-func TestCpToStdout(t *testing.T) {
-	out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
-	if err != nil || exitCode != 0 {
-		t.Fatalf("failed to create a container:%s\n%s", out, err)
+func (s *DockerSuite) TestCpToStdout(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
+	if exitCode != 0 {
+		c.Fatalf("failed to create a container:%s\n", out)
 	}
 
-	cID := stripTrailingCharacters(out)
-	defer deleteContainer(cID)
+	cID := strings.TrimSpace(out)
 
-	out, _, err = dockerCmd(t, "wait", cID)
-	if err != nil || stripTrailingCharacters(out) != "0" {
-		t.Fatalf("failed to set up container:%s\n%s", out, err)
+	out, _ = dockerCmd(c, "wait", cID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatalf("failed to set up container:%s\n", out)
 	}
 
-	out, _, err = runCommandPipelineWithOutput(
+	out, _, err := runCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "cp", cID+":/test", "-"),
 		exec.Command("tar", "-vtf", "-"))
+
 	if err != nil {
-		t.Fatalf("Failed to run commands: %s", err)
+		c.Fatalf("Failed to run commands: %s", err)
 	}
 
 	if !strings.Contains(out, "test") || !strings.Contains(out, "-rw") {
-		t.Fatalf("Missing file from tar TOC:\n%s", out)
+		c.Fatalf("Missing file from tar TOC:\n%s", out)
 	}
-	logDone("cp - to stdout")
+}
+
+func (s *DockerSuite) TestCpNameHasColon(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
+	}
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
+	}
+
+	tmpdir, err := ioutil.TempDir("", "docker-integration")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	_, _ = dockerCmd(c, "cp", cleanedContainerID+":/te:s:t", tmpdir)
+	content, err := ioutil.ReadFile(tmpdir + "/te:s:t")
+	if string(content) != "lololol\n" {
+		c.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n")
+	}
+}
+
+func (s *DockerSuite) TestCopyAndRestart(c *check.C) {
+	expectedMsg := "hello"
+	out, err := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", expectedMsg).CombinedOutput()
+	if err != nil {
+		c.Fatal(string(out), err)
+	}
+	id := strings.TrimSpace(string(out))
+
+	if out, err = exec.Command(dockerBinary, "wait", id).CombinedOutput(); err != nil {
+		c.Fatalf("unable to wait for container: %s", err)
+	}
+
+	status := strings.TrimSpace(string(out))
+	if status != "0" {
+		c.Fatalf("container exited with status %s", status)
+	}
+
+	tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-")
+	if err != nil {
+		c.Fatalf("unable to make temporary directory: %s", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	if _, err = exec.Command(dockerBinary, "cp", fmt.Sprintf("%s:/etc/issue", id), tmpDir).CombinedOutput(); err != nil {
+		c.Fatalf("unable to copy from busybox container: %s", err)
+	}
+
+	if out, err = exec.Command(dockerBinary, "start", "-a", id).CombinedOutput(); err != nil {
+		c.Fatalf("unable to start busybox container after copy: %s - %s", err, out)
+	}
+
+	msg := strings.TrimSpace(string(out))
+	if msg != expectedMsg {
+		c.Fatalf("expected %q but got %q", expectedMsg, msg)
+	}
 }
diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go
index e32400e..019ea97 100644
--- a/integration-cli/docker_cli_create_test.go
+++ b/integration-cli/docker_cli_create_test.go
@@ -2,31 +2,31 @@
 
 import (
 	"encoding/json"
+	"fmt"
 	"os"
 	"os/exec"
 	"reflect"
-	"testing"
+	"strings"
 	"time"
 
 	"github.com/docker/docker/nat"
+	"github.com/go-check/check"
 )
 
 // Make sure we can create a simple container with some args
-func TestCreateArgs(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestCreateArgs(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "create", "busybox", "command", "arg1", "arg2", "arg with space")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
 	out, _, err = runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("out should've been a container id: %s, %v", out, err)
+		c.Fatalf("out should've been a container id: %s, %v", out, err)
 	}
 
 	containers := []struct {
@@ -37,48 +37,46 @@
 		Image   string
 	}{}
 	if err := json.Unmarshal([]byte(out), &containers); err != nil {
-		t.Fatalf("Error inspecting the container: %s", err)
+		c.Fatalf("Error inspecting the container: %s", err)
 	}
 	if len(containers) != 1 {
-		t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
+		c.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
 	}
 
-	c := containers[0]
-	if c.Path != "command" {
-		t.Fatalf("Unexpected container path. Expected command, received: %s", c.Path)
+	cont := containers[0]
+	if cont.Path != "command" {
+		c.Fatalf("Unexpected container path. Expected command, received: %s", cont.Path)
 	}
 
 	b := false
 	expected := []string{"arg1", "arg2", "arg with space"}
 	for i, arg := range expected {
-		if arg != c.Args[i] {
+		if arg != cont.Args[i] {
 			b = true
 			break
 		}
 	}
-	if len(c.Args) != len(expected) || b {
-		t.Fatalf("Unexpected args. Expected %v, received: %v", expected, c.Args)
+	if len(cont.Args) != len(expected) || b {
+		c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args)
 	}
 
-	logDone("create - args")
 }
 
 // Make sure we can set hostconfig options too
-func TestCreateHostConfig(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCreateHostConfig(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "create", "-P", "busybox", "echo")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
 	out, _, err = runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("out should've been a container id: %s, %v", out, err)
+		c.Fatalf("out should've been a container id: %s, %v", out, err)
 	}
 
 	containers := []struct {
@@ -87,39 +85,37 @@
 		}
 	}{}
 	if err := json.Unmarshal([]byte(out), &containers); err != nil {
-		t.Fatalf("Error inspecting the container: %s", err)
+		c.Fatalf("Error inspecting the container: %s", err)
 	}
 	if len(containers) != 1 {
-		t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
+		c.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
 	}
 
-	c := containers[0]
-	if c.HostConfig == nil {
-		t.Fatalf("Expected HostConfig, got none")
+	cont := containers[0]
+	if cont.HostConfig == nil {
+		c.Fatalf("Expected HostConfig, got none")
 	}
 
-	if !c.HostConfig.PublishAllPorts {
-		t.Fatalf("Expected PublishAllPorts, got false")
+	if !cont.HostConfig.PublishAllPorts {
+		c.Fatalf("Expected PublishAllPorts, got false")
 	}
 
-	logDone("create - hostconfig")
 }
 
-func TestCreateWithPortRange(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCreateWithPortRange(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
 	out, _, err = runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("out should've been a container id: %s, %v", out, err)
+		c.Fatalf("out should've been a container id: %s, %v", out, err)
 	}
 
 	containers := []struct {
@@ -128,47 +124,45 @@
 		}
 	}{}
 	if err := json.Unmarshal([]byte(out), &containers); err != nil {
-		t.Fatalf("Error inspecting the container: %s", err)
+		c.Fatalf("Error inspecting the container: %s", err)
 	}
 	if len(containers) != 1 {
-		t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
+		c.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
 	}
 
-	c := containers[0]
-	if c.HostConfig == nil {
-		t.Fatalf("Expected HostConfig, got none")
+	cont := containers[0]
+	if cont.HostConfig == nil {
+		c.Fatalf("Expected HostConfig, got none")
 	}
 
-	if len(c.HostConfig.PortBindings) != 4 {
-		t.Fatalf("Expected 4 ports bindings, got %d", len(c.HostConfig.PortBindings))
+	if len(cont.HostConfig.PortBindings) != 4 {
+		c.Fatalf("Expected 4 ports bindings, got %d", len(cont.HostConfig.PortBindings))
 	}
-	for k, v := range c.HostConfig.PortBindings {
+	for k, v := range cont.HostConfig.PortBindings {
 		if len(v) != 1 {
-			t.Fatalf("Expected 1 ports binding, for the port  %s but found %s", k, v)
+			c.Fatalf("Expected 1 ports binding, for the port  %s but found %s", k, v)
 		}
 		if k.Port() != v[0].HostPort {
-			t.Fatalf("Expected host port %d to match published port  %d", k.Port(), v[0].HostPort)
+			c.Fatalf("Expected host port %d to match published port  %d", k.Port(), v[0].HostPort)
 		}
 	}
 
-	logDone("create - port range")
 }
 
-func TestCreateWithiLargePortRange(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCreateWithiLargePortRange(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
 	out, _, err = runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("out should've been a container id: %s, %v", out, err)
+		c.Fatalf("out should've been a container id: %s, %v", out, err)
 	}
 
 	containers := []struct {
@@ -177,130 +171,173 @@
 		}
 	}{}
 	if err := json.Unmarshal([]byte(out), &containers); err != nil {
-		t.Fatalf("Error inspecting the container: %s", err)
+		c.Fatalf("Error inspecting the container: %s", err)
 	}
 	if len(containers) != 1 {
-		t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
+		c.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers))
 	}
 
-	c := containers[0]
-	if c.HostConfig == nil {
-		t.Fatalf("Expected HostConfig, got none")
+	cont := containers[0]
+	if cont.HostConfig == nil {
+		c.Fatalf("Expected HostConfig, got none")
 	}
 
-	if len(c.HostConfig.PortBindings) != 65535 {
-		t.Fatalf("Expected 65535 ports bindings, got %d", len(c.HostConfig.PortBindings))
+	if len(cont.HostConfig.PortBindings) != 65535 {
+		c.Fatalf("Expected 65535 ports bindings, got %d", len(cont.HostConfig.PortBindings))
 	}
-	for k, v := range c.HostConfig.PortBindings {
+	for k, v := range cont.HostConfig.PortBindings {
 		if len(v) != 1 {
-			t.Fatalf("Expected 1 ports binding, for the port  %s but found %s", k, v)
+			c.Fatalf("Expected 1 ports binding, for the port  %s but found %s", k, v)
 		}
 		if k.Port() != v[0].HostPort {
-			t.Fatalf("Expected host port %d to match published port  %d", k.Port(), v[0].HostPort)
+			c.Fatalf("Expected host port %d to match published port  %d", k.Port(), v[0].HostPort)
 		}
 	}
 
-	logDone("create - large port range")
 }
 
 // "test123" should be printed by docker create + start
-func TestCreateEchoStdout(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCreateEchoStdout(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "create", "busybox", "echo", "test123")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "start", "-ai", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if out != "test123\n" {
-		t.Errorf("container should've printed 'test123', got %q", out)
+		c.Errorf("container should've printed 'test123', got %q", out)
 	}
 
-	logDone("create - echo test123")
 }
 
-func TestCreateVolumesCreated(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	name := "test_create_volume"
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-v", "/foo", "busybox")); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	dir, err := inspectFieldMap(name, "Volumes", "/foo")
 	if err != nil {
-		t.Fatalf("Error getting volume host path: %q", err)
+		c.Fatalf("Error getting volume host path: %q", err)
 	}
 
 	if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) {
-		t.Fatalf("Volume was not created")
+		c.Fatalf("Volume was not created")
 	}
 	if err != nil {
-		t.Fatalf("Error statting volume host path: %q", err)
+		c.Fatalf("Error statting volume host path: %q", err)
 	}
 
-	logDone("create - volumes are created")
 }
 
-func TestCreateLabels(t *testing.T) {
+func (s *DockerSuite) TestCreateLabels(c *check.C) {
 	name := "test_create_labels"
 	expected := map[string]string{"k1": "v1", "k2": "v2"}
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox")); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	actual := make(map[string]string)
 	err := inspectFieldAndMarshall(name, "Config.Labels", &actual)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !reflect.DeepEqual(expected, actual) {
-		t.Fatalf("Expected %s got %s", expected, actual)
+		c.Fatalf("Expected %s got %s", expected, actual)
 	}
-
-	deleteAllContainers()
-
-	logDone("create - labels")
 }
 
-func TestCreateLabelFromImage(t *testing.T) {
+func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) {
 	imageName := "testcreatebuildlabel"
-	defer deleteImages(imageName)
 	_, err := buildImage(imageName,
 		`FROM busybox
 		LABEL k1=v1 k2=v2`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	name := "test_create_labels_from_image"
 	expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"}
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName)); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	actual := make(map[string]string)
 	err = inspectFieldAndMarshall(name, "Config.Labels", &actual)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if !reflect.DeepEqual(expected, actual) {
-		t.Fatalf("Expected %s got %s", expected, actual)
+		c.Fatalf("Expected %s got %s", expected, actual)
+	}
+}
+
+func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-h", "web.0", "busybox", "hostname")
+	if strings.TrimSpace(out) != "web.0" {
+		c.Fatalf("hostname not set, expected `web.0`, got: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestCreateRM(c *check.C) {
+	// Test to make sure we can 'rm' a new container that is in
+	// "Created" state, and has ever been run. Test "rm -f" too.
+
+	// create a container
+	createCmd := exec.Command(dockerBinary, "create", "busybox")
+	out, _, err := runCommandWithOutput(createCmd)
+	if err != nil {
+		c.Fatalf("Failed to create container:%s\n%s", out, err)
+	}
+	cID := strings.TrimSpace(out)
+
+	rmCmd := exec.Command(dockerBinary, "rm", cID)
+	out, _, err = runCommandWithOutput(rmCmd)
+	if err != nil {
+		c.Fatalf("Failed to rm container:%s\n%s", out, err)
 	}
 
-	deleteAllContainers()
+	// Now do it again so we can "rm -f" this time
+	createCmd = exec.Command(dockerBinary, "create", "busybox")
+	out, _, err = runCommandWithOutput(createCmd)
+	if err != nil {
+		c.Fatalf("Failed to create 2nd container:%s\n%s", out, err)
+	}
 
-	logDone("create - labels from image")
+	cID = strings.TrimSpace(out)
+	rmCmd = exec.Command(dockerBinary, "rm", "-f", cID)
+	out, _, err = runCommandWithOutput(rmCmd)
+	if err != nil {
+		c.Fatalf("Failed to rm -f container:%s\n%s", out, err)
+	}
+}
+
+func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	cmd := exec.Command(dockerBinary, "create", "busybox")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out)
+	}
+	id := strings.TrimSpace(out)
+
+	cmd = exec.Command(dockerBinary, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox")
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatalf("Create container with ipc mode container should success with non running container: %s\n%s", out, err)
+	}
 }
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index c515a63..d668781 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -6,148 +6,127 @@
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
+	"net"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"regexp"
+	"strconv"
 	"strings"
-	"testing"
 	"time"
 
+	"github.com/docker/libnetwork/iptables"
 	"github.com/docker/libtrust"
+	"github.com/go-check/check"
 )
 
-func TestDaemonRestartWithRunningContainersPorts(t *testing.T) {
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatalf("Could not start daemon with busybox: %v", err)
+func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
 	}
-	defer d.Stop()
 
-	if out, err := d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil {
-		t.Fatalf("Could not run top1: err=%v\n%s", err, out)
+	if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil {
+		c.Fatalf("Could not run top1: err=%v\n%s", err, out)
 	}
 	// --restart=no by default
-	if out, err := d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil {
-		t.Fatalf("Could not run top2: err=%v\n%s", err, out)
+	if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil {
+		c.Fatalf("Could not run top2: err=%v\n%s", err, out)
 	}
 
 	testRun := func(m map[string]bool, prefix string) {
 		var format string
-		for c, shouldRun := range m {
-			out, err := d.Cmd("ps")
+		for cont, shouldRun := range m {
+			out, err := s.d.Cmd("ps")
 			if err != nil {
-				t.Fatalf("Could not run ps: err=%v\n%q", err, out)
+				c.Fatalf("Could not run ps: err=%v\n%q", err, out)
 			}
 			if shouldRun {
 				format = "%scontainer %q is not running"
 			} else {
 				format = "%scontainer %q is running"
 			}
-			if shouldRun != strings.Contains(out, c) {
-				t.Fatalf(format, prefix, c)
+			if shouldRun != strings.Contains(out, cont) {
+				c.Fatalf(format, prefix, cont)
 			}
 		}
 	}
 
 	testRun(map[string]bool{"top1": true, "top2": true}, "")
 
-	if err := d.Restart(); err != nil {
-		t.Fatalf("Could not restart daemon: %v", err)
+	if err := s.d.Restart(); err != nil {
+		c.Fatalf("Could not restart daemon: %v", err)
 	}
-
 	testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ")
-
-	logDone("daemon - running containers on daemon restart")
 }
 
-func TestDaemonRestartWithVolumesRefs(t *testing.T) {
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
 	}
-	defer d.Stop()
 
-	if out, err := d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil {
-		t.Fatal(err, out)
+	if out, err := s.d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil {
+		c.Fatal(err, out)
 	}
-	if err := d.Restart(); err != nil {
-		t.Fatal(err)
+	if err := s.d.Restart(); err != nil {
+		c.Fatal(err)
 	}
-	if _, err := d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
-		t.Fatal(err)
+	if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
+		c.Fatal(err)
 	}
-	if out, err := d.Cmd("rm", "-fv", "volrestarttest2"); err != nil {
-		t.Fatal(err, out)
+	if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil {
+		c.Fatal(err, out)
 	}
-	v, err := d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1")
+	v, err := s.d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	volumes := make(map[string]string)
 	json.Unmarshal([]byte(v), &volumes)
 	if _, err := os.Stat(volumes["/foo"]); err != nil {
-		t.Fatalf("Expected volume to exist: %s - %s", volumes["/foo"], err)
+		c.Fatalf("Expected volume to exist: %s - %s", volumes["/foo"], err)
 	}
-
-	logDone("daemon - volume refs are restored")
 }
 
-func TestDaemonStartIptablesFalse(t *testing.T) {
-	d := NewDaemon(t)
-	if err := d.Start("--iptables=false"); err != nil {
-		t.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err)
+func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) {
+	if err := s.d.Start("--iptables=false"); err != nil {
+		c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err)
 	}
-	d.Stop()
-
-	logDone("daemon - started daemon with iptables=false")
 }
 
 // Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and
 // no longer has an IP associated, we should gracefully handle that case and associate
 // an IP with it rather than fail daemon start
-func TestDaemonStartBridgeWithoutIPAssociation(t *testing.T) {
-	d := NewDaemon(t)
+func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) {
 	// rather than depending on brctl commands to verify docker0 is created and up
 	// let's start the daemon and stop it, and then make a modification to run the
 	// actual test
-	if err := d.Start(); err != nil {
-		t.Fatalf("Could not start daemon: %v", err)
+	if err := s.d.Start(); err != nil {
+		c.Fatalf("Could not start daemon: %v", err)
 	}
-	if err := d.Stop(); err != nil {
-		t.Fatalf("Could not stop daemon: %v", err)
+	if err := s.d.Stop(); err != nil {
+		c.Fatalf("Could not stop daemon: %v", err)
 	}
 
 	// now we will remove the ip from docker0 and then try starting the daemon
 	ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0")
 	stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd)
 	if err != nil {
-		t.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr)
+		c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr)
 	}
 
-	if err := d.Start(); err != nil {
+	if err := s.d.Start(); err != nil {
 		warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix"
-		t.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning)
+		c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning)
 	}
-
-	// cleanup - stop the daemon if test passed
-	if err := d.Stop(); err != nil {
-		t.Fatalf("Could not stop daemon: %v", err)
-	}
-
-	logDone("daemon - successful daemon start when bridge has no IP association")
 }
 
-func TestDaemonIptablesClean(t *testing.T) {
-	defer deleteAllContainers()
-
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatalf("Could not start daemon with busybox: %v", err)
+func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
 	}
-	defer d.Stop()
 
-	if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
-		t.Fatalf("Could not run top: %s, %v", out, err)
+	if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
+		c.Fatalf("Could not run top: %s, %v", out, err)
 	}
 
 	// get output from iptables with container running
@@ -155,42 +134,36 @@
 	ipTablesCmd := exec.Command("iptables", "-nvL")
 	out, _, err := runCommandWithOutput(ipTablesCmd)
 	if err != nil {
-		t.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
+		c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
 	}
 
 	if !strings.Contains(out, ipTablesSearchString) {
-		t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)
+		c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)
 	}
 
-	if err := d.Stop(); err != nil {
-		t.Fatalf("Could not stop daemon: %v", err)
+	if err := s.d.Stop(); err != nil {
+		c.Fatalf("Could not stop daemon: %v", err)
 	}
 
 	// get output from iptables after restart
 	ipTablesCmd = exec.Command("iptables", "-nvL")
 	out, _, err = runCommandWithOutput(ipTablesCmd)
 	if err != nil {
-		t.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
+		c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
 	}
 
 	if strings.Contains(out, ipTablesSearchString) {
-		t.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out)
+		c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out)
 	}
-
-	logDone("daemon - run,iptables - iptables rules cleaned after daemon restart")
 }
 
-func TestDaemonIptablesCreate(t *testing.T) {
-	defer deleteAllContainers()
-
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatalf("Could not start daemon with busybox: %v", err)
+func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
 	}
-	defer d.Stop()
 
-	if out, err := d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil {
-		t.Fatalf("Could not run top: %s, %v", out, err)
+	if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil {
+		c.Fatalf("Could not run top: %s, %v", out, err)
 	}
 
 	// get output from iptables with container running
@@ -198,101 +171,94 @@
 	ipTablesCmd := exec.Command("iptables", "-nvL")
 	out, _, err := runCommandWithOutput(ipTablesCmd)
 	if err != nil {
-		t.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
+		c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
 	}
 
 	if !strings.Contains(out, ipTablesSearchString) {
-		t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)
+		c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out)
 	}
 
-	if err := d.Restart(); err != nil {
-		t.Fatalf("Could not restart daemon: %v", err)
+	if err := s.d.Restart(); err != nil {
+		c.Fatalf("Could not restart daemon: %v", err)
 	}
 
 	// make sure the container is not running
-	runningOut, err := d.Cmd("inspect", "--format='{{.State.Running}}'", "top")
+	runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", "top")
 	if err != nil {
-		t.Fatalf("Could not inspect on container: %s, %v", out, err)
+		c.Fatalf("Could not inspect on container: %s, %v", out, err)
 	}
 	if strings.TrimSpace(runningOut) != "true" {
-		t.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut))
+		c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut))
 	}
 
 	// get output from iptables after restart
 	ipTablesCmd = exec.Command("iptables", "-nvL")
 	out, _, err = runCommandWithOutput(ipTablesCmd)
 	if err != nil {
-		t.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
+		c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
 	}
 
 	if !strings.Contains(out, ipTablesSearchString) {
-		t.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out)
+		c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out)
 	}
-
-	logDone("daemon - run,iptables - iptables rules for always restarted container created after daemon restart")
 }
 
-func TestDaemonLoggingLevel(t *testing.T) {
-	d := NewDaemon(t)
+func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) {
+	c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level"))
+}
 
-	if err := d.Start("--log-level=bogus"); err == nil {
-		t.Fatal("Daemon should not have been able to start")
+func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) {
+	if err := s.d.Start("--log-level=debug"); err != nil {
+		c.Fatal(err)
 	}
-
-	d = NewDaemon(t)
-	if err := d.Start("--log-level=debug"); err != nil {
-		t.Fatal(err)
-	}
-	d.Stop()
-	content, _ := ioutil.ReadFile(d.logFile.Name())
+	content, _ := ioutil.ReadFile(s.d.logFile.Name())
 	if !strings.Contains(string(content), `level=debug`) {
-		t.Fatalf(`Missing level="debug" in log file:\n%s`, string(content))
+		c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content))
 	}
+}
 
-	d = NewDaemon(t)
-	if err := d.Start("--log-level=fatal"); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) {
+	// we creating new daemons to create new logFile
+	if err := s.d.Start("--log-level=fatal"); err != nil {
+		c.Fatal(err)
 	}
-	d.Stop()
-	content, _ = ioutil.ReadFile(d.logFile.Name())
+	content, _ := ioutil.ReadFile(s.d.logFile.Name())
 	if strings.Contains(string(content), `level=debug`) {
-		t.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content))
+		c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content))
 	}
-
-	d = NewDaemon(t)
-	if err := d.Start("-D"); err != nil {
-		t.Fatal(err)
-	}
-	d.Stop()
-	content, _ = ioutil.ReadFile(d.logFile.Name())
-	if !strings.Contains(string(content), `level=debug`) {
-		t.Fatalf(`Missing level="debug" in log file using -D:\n%s`, string(content))
-	}
-
-	d = NewDaemon(t)
-	if err := d.Start("--debug"); err != nil {
-		t.Fatal(err)
-	}
-	d.Stop()
-	content, _ = ioutil.ReadFile(d.logFile.Name())
-	if !strings.Contains(string(content), `level=debug`) {
-		t.Fatalf(`Missing level="debug" in log file using --debug:\n%s`, string(content))
-	}
-
-	d = NewDaemon(t)
-	if err := d.Start("--debug", "--log-level=fatal"); err != nil {
-		t.Fatal(err)
-	}
-	d.Stop()
-	content, _ = ioutil.ReadFile(d.logFile.Name())
-	if !strings.Contains(string(content), `level=debug`) {
-		t.Fatalf(`Missing level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content))
-	}
-
-	logDone("daemon - Logging Level")
 }
 
-func TestDaemonAllocatesListeningPort(t *testing.T) {
+func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) {
+	if err := s.d.Start("-D"); err != nil {
+		c.Fatal(err)
+	}
+	content, _ := ioutil.ReadFile(s.d.logFile.Name())
+	if !strings.Contains(string(content), `level=debug`) {
+		c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content))
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) {
+	if err := s.d.Start("--debug"); err != nil {
+		c.Fatal(err)
+	}
+	content, _ := ioutil.ReadFile(s.d.logFile.Name())
+	if !strings.Contains(string(content), `level=debug`) {
+		c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content))
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) {
+	if err := s.d.Start("--debug", "--log-level=fatal"); err != nil {
+		c.Fatal(err)
+	}
+	content, _ := ioutil.ReadFile(s.d.logFile.Name())
+	if !strings.Contains(string(content), `level=debug`) {
+		c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content))
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
 	listeningPorts := [][]string{
 		{"0.0.0.0", "0.0.0.0", "5678"},
 		{"127.0.0.1", "127.0.0.1", "1234"},
@@ -304,527 +270,898 @@
 		cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2]))
 	}
 
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(cmdArgs...); err != nil {
-		t.Fatalf("Could not start daemon with busybox: %v", err)
+	if err := s.d.StartWithBusybox(cmdArgs...); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
 	}
-	defer d.Stop()
 
 	for _, hostDirective := range listeningPorts {
-		output, err := d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true")
+		output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true")
 		if err == nil {
-			t.Fatalf("Container should not start, expected port already allocated error: %q", output)
+			c.Fatalf("Container should not start, expected port already allocated error: %q", output)
 		} else if !strings.Contains(output, "port is already allocated") {
-			t.Fatalf("Expected port is already allocated error: %q", output)
+			c.Fatalf("Expected port is already allocated error: %q", output)
 		}
 	}
-
-	logDone("daemon - daemon listening port is allocated")
 }
 
-// #9629
-func TestDaemonVolumesBindsRefs(t *testing.T) {
-	d := NewDaemon(t)
-
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatal(err)
-	}
-
-	tmp, err := ioutil.TempDir(os.TempDir(), "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
-	if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil {
-		t.Fatal(err)
-	}
-
-	if out, err := d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil {
-		t.Fatal(err, out)
-	}
-
-	if err := d.Restart(); err != nil {
-		t.Fatal(err)
-	}
-
-	if out, err := d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil {
-		t.Fatal(err, out)
-	}
-
-	logDone("daemon - bind refs in data-containers survive daemon restart")
-}
-
-func TestDaemonKeyGeneration(t *testing.T) {
+func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
 	// TODO: skip or update for Windows daemon
 	os.Remove("/etc/docker/key.json")
-	d := NewDaemon(t)
-	if err := d.Start(); err != nil {
-		t.Fatalf("Could not start daemon: %v", err)
+	if err := s.d.Start(); err != nil {
+		c.Fatalf("Could not start daemon: %v", err)
 	}
-	d.Stop()
+	s.d.Stop()
 
 	k, err := libtrust.LoadKeyFile("/etc/docker/key.json")
 	if err != nil {
-		t.Fatalf("Error opening key file")
+		c.Fatalf("Error opening key file")
 	}
 	kid := k.KeyID()
 	// Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF)
 	if len(kid) != 59 {
-		t.Fatalf("Bad key ID: %s", kid)
+		c.Fatalf("Bad key ID: %s", kid)
 	}
-
-	logDone("daemon - key generation")
 }
 
-func TestDaemonKeyMigration(t *testing.T) {
+func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) {
 	// TODO: skip or update for Windows daemon
 	os.Remove("/etc/docker/key.json")
 	k1, err := libtrust.GenerateECP256PrivateKey()
 	if err != nil {
-		t.Fatalf("Error generating private key: %s", err)
+		c.Fatalf("Error generating private key: %s", err)
 	}
 	if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil {
-		t.Fatalf("Error creating .docker directory: %s", err)
+		c.Fatalf("Error creating .docker directory: %s", err)
 	}
 	if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil {
-		t.Fatalf("Error saving private key: %s", err)
+		c.Fatalf("Error saving private key: %s", err)
 	}
 
-	d := NewDaemon(t)
-	if err := d.Start(); err != nil {
-		t.Fatalf("Could not start daemon: %v", err)
+	if err := s.d.Start(); err != nil {
+		c.Fatalf("Could not start daemon: %v", err)
 	}
-	d.Stop()
+	s.d.Stop()
 
 	k2, err := libtrust.LoadKeyFile("/etc/docker/key.json")
 	if err != nil {
-		t.Fatalf("Error opening key file")
+		c.Fatalf("Error opening key file")
 	}
 	if k1.KeyID() != k2.KeyID() {
-		t.Fatalf("Key not migrated")
+		c.Fatalf("Key not migrated")
 	}
-
-	logDone("daemon - key migration")
-}
-
-// Simulate an older daemon (pre 1.3) coming up with volumes specified in containers
-//	without corresponding volume json
-func TestDaemonUpgradeWithVolumes(t *testing.T) {
-	d := NewDaemon(t)
-
-	graphDir := filepath.Join(os.TempDir(), "docker-test")
-	defer os.RemoveAll(graphDir)
-	if err := d.StartWithBusybox("-g", graphDir); err != nil {
-		t.Fatal(err)
-	}
-
-	tmpDir := filepath.Join(os.TempDir(), "test")
-	defer os.RemoveAll(tmpDir)
-
-	if out, err := d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil {
-		t.Fatal(err, out)
-	}
-
-	if err := d.Stop(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Remove this since we're expecting the daemon to re-create it too
-	if err := os.RemoveAll(tmpDir); err != nil {
-		t.Fatal(err)
-	}
-
-	configDir := filepath.Join(graphDir, "volumes")
-
-	if err := os.RemoveAll(configDir); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := d.Start("-g", graphDir); err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
-		t.Fatalf("expected volume path %s to exist but it does not", tmpDir)
-	}
-
-	dir, err := ioutil.ReadDir(configDir)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(dir) == 0 {
-		t.Fatalf("expected volumes config dir to contain data for new volume")
-	}
-
-	// Now with just removing the volume config and not the volume data
-	if err := d.Stop(); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := os.RemoveAll(configDir); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := d.Start("-g", graphDir); err != nil {
-		t.Fatal(err)
-	}
-
-	dir, err = ioutil.ReadDir(configDir)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(dir) == 0 {
-		t.Fatalf("expected volumes config dir to contain data for new volume")
-	}
-
-	logDone("daemon - volumes from old(pre 1.3) daemon work")
 }
 
 // GH#11320 - verify that the daemon exits on failure properly
 // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
 // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required
-func TestDaemonExitOnFailure(t *testing.T) {
-	d := NewDaemon(t)
-	defer d.Stop()
-
+func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) {
 	//attempt to start daemon with incorrect flags (we know -b and --bip conflict)
-	if err := d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil {
+	if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil {
 		//verify we got the right error
 		if !strings.Contains(err.Error(), "Daemon exited and never started") {
-			t.Fatalf("Expected daemon not to start, got %v", err)
+			c.Fatalf("Expected daemon not to start, got %v", err)
 		}
 		// look in the log and make sure we got the message that daemon is shutting down
-		runCmd := exec.Command("grep", "Shutting down daemon due to", d.LogfileName())
+		runCmd := exec.Command("grep", "Error starting daemon", s.d.LogfileName())
 		if out, _, err := runCommandWithOutput(runCmd); err != nil {
-			t.Fatalf("Expected 'shutting down daemon due to error' message; but doesn't exist in log: %q, err: %v", out, err)
+			c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err)
 		}
 	} else {
 		//if we didn't get an error and the daemon is running, this is a failure
-		d.Stop()
-		t.Fatal("Conflicting options should cause the daemon to error out with a failure")
+		c.Fatal("Conflicting options should cause the daemon to error out with a failure")
 	}
-
-	logDone("daemon - verify no start on daemon init errors")
 }
 
-func TestDaemonUlimitDefaults(t *testing.T) {
-	testRequires(t, NativeExecDriver)
-	d := NewDaemon(t)
+func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) {
+	d := s.d
+	err := d.Start("--bridge", "nosuchbridge")
+	c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail"))
+	defer d.Restart()
 
-	if err := d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil {
-		t.Fatal(err)
+	bridgeName := "external-bridge"
+	bridgeIp := "192.169.1.1/24"
+	_, bridgeIPNet, _ := net.ParseCIDR(bridgeIp)
+
+	out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	defer deleteInterface(c, bridgeName)
+
+	err = d.StartWithBusybox("--bridge", bridgeName)
+	c.Assert(err, check.IsNil)
+
+	ipTablesSearchString := bridgeIPNet.String()
+	ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+	out, _, err = runCommandWithOutput(ipTablesCmd)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
+		check.Commentf("iptables output should have contained %q, but was %q",
+			ipTablesSearchString, out))
+
+	_, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top")
+	c.Assert(err, check.IsNil)
+
+	containerIp := d.findContainerIP("ExtContainer")
+	ip := net.ParseIP(containerIp)
+	c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
+		check.Commentf("Container IP-Address must be in the same subnet range : %s",
+			containerIp))
+}
+
+func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) {
+	args := []string{"link", "add", "name", ifName, "type", ifType}
+	ipLinkCmd := exec.Command("ip", args...)
+	out, _, err := runCommandWithOutput(ipLinkCmd)
+	if err != nil {
+		return out, err
 	}
 
-	out, err := d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)")
+	ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up")
+	out, _, err = runCommandWithOutput(ifCfgCmd)
+	return out, err
+}
+
+func deleteInterface(c *check.C, ifName string) {
+	ifCmd := exec.Command("ip", "link", "delete", ifName)
+	out, _, err := runCommandWithOutput(ifCmd)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	flushCmd := exec.Command("iptables", "-t", "nat", "--flush")
+	out, _, err = runCommandWithOutput(flushCmd)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	flushCmd = exec.Command("iptables", "--flush")
+	out, _, err = runCommandWithOutput(flushCmd)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) {
+	// TestDaemonBridgeIP Steps
+	// 1. Delete the existing docker0 Bridge
+	// 2. Set --bip daemon configuration and start the new Docker Daemon
+	// 3. Check if the bip config has taken effect using ifconfig and iptables commands
+	// 4. Launch a Container and make sure the IP-Address is in the expected subnet
+	// 5. Delete the docker0 Bridge
+	// 6. Restart the Docker Daemon (via defered action)
+	//    This Restart takes care of bringing docker0 interface back to auto-assigned IP
+
+	defaultNetworkBridge := "docker0"
+	deleteInterface(c, defaultNetworkBridge)
+
+	d := s.d
+
+	bridgeIp := "192.169.1.1/24"
+	ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIp)
+
+	err := d.StartWithBusybox("--bip", bridgeIp)
+	c.Assert(err, check.IsNil)
+	defer d.Restart()
+
+	ifconfigSearchString := ip.String()
+	ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge)
+	out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true,
+		check.Commentf("ifconfig output should have contained %q, but was %q",
+			ifconfigSearchString, out))
+
+	ipTablesSearchString := bridgeIPNet.String()
+	ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+	out, _, err = runCommandWithOutput(ipTablesCmd)
+	c.Assert(err, check.IsNil)
+
+	c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true,
+		check.Commentf("iptables output should have contained %q, but was %q",
+			ipTablesSearchString, out))
+
+	out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top")
+	c.Assert(err, check.IsNil)
+
+	containerIp := d.findContainerIP("test")
+	ip = net.ParseIP(containerIp)
+	c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
+		check.Commentf("Container IP-Address must be in the same subnet range : %s",
+			containerIp))
+	deleteInterface(c, defaultNetworkBridge)
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) {
+	if err := s.d.Start(); err != nil {
+		c.Fatalf("Could not start daemon: %v", err)
+	}
+	defer s.d.Restart()
+	if err := s.d.Stop(); err != nil {
+		c.Fatalf("Could not stop daemon: %v", err)
+	}
+
+	// now we will change the docker0's IP and then try starting the daemon
+	bridgeIP := "192.169.100.1/24"
+	_, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)
+
+	ipCmd := exec.Command("ifconfig", "docker0", bridgeIP)
+	stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr)
+	}
+
+	if err := s.d.Start("--bip", bridgeIP); err != nil {
+		c.Fatalf("Could not start daemon: %v", err)
+	}
+
+	//check if the iptables contains new bridgeIP MASQUERADE rule
+	ipTablesSearchString := bridgeIPNet.String()
+	ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+	out, _, err := runCommandWithOutput(ipTablesCmd)
+	if err != nil {
+		c.Fatalf("Could not run iptables -nvL: %s, %v", out, err)
+	}
+	if !strings.Contains(out, ipTablesSearchString) {
+		c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out)
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) {
+	d := s.d
+
+	bridgeName := "external-bridge"
+	bridgeIp := "192.169.1.1/24"
+
+	out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	defer deleteInterface(c, bridgeName)
+
+	args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"}
+	err = d.StartWithBusybox(args...)
+	c.Assert(err, check.IsNil)
+	defer d.Restart()
+
+	for i := 0; i < 4; i++ {
+		cName := "Container" + strconv.Itoa(i)
+		out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top")
+		if err != nil {
+			c.Assert(strings.Contains(out, "no available ip addresses"), check.Equals, true,
+				check.Commentf("Could not run a Container : %s %s", err.Error(), out))
+		}
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) {
+	d := s.d
+
+	ipStr := "192.170.1.1/24"
+	ip, _, _ := net.ParseCIDR(ipStr)
+	args := []string{"--ip", ip.String()}
+	err := d.StartWithBusybox(args...)
+	c.Assert(err, check.IsNil)
+	defer d.Restart()
+
+	out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
+	c.Assert(err, check.NotNil,
+		check.Commentf("Running a container must fail with an invalid --ip option"))
+	c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true)
+
+	ifName := "dummy"
+	out, err = createInterface(c, "dummy", ifName, ipStr)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	defer deleteInterface(c, ifName)
+
+	_, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top")
+	c.Assert(err, check.IsNil)
+
+	ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL")
+	out, _, err = runCommandWithOutput(ipTablesCmd)
+	c.Assert(err, check.IsNil)
+
+	regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String())
+	matched, _ := regexp.MatchString(regex, out)
+	c.Assert(matched, check.Equals, true,
+		check.Commentf("iptables output should have contained %q, but was %q", regex, out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) {
+	d := s.d
+
+	bridgeName := "external-bridge"
+	bridgeIp := "192.169.1.1/24"
+
+	out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	defer deleteInterface(c, bridgeName)
+
+	args := []string{"--bridge", bridgeName, "--icc=false"}
+	err = d.StartWithBusybox(args...)
+	c.Assert(err, check.IsNil)
+	defer d.Restart()
+
+	ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
+	out, _, err = runCommandWithOutput(ipTablesCmd)
+	c.Assert(err, check.IsNil)
+
+	regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
+	matched, _ := regexp.MatchString(regex, out)
+	c.Assert(matched, check.Equals, true,
+		check.Commentf("iptables output should have contained %q, but was %q", regex, out))
+
+	// Pinging another container must fail with --icc=false
+	pingContainers(c, d, true)
+
+	ipStr := "192.171.1.1/24"
+	ip, _, _ := net.ParseCIDR(ipStr)
+	ifName := "icc-dummy"
+
+	createInterface(c, "dummy", ifName, ipStr)
+
+	// But, Pinging external or a Host interface must succeed
+	pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String())
+	runArgs := []string{"--rm", "busybox", "sh", "-c", pingCmd}
+	_, err = d.Cmd("run", runArgs...)
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) {
+	d := s.d
+
+	bridgeName := "external-bridge"
+	bridgeIp := "192.169.1.1/24"
+
+	out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	defer deleteInterface(c, bridgeName)
+
+	args := []string{"--bridge", bridgeName, "--icc=false"}
+	err = d.StartWithBusybox(args...)
+	c.Assert(err, check.IsNil)
+	defer d.Restart()
+
+	ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD")
+	out, _, err = runCommandWithOutput(ipTablesCmd)
+	c.Assert(err, check.IsNil)
+
+	regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName)
+	matched, _ := regexp.MatchString(regex, out)
+	c.Assert(matched, check.Equals, true,
+		check.Commentf("iptables output should have contained %q, but was %q", regex, out))
+
+	out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) {
+	bridgeName := "external-bridge"
+	bridgeIp := "192.169.1.1/24"
+
+	out, err := createInterface(c, "bridge", bridgeName, bridgeIp)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	defer deleteInterface(c, bridgeName)
+
+	args := []string{"--bridge", bridgeName, "--icc=false"}
+	err = s.d.StartWithBusybox(args...)
+	c.Assert(err, check.IsNil)
+	defer s.d.Restart()
+
+	_, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top")
+	c.Assert(err, check.IsNil)
+	_, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top")
+	c.Assert(err, check.IsNil)
+
+	childIP := s.d.findContainerIP("child")
+	parentIP := s.d.findContainerIP("parent")
+
+	sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"}
+	destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"}
+	if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) {
+		c.Fatal("Iptables rules not found")
+	}
+
+	s.d.Cmd("rm", "--link", "parent/http")
+	if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) {
+		c.Fatal("Iptables rules should be removed when unlink")
+	}
+
+	s.d.Cmd("kill", "child")
+	s.d.Cmd("kill", "parent")
+}
+
+func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) {
+	testRequires(c, NativeExecDriver)
+
+	if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)")
+	if err != nil {
+		c.Fatal(out, err)
 	}
 
 	outArr := strings.Split(out, "\n")
 	if len(outArr) < 2 {
-		t.Fatal("got unexpected output: %s", out)
+		c.Fatalf("got unexpected output: %s", out)
 	}
 	nofile := strings.TrimSpace(outArr[0])
 	nproc := strings.TrimSpace(outArr[1])
 
 	if nofile != "42" {
-		t.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile)
+		c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile)
 	}
 	if nproc != "2048" {
-		t.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
+		c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
 	}
 
 	// Now restart daemon with a new default
-	if err := d.Restart("--default-ulimit", "nofile=43"); err != nil {
-		t.Fatal(err)
+	if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil {
+		c.Fatal(err)
 	}
 
-	out, err = d.Cmd("start", "-a", "test")
+	out, err = s.d.Cmd("start", "-a", "test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	outArr = strings.Split(out, "\n")
 	if len(outArr) < 2 {
-		t.Fatal("got unexpected output: %s", out)
+		c.Fatalf("got unexpected output: %s", out)
 	}
 	nofile = strings.TrimSpace(outArr[0])
 	nproc = strings.TrimSpace(outArr[1])
 
 	if nofile != "43" {
-		t.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile)
+		c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile)
 	}
 	if nproc != "2048" {
-		t.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
+		c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc)
 	}
-
-	logDone("daemon - default ulimits are applied")
 }
 
 // #11315
-func TestDaemonRestartRenameContainer(t *testing.T) {
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
 	}
 
-	if out, err := d.Cmd("run", "--name=test", "busybox"); err != nil {
-		t.Fatal(err, out)
+	if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil {
+		c.Fatal(err, out)
 	}
 
-	if out, err := d.Cmd("rename", "test", "test2"); err != nil {
-		t.Fatal(err, out)
+	if out, err := s.d.Cmd("rename", "test", "test2"); err != nil {
+		c.Fatal(err, out)
 	}
 
-	if err := d.Restart(); err != nil {
-		t.Fatal(err)
+	if err := s.d.Restart(); err != nil {
+		c.Fatal(err)
 	}
 
-	if out, err := d.Cmd("start", "test2"); err != nil {
-		t.Fatal(err, out)
+	if out, err := s.d.Cmd("start", "test2"); err != nil {
+		c.Fatal(err, out)
 	}
-
-	logDone("daemon - rename persists through daemon restart")
 }
 
-func TestDaemonLoggingDriverDefault(t *testing.T) {
-	d := NewDaemon(t)
-
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
 	}
-	defer d.Stop()
 
-	out, err := d.Cmd("run", "-d", "busybox", "echo", "testline")
+	out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	id := strings.TrimSpace(out)
 
-	if out, err := d.Cmd("wait", id); err != nil {
-		t.Fatal(out, err)
+	if out, err := s.d.Cmd("wait", id); err != nil {
+		c.Fatal(out, err)
 	}
-	logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log")
+	logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
 
 	if _, err := os.Stat(logPath); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	f, err := os.Open(logPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	var res struct {
-		Log    string    `json:log`
-		Stream string    `json:stream`
-		Time   time.Time `json:time`
+		Log    string    `json:"log"`
+		Stream string    `json:"stream"`
+		Time   time.Time `json:"time"`
 	}
 	if err := json.NewDecoder(f).Decode(&res); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res.Log != "testline\n" {
-		t.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
+		c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
 	}
 	if res.Stream != "stdout" {
-		t.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
+		c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
 	}
 	if !time.Now().After(res.Time) {
-		t.Fatalf("Log time %v in future", res.Time)
+		c.Fatalf("Log time %v in future", res.Time)
 	}
-	logDone("daemon - default 'json-file' logging driver")
 }
 
-func TestDaemonLoggingDriverDefaultOverride(t *testing.T) {
-	d := NewDaemon(t)
-
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
 	}
-	defer d.Stop()
 
-	out, err := d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline")
+	out, err := s.d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline")
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	id := strings.TrimSpace(out)
 
-	if out, err := d.Cmd("wait", id); err != nil {
-		t.Fatal(out, err)
+	if out, err := s.d.Cmd("wait", id); err != nil {
+		c.Fatal(out, err)
 	}
-	logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log")
+	logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
 
 	if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
-		t.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
+		c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
 	}
-	logDone("daemon - default logging driver override in run")
 }
 
-func TestDaemonLoggingDriverNone(t *testing.T) {
-	d := NewDaemon(t)
-
-	if err := d.StartWithBusybox("--log-driver=none"); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) {
+	if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
+		c.Fatal(err)
 	}
-	defer d.Stop()
 
-	out, err := d.Cmd("run", "-d", "busybox", "echo", "testline")
+	out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	id := strings.TrimSpace(out)
-	if out, err := d.Cmd("wait", id); err != nil {
-		t.Fatal(out, err)
+	if out, err := s.d.Cmd("wait", id); err != nil {
+		c.Fatal(out, err)
 	}
 
-	logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log")
+	logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
 
 	if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
-		t.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
+		c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
 	}
-	logDone("daemon - 'none' logging driver")
 }
 
-func TestDaemonLoggingDriverNoneOverride(t *testing.T) {
-	d := NewDaemon(t)
-
-	if err := d.StartWithBusybox("--log-driver=none"); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
+	if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
+		c.Fatal(err)
 	}
-	defer d.Stop()
 
-	out, err := d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline")
+	out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline")
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	id := strings.TrimSpace(out)
 
-	if out, err := d.Cmd("wait", id); err != nil {
-		t.Fatal(out, err)
+	if out, err := s.d.Cmd("wait", id); err != nil {
+		c.Fatal(out, err)
 	}
-	logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log")
+	logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
 
 	if _, err := os.Stat(logPath); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	f, err := os.Open(logPath)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	var res struct {
-		Log    string    `json:log`
-		Stream string    `json:stream`
-		Time   time.Time `json:time`
+		Log    string    `json:"log"`
+		Stream string    `json:"stream"`
+		Time   time.Time `json:"time"`
 	}
 	if err := json.NewDecoder(f).Decode(&res); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if res.Log != "testline\n" {
-		t.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
+		c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n")
 	}
 	if res.Stream != "stdout" {
-		t.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
+		c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout")
 	}
 	if !time.Now().After(res.Time) {
-		t.Fatalf("Log time %v in future", res.Time)
+		c.Fatalf("Log time %v in future", res.Time)
 	}
-	logDone("daemon - 'none' logging driver override in run")
 }
 
-func TestDaemonLoggingDriverNoneLogsError(t *testing.T) {
-	d := NewDaemon(t)
-
-	if err := d.StartWithBusybox("--log-driver=none"); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) {
+	if err := s.d.StartWithBusybox("--log-driver=none"); err != nil {
+		c.Fatal(err)
 	}
-	defer d.Stop()
 
-	out, err := d.Cmd("run", "-d", "busybox", "echo", "testline")
+	out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline")
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	id := strings.TrimSpace(out)
-	out, err = d.Cmd("logs", id)
+	out, err = s.d.Cmd("logs", id)
 	if err == nil {
-		t.Fatalf("Logs should fail with \"none\" driver")
+		c.Fatalf("Logs should fail with \"none\" driver")
 	}
-	if !strings.Contains(out, `\"logs\" command is supported only for \"json-file\" logging driver`) {
-		t.Fatalf("There should be error about non-json-file driver, got %s", out)
+	if !strings.Contains(out, `"logs" command is supported only for "json-file" logging driver`) {
+		c.Fatalf("There should be error about non-json-file driver, got: %s", out)
 	}
-	logDone("daemon - logs not available for non-json-file drivers")
 }
 
-func TestDaemonDots(t *testing.T) {
-	defer deleteAllContainers()
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatal(err)
+func (s *DockerDaemonSuite) TestDaemonDots(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
 	}
 
 	// Now create 4 containers
-	if _, err := d.Cmd("create", "busybox"); err != nil {
-		t.Fatalf("Error creating container: %q", err)
+	if _, err := s.d.Cmd("create", "busybox"); err != nil {
+		c.Fatalf("Error creating container: %q", err)
 	}
-	if _, err := d.Cmd("create", "busybox"); err != nil {
-		t.Fatalf("Error creating container: %q", err)
+	if _, err := s.d.Cmd("create", "busybox"); err != nil {
+		c.Fatalf("Error creating container: %q", err)
 	}
-	if _, err := d.Cmd("create", "busybox"); err != nil {
-		t.Fatalf("Error creating container: %q", err)
+	if _, err := s.d.Cmd("create", "busybox"); err != nil {
+		c.Fatalf("Error creating container: %q", err)
 	}
-	if _, err := d.Cmd("create", "busybox"); err != nil {
-		t.Fatalf("Error creating container: %q", err)
+	if _, err := s.d.Cmd("create", "busybox"); err != nil {
+		c.Fatalf("Error creating container: %q", err)
 	}
 
-	d.Stop()
+	s.d.Stop()
 
-	d.Start("--log-level=debug")
-	d.Stop()
-	content, _ := ioutil.ReadFile(d.logFile.Name())
+	s.d.Start("--log-level=debug")
+	s.d.Stop()
+	content, _ := ioutil.ReadFile(s.d.logFile.Name())
 	if strings.Contains(string(content), "....") {
-		t.Fatalf("Debug level should not have ....\n%s", string(content))
+		c.Fatalf("Debug level should not have ....\n%s", string(content))
 	}
 
-	d.Start("--log-level=error")
-	d.Stop()
-	content, _ = ioutil.ReadFile(d.logFile.Name())
+	s.d.Start("--log-level=error")
+	s.d.Stop()
+	content, _ = ioutil.ReadFile(s.d.logFile.Name())
 	if strings.Contains(string(content), "....") {
-		t.Fatalf("Error level should not have ....\n%s", string(content))
+		c.Fatalf("Error level should not have ....\n%s", string(content))
 	}
 
-	d.Start("--log-level=info")
-	d.Stop()
-	content, _ = ioutil.ReadFile(d.logFile.Name())
+	s.d.Start("--log-level=info")
+	s.d.Stop()
+	content, _ = ioutil.ReadFile(s.d.logFile.Name())
 	if !strings.Contains(string(content), "....") {
-		t.Fatalf("Info level should have ....\n%s", string(content))
+		c.Fatalf("Info level should have ....\n%s", string(content))
 	}
-
-	logDone("daemon - test dots on INFO")
 }
 
-func TestDaemonUnixSockCleanedUp(t *testing.T) {
-	d := NewDaemon(t)
+func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) {
 	dir, err := ioutil.TempDir("", "socket-cleanup-test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(dir)
 
 	sockPath := filepath.Join(dir, "docker.sock")
-	if err := d.Start("--host", "unix://"+sockPath); err != nil {
-		t.Fatal(err)
+	if err := s.d.Start("--host", "unix://"+sockPath); err != nil {
+		c.Fatal(err)
 	}
 
 	if _, err := os.Stat(sockPath); err != nil {
-		t.Fatal("socket does not exist")
+		c.Fatal("socket does not exist")
 	}
 
-	if err := d.Stop(); err != nil {
-		t.Fatal(err)
+	if err := s.d.Stop(); err != nil {
+		c.Fatal(err)
 	}
 
 	if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) {
-		t.Fatal("unix socket is not cleaned up")
+		c.Fatal("unix socket is not cleaned up")
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
+	type Config struct {
+		Crv string `json:"crv"`
+		D   string `json:"d"`
+		Kid string `json:"kid"`
+		Kty string `json:"kty"`
+		X   string `json:"x"`
+		Y   string `json:"y"`
 	}
 
-	logDone("daemon - unix socket is cleaned up")
+	os.Remove("/etc/docker/key.json")
+	if err := s.d.Start(); err != nil {
+		c.Fatalf("Failed to start daemon: %v", err)
+	}
+
+	if err := s.d.Stop(); err != nil {
+		c.Fatalf("Could not stop daemon: %v", err)
+	}
+
+	config := &Config{}
+	bytes, err := ioutil.ReadFile("/etc/docker/key.json")
+	if err != nil {
+		c.Fatalf("Error reading key.json file: %s", err)
+	}
+
+	// byte[] to Data-Struct
+	if err := json.Unmarshal(bytes, &config); err != nil {
+		c.Fatalf("Error Unmarshal: %s", err)
+	}
+
+	//replace config.Kid with the fake value
+	config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"
+
+	// NEW Data-Struct to byte[]
+	newBytes, err := json.Marshal(&config)
+	if err != nil {
+		c.Fatalf("Error Marshal: %s", err)
+	}
+
+	// write back
+	if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil {
+		c.Fatalf("Error ioutil.WriteFile: %s", err)
+	}
+
+	defer os.Remove("/etc/docker/key.json")
+
+	if err := s.d.Start(); err == nil {
+		c.Fatalf("It should not be successful to start daemon with wrong key: %v", err)
+	}
+
+	content, _ := ioutil.ReadFile(s.d.logFile.Name())
+
+	if !strings.Contains(string(content), "Public Key ID does not match") {
+		c.Fatal("Missing KeyID message from daemon logs")
+	}
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
+	}
+
+	out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat")
+	if err != nil {
+		c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out)
+	}
+	containerID := strings.TrimSpace(out)
+
+	if out, err := s.d.Cmd("kill", containerID); err != nil {
+		c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out)
+	}
+
+	if err := s.d.Restart(); err != nil {
+		c.Fatalf("Could not restart daemon: %v", err)
+	}
+
+	errchan := make(chan error)
+	go func() {
+		if out, err := s.d.Cmd("wait", containerID); err != nil {
+			errchan <- fmt.Errorf("%v:\n%s", err, out)
+		}
+		close(errchan)
+	}()
+
+	select {
+	case <-time.After(5 * time.Second):
+		c.Fatal("Waiting on a stopped (killed) container timed out")
+	case err := <-errchan:
+		if err != nil {
+			c.Fatal(err)
+		}
+	}
+}
+
+// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint
+func (s *DockerDaemonSuite) TestHttpsInfo(c *check.C) {
+	const (
+		testDaemonHttpsAddr = "localhost:4271"
+	)
+
+	if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
+		"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHttpsAddr); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
+	}
+
+	//force tcp protocol
+	host := fmt.Sprintf("tcp://%s", testDaemonHttpsAddr)
+	daemonArgs := []string{"--host", host, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"}
+	out, err := s.d.CmdWithArgs(daemonArgs, "info")
+	if err != nil {
+		c.Fatalf("Error Occurred: %s and output: %s", err, out)
+	}
+}
+
+// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint
+// by using a rogue client certificate and checks that it fails with the expected error.
+func (s *DockerDaemonSuite) TestHttpsInfoRogueCert(c *check.C) {
+	const (
+		errBadCertificate   = "remote error: bad certificate"
+		testDaemonHttpsAddr = "localhost:4271"
+	)
+	if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
+		"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHttpsAddr); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
+	}
+
+	//force tcp protocol
+	host := fmt.Sprintf("tcp://%s", testDaemonHttpsAddr)
+	daemonArgs := []string{"--host", host, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"}
+	out, err := s.d.CmdWithArgs(daemonArgs, "info")
+	if err == nil || !strings.Contains(out, errBadCertificate) {
+		c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out)
+	}
+}
+
+// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint
+// which provides a rogue server certificate and checks that it fails with the expected error
+func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) {
+	const (
+		errCaUnknown             = "x509: certificate signed by unknown authority"
+		testDaemonRogueHttpsAddr = "localhost:4272"
+	)
+	if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem",
+		"--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHttpsAddr); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
+	}
+
+	//force tcp protocol
+	host := fmt.Sprintf("tcp://%s", testDaemonRogueHttpsAddr)
+	daemonArgs := []string{"--host", host, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"}
+	out, err := s.d.CmdWithArgs(daemonArgs, "info")
+	if err == nil || !strings.Contains(out, errCaUnknown) {
+		c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out)
+	}
+}
+
+func pingContainers(c *check.C, d *Daemon, expectFailure bool) {
+	var dargs []string
+	if d != nil {
+		dargs = []string{"--host", d.sock()}
+	}
+
+	args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top")
+	_, err := runCommand(exec.Command(dockerBinary, args...))
+	c.Assert(err, check.IsNil)
+
+	args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c")
+	pingCmd := "ping -c 1 %s -W 1"
+	args = append(args, fmt.Sprintf(pingCmd, "alias1"))
+	_, err = runCommand(exec.Command(dockerBinary, args...))
+
+	if expectFailure {
+		c.Assert(err, check.NotNil)
+	} else {
+		c.Assert(err, check.IsNil)
+	}
+
+	args = append(dargs, "rm", "-f", "container1")
+	runCommand(exec.Command(dockerBinary, args...))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) {
+	c.Assert(s.d.StartWithBusybox(), check.IsNil)
+
+	socket := filepath.Join(s.d.folder, "docker.sock")
+
+	out, err := s.d.Cmd("run", "-d", "-v", socket+":/sock", "busybox")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	c.Assert(s.d.Restart(), check.IsNil)
+}
+
+func (s *DockerDaemonSuite) TestCleanupMountsAfterCrash(c *check.C) {
+	c.Assert(s.d.StartWithBusybox(), check.IsNil)
+
+	out, err := s.d.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	id := strings.TrimSpace(out)
+	c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
+	c.Assert(s.d.Start(), check.IsNil)
+	mountOut, err := exec.Command("mount").CombinedOutput()
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
+	c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, check.Commentf("Something mounted from older daemon start: %s", mountOut))
+}
+
+func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
+	c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil)
+
+	out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l")
+	c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
+	c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
+		check.Commentf("There shouldn't be eth0 in container when network is disabled: %s", out))
 }
diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go
index 36881a5..725b762 100644
--- a/integration-cli/docker_cli_diff_test.go
+++ b/integration-cli/docker_cli_diff_test.go
@@ -3,24 +3,25 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // ensure that an added file shows up in docker diff
-func TestDiffFilenameShownInOutput(t *testing.T) {
+func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) {
 	containerCmd := `echo foo > /root/bar`
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd)
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to start the container: %s, %v", out, err)
+		c.Fatalf("failed to start the container: %s, %v", out, err)
 	}
 
-	cleanCID := stripTrailingCharacters(out)
+	cleanCID := strings.TrimSpace(out)
 
 	diffCmd := exec.Command(dockerBinary, "diff", cleanCID)
 	out, _, err = runCommandWithOutput(diffCmd)
 	if err != nil {
-		t.Fatalf("failed to run diff: %s %v", out, err)
+		c.Fatalf("failed to run diff: %s %v", out, err)
 	}
 
 	found := false
@@ -31,62 +32,56 @@
 		}
 	}
 	if !found {
-		t.Errorf("couldn't find the new file in docker diff's output: %v", out)
+		c.Errorf("couldn't find the new file in docker diff's output: %v", out)
 	}
-	deleteContainer(cleanCID)
-
-	logDone("diff - check if created file shows up")
 }
 
 // test to ensure GH #3840 doesn't occur any more
-func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) {
+func (s *DockerSuite) TestDiffEnsureDockerinitFilesAreIgnored(c *check.C) {
 	// this is a list of files which shouldn't show up in `docker diff`
 	dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"}
+	containerCount := 5
 
 	// we might not run into this problem from the first run, so start a few containers
-	for i := 0; i < 20; i++ {
+	for i := 0; i < containerCount; i++ {
 		containerCmd := `echo foo > /root/bar`
 		runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd)
 		out, _, err := runCommandWithOutput(runCmd)
+
 		if err != nil {
-			t.Fatal(out, err)
+			c.Fatal(out, err)
 		}
 
-		cleanCID := stripTrailingCharacters(out)
+		cleanCID := strings.TrimSpace(out)
 
 		diffCmd := exec.Command(dockerBinary, "diff", cleanCID)
 		out, _, err = runCommandWithOutput(diffCmd)
 		if err != nil {
-			t.Fatalf("failed to run diff: %s, %v", out, err)
+			c.Fatalf("failed to run diff: %s, %v", out, err)
 		}
 
-		deleteContainer(cleanCID)
-
 		for _, filename := range dockerinitFiles {
 			if strings.Contains(out, filename) {
-				t.Errorf("found file which should've been ignored %v in diff output", filename)
+				c.Errorf("found file which should've been ignored %v in diff output", filename)
 			}
 		}
 	}
-
-	logDone("diff - check if ignored files show up in diff")
 }
 
-func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) {
+func (s *DockerSuite) TestDiffEnsureOnlyKmsgAndPtmx(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanCID := stripTrailingCharacters(out)
+	cleanCID := strings.TrimSpace(out)
 
 	diffCmd := exec.Command(dockerBinary, "diff", cleanCID)
 	out, _, err = runCommandWithOutput(diffCmd)
 	if err != nil {
-		t.Fatalf("failed to run diff: %s, %v", out, err)
+		c.Fatalf("failed to run diff: %s, %v", out, err)
 	}
-	deleteContainer(cleanCID)
 
 	expected := map[string]bool{
 		"C /dev":         true,
@@ -109,9 +104,7 @@
 
 	for _, line := range strings.Split(out, "\n") {
 		if line != "" && !expected[line] {
-			t.Errorf("%q is shown in the diff but shouldn't", line)
+			c.Errorf("%q is shown in the diff but shouldn't", line)
 		}
 	}
-
-	logDone("diff - ensure that only kmsg and ptmx in diff")
 }
diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go
index a74ce15..d6518ce 100644
--- a/integration-cli/docker_cli_events_test.go
+++ b/integration-cli/docker_cli_events_test.go
@@ -1,25 +1,63 @@
 package main
 
 import (
+	"bufio"
 	"fmt"
 	"os/exec"
 	"regexp"
 	"strconv"
 	"strings"
-	"testing"
+	"sync"
 	"time"
+
+	"github.com/go-check/check"
 )
 
-func TestEventsUntag(t *testing.T) {
+func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) {
 	image := "busybox"
-	dockerCmd(t, "tag", image, "utest:tag1")
-	dockerCmd(t, "tag", image, "utest:tag2")
-	dockerCmd(t, "rmi", "utest:tag1")
-	dockerCmd(t, "rmi", "utest:tag2")
+
+	// Start stopwatch, generate an event
+	time.Sleep(time.Second) // so that we don't grab events from previous test occured in the same second
+	start := daemonTime(c)
+	time.Sleep(time.Second) // remote API precision is only a second, wait a while before creating an event
+	dockerCmd(c, "tag", image, "timestamptest:1")
+	dockerCmd(c, "rmi", "timestamptest:1")
+	time.Sleep(time.Second) // so that until > since
+	end := daemonTime(c)
+
+	// List of available time formats to --since
+	unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) }
+	rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) }
+
+	// --since=$start must contain only the 'untag' event
+	for _, f := range []func(time.Time) string{unixTs, rfc3339} {
+		since, until := f(start), f(end)
+		cmd := exec.Command(dockerBinary, "events", "--since="+since, "--until="+until)
+		out, _, err := runCommandWithOutput(cmd)
+		if err != nil {
+			c.Fatalf("docker events cmd failed: %v\nout=%s", err, out)
+		}
+		events := strings.Split(strings.TrimSpace(out), "\n")
+		if len(events) != 2 {
+			c.Fatalf("unexpected events, was expecting only 2 events tag/untag (since=%s, until=%s) out=%s", since, until, out)
+		}
+		if !strings.Contains(out, "untag") {
+			c.Fatalf("expected 'untag' event not found (since=%s, until=%s) out=%s", since, until, out)
+		}
+	}
+
+}
+
+func (s *DockerSuite) TestEventsUntag(c *check.C) {
+	image := "busybox"
+	dockerCmd(c, "tag", image, "utest:tag1")
+	dockerCmd(c, "tag", image, "utest:tag2")
+	dockerCmd(c, "rmi", "utest:tag1")
+	dockerCmd(c, "rmi", "utest:tag2")
 	eventsCmd := exec.Command(dockerBinary, "events", "--since=1")
 	out, exitCode, _, err := runCommandWithOutputForDuration(eventsCmd, time.Duration(time.Millisecond*200))
 	if exitCode != 0 || err != nil {
-		t.Fatalf("Failed to get events - exit code %d: %s", exitCode, err)
+		c.Fatalf("Failed to get events - exit code %d: %s", exitCode, err)
 	}
 	events := strings.Split(out, "\n")
 	nEvents := len(events)
@@ -28,185 +66,271 @@
 	// looking for.
 	for _, v := range events[nEvents-3 : nEvents-1] {
 		if !strings.Contains(v, "untag") {
-			t.Fatalf("event should be untag, not %#v", v)
+			c.Fatalf("event should be untag, not %#v", v)
 		}
 	}
-	logDone("events - untags are logged")
 }
 
-func TestEventsContainerFailStartDie(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestEventsContainerFailStartDie(c *check.C) {
 
-	out, _, _ := dockerCmd(t, "images", "-q")
+	out, _ := dockerCmd(c, "images", "-q")
 	image := strings.Split(out, "\n")[0]
 	eventsCmd := exec.Command(dockerBinary, "run", "--name", "testeventdie", image, "blerg")
 	_, _, err := runCommandWithOutput(eventsCmd)
 	if err == nil {
-		t.Fatalf("Container run with command blerg should have failed, but it did not")
+		c.Fatalf("Container run with command blerg should have failed, but it did not")
 	}
 
-	eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
+	eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
 	out, _, _ = runCommandWithOutput(eventsCmd)
 	events := strings.Split(out, "\n")
 	if len(events) <= 1 {
-		t.Fatalf("Missing expected event")
+		c.Fatalf("Missing expected event")
 	}
 
 	startEvent := strings.Fields(events[len(events)-3])
 	dieEvent := strings.Fields(events[len(events)-2])
 
 	if startEvent[len(startEvent)-1] != "start" {
-		t.Fatalf("event should be start, not %#v", startEvent)
+		c.Fatalf("event should be start, not %#v", startEvent)
 	}
 	if dieEvent[len(dieEvent)-1] != "die" {
-		t.Fatalf("event should be die, not %#v", dieEvent)
+		c.Fatalf("event should be die, not %#v", dieEvent)
 	}
 
-	logDone("events - container unwilling to start logs die")
 }
 
-func TestEventsLimit(t *testing.T) {
-	defer deleteAllContainers()
-	for i := 0; i < 30; i++ {
-		dockerCmd(t, "run", "busybox", "echo", strconv.Itoa(i))
+func (s *DockerSuite) TestEventsLimit(c *check.C) {
+
+	var waitGroup sync.WaitGroup
+	errChan := make(chan error, 17)
+
+	args := []string{"run", "--rm", "busybox", "true"}
+	for i := 0; i < 17; i++ {
+		waitGroup.Add(1)
+		go func() {
+			defer waitGroup.Done()
+			errChan <- exec.Command(dockerBinary, args...).Run()
+		}()
 	}
-	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
+
+	waitGroup.Wait()
+	close(errChan)
+
+	for err := range errChan {
+		if err != nil {
+			c.Fatalf("%q failed with error: %v", strings.Join(args, " "), err)
+		}
+	}
+
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
 	out, _, _ := runCommandWithOutput(eventsCmd)
 	events := strings.Split(out, "\n")
 	nEvents := len(events) - 1
 	if nEvents != 64 {
-		t.Fatalf("events should be limited to 64, but received %d", nEvents)
+		c.Fatalf("events should be limited to 64, but received %d", nEvents)
 	}
-	logDone("events - limited to 64 entries")
 }
 
-func TestEventsContainerEvents(t *testing.T) {
-	dockerCmd(t, "run", "--rm", "busybox", "true")
-	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
+func (s *DockerSuite) TestEventsContainerEvents(c *check.C) {
+	dockerCmd(c, "run", "--rm", "busybox", "true")
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
 	out, exitCode, err := runCommandWithOutput(eventsCmd)
 	if exitCode != 0 || err != nil {
-		t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err)
+		c.Fatalf("Failed to get events with exit code %d: %s", exitCode, err)
 	}
 	events := strings.Split(out, "\n")
 	events = events[:len(events)-1]
 	if len(events) < 4 {
-		t.Fatalf("Missing expected event")
+		c.Fatalf("Missing expected event")
 	}
 	createEvent := strings.Fields(events[len(events)-4])
 	startEvent := strings.Fields(events[len(events)-3])
 	dieEvent := strings.Fields(events[len(events)-2])
 	destroyEvent := strings.Fields(events[len(events)-1])
 	if createEvent[len(createEvent)-1] != "create" {
-		t.Fatalf("event should be create, not %#v", createEvent)
+		c.Fatalf("event should be create, not %#v", createEvent)
 	}
 	if startEvent[len(startEvent)-1] != "start" {
-		t.Fatalf("event should be start, not %#v", startEvent)
+		c.Fatalf("event should be start, not %#v", startEvent)
 	}
 	if dieEvent[len(dieEvent)-1] != "die" {
-		t.Fatalf("event should be die, not %#v", dieEvent)
+		c.Fatalf("event should be die, not %#v", dieEvent)
 	}
 	if destroyEvent[len(destroyEvent)-1] != "destroy" {
-		t.Fatalf("event should be destroy, not %#v", destroyEvent)
+		c.Fatalf("event should be destroy, not %#v", destroyEvent)
 	}
 
-	logDone("events - container create, start, die, destroy is logged")
 }
 
-func TestEventsImageUntagDelete(t *testing.T) {
+func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) {
+	dockerCmd(c, "run", "--rm", "busybox", "true")
+	timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano)
+	timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1)
+	eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since='%s'", timeBeginning),
+		fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+	out, exitCode, err := runCommandWithOutput(eventsCmd)
+	if exitCode != 0 || err != nil {
+		c.Fatalf("Failed to get events with exit code %d: %s", exitCode, err)
+	}
+	events := strings.Split(out, "\n")
+	events = events[:len(events)-1]
+	if len(events) < 4 {
+		c.Fatalf("Missing expected event")
+	}
+	createEvent := strings.Fields(events[len(events)-4])
+	startEvent := strings.Fields(events[len(events)-3])
+	dieEvent := strings.Fields(events[len(events)-2])
+	destroyEvent := strings.Fields(events[len(events)-1])
+	if createEvent[len(createEvent)-1] != "create" {
+		c.Fatalf("event should be create, not %#v", createEvent)
+	}
+	if startEvent[len(startEvent)-1] != "start" {
+		c.Fatalf("event should be start, not %#v", startEvent)
+	}
+	if dieEvent[len(dieEvent)-1] != "die" {
+		c.Fatalf("event should be die, not %#v", dieEvent)
+	}
+	if destroyEvent[len(destroyEvent)-1] != "destroy" {
+		c.Fatalf("event should be destroy, not %#v", destroyEvent)
+	}
+
+}
+
+func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) {
 	name := "testimageevents"
-	defer deleteImages(name)
 	_, err := buildImage(name,
 		`FROM scratch
 		MAINTAINER "docker"`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if err := deleteImages(name); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
 	out, exitCode, err := runCommandWithOutput(eventsCmd)
 	if exitCode != 0 || err != nil {
-		t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err)
+		c.Fatalf("Failed to get events with exit code %d: %s", exitCode, err)
 	}
 	events := strings.Split(out, "\n")
 
 	events = events[:len(events)-1]
 	if len(events) < 2 {
-		t.Fatalf("Missing expected event")
+		c.Fatalf("Missing expected event")
 	}
 	untagEvent := strings.Fields(events[len(events)-2])
 	deleteEvent := strings.Fields(events[len(events)-1])
 	if untagEvent[len(untagEvent)-1] != "untag" {
-		t.Fatalf("untag should be untag, not %#v", untagEvent)
+		c.Fatalf("untag should be untag, not %#v", untagEvent)
 	}
 	if deleteEvent[len(deleteEvent)-1] != "delete" {
-		t.Fatalf("delete should be delete, not %#v", deleteEvent)
+		c.Fatalf("delete should be delete, not %#v", deleteEvent)
 	}
-	logDone("events - image untag, delete is logged")
 }
 
-func TestEventsImagePull(t *testing.T) {
-	since := daemonTime(t).Unix()
+func (s *DockerSuite) TestEventsImageTag(c *check.C) {
+	time.Sleep(time.Second * 2) // because API has seconds granularity
+	since := daemonTime(c).Unix()
+	image := "testimageevents:tag"
+	dockerCmd(c, "tag", "busybox", image)
 
-	defer deleteImages("hello-world")
+	eventsCmd := exec.Command(dockerBinary, "events",
+		fmt.Sprintf("--since=%d", since),
+		fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
+	out, _, err := runCommandWithOutput(eventsCmd)
+	c.Assert(err, check.IsNil)
+
+	events := strings.Split(strings.TrimSpace(out), "\n")
+	if len(events) != 1 {
+		c.Fatalf("was expecting 1 event. out=%s", out)
+	}
+	event := strings.TrimSpace(events[0])
+	expectedStr := image + ": tag"
+
+	if !strings.HasSuffix(event, expectedStr) {
+		c.Fatalf("wrong event format. expected='%s' got=%s", expectedStr, event)
+	}
+
+}
+
+func (s *DockerSuite) TestEventsImagePull(c *check.C) {
+	since := daemonTime(c).Unix()
+	testRequires(c, Network)
 
 	pullCmd := exec.Command(dockerBinary, "pull", "hello-world")
 	if out, _, err := runCommandWithOutput(pullCmd); err != nil {
-		t.Fatalf("pulling the hello-world image from has failed: %s, %v", out, err)
+		c.Fatalf("pulling the hello-world image from has failed: %s, %v", out, err)
 	}
 
 	eventsCmd := exec.Command(dockerBinary, "events",
 		fmt.Sprintf("--since=%d", since),
-		fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
+		fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
 	out, _, _ := runCommandWithOutput(eventsCmd)
 
 	events := strings.Split(strings.TrimSpace(out), "\n")
 	event := strings.TrimSpace(events[len(events)-1])
 
 	if !strings.HasSuffix(event, "hello-world:latest: pull") {
-		t.Fatalf("Missing pull event - got:%q", event)
+		c.Fatalf("Missing pull event - got:%q", event)
 	}
 
-	logDone("events - image pull is logged")
 }
 
-func TestEventsImageImport(t *testing.T) {
-	defer deleteAllContainers()
-	since := daemonTime(t).Unix()
+func (s *DockerSuite) TestEventsImageImport(c *check.C) {
+	since := daemonTime(c).Unix()
+
+	id := make(chan string)
+	eventImport := make(chan struct{})
+	eventsCmd := exec.Command(dockerBinary, "events", "--since", strconv.FormatInt(since, 10))
+	stdout, err := eventsCmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	if err := eventsCmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	defer eventsCmd.Process.Kill()
+
+	go func() {
+		containerID := <-id
+
+		matchImport := regexp.MustCompile(containerID + `: import$`)
+		scanner := bufio.NewScanner(stdout)
+		for scanner.Scan() {
+			if matchImport.MatchString(scanner.Text()) {
+				close(eventImport)
+			}
+		}
+	}()
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal("failed to create a container", out, err)
+		c.Fatal("failed to create a container", out, err)
 	}
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	out, _, err = runCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "export", cleanedContainerID),
 		exec.Command(dockerBinary, "import", "-"),
 	)
 	if err != nil {
-		t.Errorf("import failed with errors: %v, output: %q", err, out)
+		c.Errorf("import failed with errors: %v, output: %q", err, out)
 	}
+	newContainerID := strings.TrimSpace(out)
+	id <- newContainerID
 
-	eventsCmd := exec.Command(dockerBinary, "events",
-		fmt.Sprintf("--since=%d", since),
-		fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
-	out, _, _ = runCommandWithOutput(eventsCmd)
-
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	event := strings.TrimSpace(events[len(events)-1])
-
-	if !strings.HasSuffix(event, ": import") {
-		t.Fatalf("Missing pull event - got:%q", event)
+	select {
+	case <-time.After(5 * time.Second):
+		c.Fatal("failed to observe image import in timely fashion")
+	case <-eventImport:
+		// ignore, done
 	}
-
-	logDone("events - image import is logged")
 }
 
-func TestEventsFilters(t *testing.T) {
+func (s *DockerSuite) TestEventsFilters(c *check.C) {
 	parseEvents := func(out, match string) {
 		events := strings.Split(out, "\n")
 		events = events[:len(events)-1]
@@ -214,175 +338,228 @@
 			eventFields := strings.Fields(event)
 			eventName := eventFields[len(eventFields)-1]
 			if ok, err := regexp.MatchString(match, eventName); err != nil || !ok {
-				t.Fatalf("event should match %s, got %#v, err: %v", match, eventFields, err)
+				c.Fatalf("event should match %s, got %#v, err: %v", match, eventFields, err)
 			}
 		}
 	}
 
-	since := daemonTime(t).Unix()
+	since := daemonTime(c).Unix()
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", "event=die"))
+	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die"))
 	if err != nil {
-		t.Fatalf("Failed to get events: %s", err)
+		c.Fatalf("Failed to get events: %s", err)
 	}
 	parseEvents(out, "die")
 
-	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", "event=die", "--filter", "event=start"))
+	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die", "--filter", "event=start"))
 	if err != nil {
-		t.Fatalf("Failed to get events: %s", err)
+		c.Fatalf("Failed to get events: %s", err)
 	}
 	parseEvents(out, "((die)|(start))")
 
 	// make sure we at least got 2 start events
 	count := strings.Count(out, "start")
 	if count < 2 {
-		t.Fatalf("should have had 2 start events but had %d, out: %s", count, out)
+		c.Fatalf("should have had 2 start events but had %d, out: %s", count, out)
 	}
 
-	logDone("events - filters")
 }
 
-func TestEventsFilterImageName(t *testing.T) {
-	since := daemonTime(t).Unix()
-	defer deleteAllContainers()
+func (s *DockerSuite) TestEventsFilterImageName(c *check.C) {
+	since := daemonTime(c).Unix()
 
-	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_1", "-d", "busybox", "true"))
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_1", "-d", "busybox:latest", "true"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	container1 := stripTrailingCharacters(out)
+	container1 := strings.TrimSpace(out)
 
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_2", "-d", "busybox", "true"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	container2 := stripTrailingCharacters(out)
+	container2 := strings.TrimSpace(out)
 
-	for _, s := range []string{"busybox", "busybox:latest"} {
-		eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", fmt.Sprintf("image=%s", s))
-		out, _, err := runCommandWithOutput(eventsCmd)
-		if err != nil {
-			t.Fatalf("Failed to get events, error: %s(%s)", err, out)
-		}
-		events := strings.Split(out, "\n")
-		events = events[:len(events)-1]
-		if len(events) == 0 {
-			t.Fatalf("Expected events but found none for the image busybox:latest")
-		}
-		count1 := 0
-		count2 := 0
-		for _, e := range events {
-			if strings.Contains(e, container1) {
-				count1++
-			} else if strings.Contains(e, container2) {
-				count2++
-			}
-		}
-		if count1 == 0 || count2 == 0 {
-			t.Fatalf("Expected events from each container but got %d from %s and %d from %s", count1, container1, count2, container2)
+	name := "busybox"
+	eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("image=%s", name))
+	out, _, err = runCommandWithOutput(eventsCmd)
+	if err != nil {
+		c.Fatalf("Failed to get events, error: %s(%s)", err, out)
+	}
+	events := strings.Split(out, "\n")
+	events = events[:len(events)-1]
+	if len(events) == 0 {
+		c.Fatalf("Expected events but found none for the image busybox:latest")
+	}
+	count1 := 0
+	count2 := 0
+
+	for _, e := range events {
+		if strings.Contains(e, container1) {
+			count1++
+		} else if strings.Contains(e, container2) {
+			count2++
 		}
 	}
+	if count1 == 0 || count2 == 0 {
+		c.Fatalf("Expected events from each container but got %d from %s and %d from %s", count1, container1, count2, container2)
+	}
 
-	logDone("events - filters using image")
 }
 
-func TestEventsFilterContainerID(t *testing.T) {
-	since := daemonTime(t).Unix()
-	defer deleteAllContainers()
+func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
+	since := fmt.Sprintf("%d", daemonTime(c).Unix())
+	nameID := make(map[string]string)
 
-	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "true"))
-	if err != nil {
-		t.Fatal(out, err)
+	for _, name := range []string{"container_1", "container_2"} {
+		out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", name, "busybox", "true"))
+		if err != nil {
+			c.Fatalf("Error: %v, Output: %s", err, out)
+		}
+		id, err := inspectField(name, "Id")
+		if err != nil {
+			c.Fatal(err)
+		}
+		nameID[name] = id
 	}
-	container1 := stripTrailingCharacters(out)
 
-	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "true"))
-	if err != nil {
-		t.Fatal(out, err)
+	until := fmt.Sprintf("%d", daemonTime(c).Unix())
+
+	checkEvents := func(id string, events []string) error {
+		if len(events) != 3 { // create, start, die
+			return fmt.Errorf("expected 3 events, got %v", events)
+		}
+		for _, event := range events {
+			e := strings.Fields(event)
+			if len(e) < 3 {
+				return fmt.Errorf("got malformed event: %s", event)
+			}
+
+			// Check the id
+			parsedID := strings.TrimSuffix(e[1], ":")
+			if parsedID != id {
+				return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, parsedID)
+			}
+		}
+		return nil
 	}
-	container2 := stripTrailingCharacters(out)
 
-	for _, s := range []string{container1, container2, container1[:12], container2[:12]} {
-		eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", fmt.Sprintf("container=%s", s))
+	for name, ID := range nameID {
+		// filter by names
+		eventsCmd := exec.Command(dockerBinary, "events", "--since", since, "--until", until, "--filter", "container="+name)
 		out, _, err := runCommandWithOutput(eventsCmd)
 		if err != nil {
-			t.Fatalf("Failed to get events, error: %s(%s)", err, out)
+			c.Fatal(err)
 		}
-		events := strings.Split(out, "\n")
-		events = events[:len(events)-1]
-		if len(events) == 0 || len(events) > 3 {
-			t.Fatalf("Expected 3 events, got %d: %v", len(events), events)
+
+		events := strings.Split(strings.TrimSuffix(out, "\n"), "\n")
+		if err := checkEvents(ID, events); err != nil {
+			c.Fatal(err)
 		}
-		createEvent := strings.Fields(events[0])
-		if createEvent[len(createEvent)-1] != "create" {
-			t.Fatalf("first event should be create, not %#v", createEvent)
+
+		// filter by ID's
+		eventsCmd = exec.Command(dockerBinary, "events", "--since", since, "--until", until, "--filter", "container="+ID)
+		out, _, err = runCommandWithOutput(eventsCmd)
+		if err != nil {
+			c.Fatal(err)
 		}
-		if len(events) > 1 {
-			startEvent := strings.Fields(events[1])
-			if startEvent[len(startEvent)-1] != "start" {
-				t.Fatalf("second event should be start, not %#v", startEvent)
-			}
-		}
-		if len(events) == 3 {
-			dieEvent := strings.Fields(events[len(events)-1])
-			if dieEvent[len(dieEvent)-1] != "die" {
-				t.Fatalf("event should be die, not %#v", dieEvent)
-			}
+
+		events = strings.Split(strings.TrimSuffix(out, "\n"), "\n")
+		if err := checkEvents(ID, events); err != nil {
+			c.Fatal(err)
 		}
 	}
 
-	logDone("events - filters using container id")
 }
 
-func TestEventsFilterContainerName(t *testing.T) {
-	since := daemonTime(t).Unix()
-	defer deleteAllContainers()
+func (s *DockerSuite) TestEventsStreaming(c *check.C) {
+	start := daemonTime(c).Unix()
 
-	_, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_1", "busybox", "true"))
+	id := make(chan string)
+	eventCreate := make(chan struct{})
+	eventStart := make(chan struct{})
+	eventDie := make(chan struct{})
+	eventDestroy := make(chan struct{})
+
+	eventsCmd := exec.Command(dockerBinary, "events", "--since", strconv.FormatInt(start, 10))
+	stdout, err := eventsCmd.StdoutPipe()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-
-	_, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_2", "busybox", "true"))
-	if err != nil {
-		t.Fatal(err)
+	if err := eventsCmd.Start(); err != nil {
+		c.Fatalf("failed to start 'docker events': %s", err)
 	}
+	defer eventsCmd.Process.Kill()
 
-	for _, s := range []string{"container_1", "container_2"} {
-		eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", fmt.Sprintf("container=%s", s))
-		out, _, err := runCommandWithOutput(eventsCmd)
-		if err != nil {
-			t.Fatalf("Failed to get events, error : %s(%s)", err, out)
-		}
-		events := strings.Split(out, "\n")
-		events = events[:len(events)-1]
-		if len(events) == 0 || len(events) > 3 {
-			t.Fatalf("Expected 3 events, got %d: %v", len(events), events)
-		}
-		createEvent := strings.Fields(events[0])
-		if createEvent[len(createEvent)-1] != "create" {
-			t.Fatalf("first event should be create, not %#v", createEvent)
-		}
-		if len(events) > 1 {
-			startEvent := strings.Fields(events[1])
-			if startEvent[len(startEvent)-1] != "start" {
-				t.Fatalf("second event should be start, not %#v", startEvent)
+	go func() {
+		containerID := <-id
+
+		matchCreate := regexp.MustCompile(containerID + `: \(from busybox:latest\) create$`)
+		matchStart := regexp.MustCompile(containerID + `: \(from busybox:latest\) start$`)
+		matchDie := regexp.MustCompile(containerID + `: \(from busybox:latest\) die$`)
+		matchDestroy := regexp.MustCompile(containerID + `: \(from busybox:latest\) destroy$`)
+
+		scanner := bufio.NewScanner(stdout)
+		for scanner.Scan() {
+			switch {
+			case matchCreate.MatchString(scanner.Text()):
+				close(eventCreate)
+			case matchStart.MatchString(scanner.Text()):
+				close(eventStart)
+			case matchDie.MatchString(scanner.Text()):
+				close(eventDie)
+			case matchDestroy.MatchString(scanner.Text()):
+				close(eventDestroy)
 			}
 		}
-		if len(events) == 3 {
-			dieEvent := strings.Fields(events[len(events)-1])
-			if dieEvent[len(dieEvent)-1] != "die" {
-				t.Fatalf("event should be die, not %#v", dieEvent)
-			}
-		}
+	}()
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox:latest", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	cleanedContainerID := strings.TrimSpace(out)
+	id <- cleanedContainerID
+
+	select {
+	case <-time.After(5 * time.Second):
+		c.Fatal("failed to observe container create in timely fashion")
+	case <-eventCreate:
+		// ignore, done
 	}
 
-	logDone("events - filters using container name")
+	select {
+	case <-time.After(5 * time.Second):
+		c.Fatal("failed to observe container start in timely fashion")
+	case <-eventStart:
+		// ignore, done
+	}
+
+	select {
+	case <-time.After(5 * time.Second):
+		c.Fatal("failed to observe container die in timely fashion")
+	case <-eventDie:
+		// ignore, done
+	}
+
+	rmCmd := exec.Command(dockerBinary, "rm", cleanedContainerID)
+	out, _, err = runCommandWithOutput(rmCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+
+	select {
+	case <-time.After(5 * time.Second):
+		c.Fatal("failed to observe container destroy in timely fashion")
+	case <-eventDestroy:
+		// ignore, done
+	}
 }
diff --git a/integration-cli/docker_cli_events_unix_test.go b/integration-cli/docker_cli_events_unix_test.go
index 4e54283..1a08f2b 100644
--- a/integration-cli/docker_cli_events_unix_test.go
+++ b/integration-cli/docker_cli_events_unix_test.go
@@ -8,48 +8,46 @@
 	"io/ioutil"
 	"os"
 	"os/exec"
-	"testing"
 	"unicode"
 
+	"github.com/go-check/check"
 	"github.com/kr/pty"
 )
 
 // #5979
-func TestEventsRedirectStdout(t *testing.T) {
-	since := daemonTime(t).Unix()
-	dockerCmd(t, "run", "busybox", "true")
-	defer deleteAllContainers()
+func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) {
+	since := daemonTime(c).Unix()
+	dockerCmd(c, "run", "busybox", "true")
 
 	file, err := ioutil.TempFile("", "")
 	if err != nil {
-		t.Fatalf("could not create temp file: %v", err)
+		c.Fatalf("could not create temp file: %v", err)
 	}
 	defer os.Remove(file.Name())
 
-	command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, daemonTime(t).Unix(), file.Name())
+	command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, daemonTime(c).Unix(), file.Name())
 	_, tty, err := pty.Open()
 	if err != nil {
-		t.Fatalf("Could not open pty: %v", err)
+		c.Fatalf("Could not open pty: %v", err)
 	}
 	cmd := exec.Command("sh", "-c", command)
 	cmd.Stdin = tty
 	cmd.Stdout = tty
 	cmd.Stderr = tty
 	if err := cmd.Run(); err != nil {
-		t.Fatalf("run err for command %q: %v", command, err)
+		c.Fatalf("run err for command %q: %v", command, err)
 	}
 
 	scanner := bufio.NewScanner(file)
 	for scanner.Scan() {
-		for _, c := range scanner.Text() {
-			if unicode.IsControl(c) {
-				t.Fatalf("found control character %v", []byte(string(c)))
+		for _, ch := range scanner.Text() {
+			if unicode.IsControl(ch) {
+				c.Fatalf("found control character %v", []byte(string(ch)))
 			}
 		}
 	}
 	if err := scanner.Err(); err != nil {
-		t.Fatalf("Scan err for command %q: %v", command, err)
+		c.Fatalf("Scan err for command %q: %v", command, err)
 	}
 
-	logDone("events - redirect stdout")
 }
diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go
index 01adc43..9abafb8 100644
--- a/integration-cli/docker_cli_exec_test.go
+++ b/integration-cli/docker_cli_exec_test.go
@@ -12,224 +12,170 @@
 	"sort"
 	"strings"
 	"sync"
-	"testing"
 	"time"
+
+	"github.com/go-check/check"
 )
 
-func TestExec(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExec(c *check.C) {
 
-	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100")
+	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top")
 	if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file")
 	out, _, err := runCommandWithOutput(execCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	out = strings.Trim(out, "\r\n")
 
 	if expected := "test"; out != expected {
-		t.Errorf("container exec should've printed %q but printed %q", expected, out)
+		c.Errorf("container exec should've printed %q but printed %q", expected, out)
 	}
 
-	logDone("exec - basic test")
 }
 
-func TestExecInteractiveStdinClose(t *testing.T) {
-	defer deleteAllContainers()
-	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "busybox", "/bin/cat"))
-	if err != nil {
-		t.Fatal(err)
-	}
+func (s *DockerSuite) TestExecInteractive(c *check.C) {
 
-	contId := strings.TrimSpace(out)
-
-	returnchan := make(chan struct{})
-
-	go func() {
-		var err error
-		cmd := exec.Command(dockerBinary, "exec", "-i", contId, "/bin/ls", "/")
-		cmd.Stdin = os.Stdin
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		out, err := cmd.CombinedOutput()
-		if err != nil {
-			t.Fatal(err, string(out))
-		}
-
-		if string(out) == "" {
-			t.Fatalf("Output was empty, likely blocked by standard input")
-		}
-
-		returnchan <- struct{}{}
-	}()
-
-	select {
-	case <-returnchan:
-	case <-time.After(10 * time.Second):
-		t.Fatal("timed out running docker exec")
-	}
-
-	logDone("exec - interactive mode closes stdin after execution")
-}
-
-func TestExecInteractive(t *testing.T) {
-	defer deleteAllContainers()
-
-	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100")
+	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top")
 	if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh")
 	stdin, err := execCmd.StdinPipe()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	stdout, err := execCmd.StdoutPipe()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err := execCmd.Start(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	r := bufio.NewReader(stdout)
 	line, err := r.ReadString('\n')
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	line = strings.TrimSpace(line)
 	if line != "test" {
-		t.Fatalf("Output should be 'test', got '%q'", line)
+		c.Fatalf("Output should be 'test', got '%q'", line)
 	}
 	if err := stdin.Close(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	finish := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
-		if err := execCmd.Wait(); err != nil {
-			t.Fatal(err)
-		}
-		close(finish)
+		errChan <- execCmd.Wait()
+		close(errChan)
 	}()
 	select {
-	case <-finish:
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
 	case <-time.After(1 * time.Second):
-		t.Fatal("docker exec failed to exit on stdin close")
+		c.Fatal("docker exec failed to exit on stdin close")
 	}
 
-	logDone("exec - Interactive test")
 }
 
-func TestExecAfterContainerRestart(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "exec", cleanedContainerID, "echo", "hello")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	outStr := strings.TrimSpace(out)
 	if outStr != "hello" {
-		t.Errorf("container should've printed hello, instead printed %q", outStr)
+		c.Errorf("container should've printed hello, instead printed %q", outStr)
 	}
 
-	logDone("exec - exec running container after container restart")
 }
 
-func TestExecAfterDaemonRestart(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
-	d := NewDaemon(t)
-	if err := d.StartWithBusybox(); err != nil {
-		t.Fatalf("Could not start daemon with busybox: %v", err)
-	}
-	defer d.Stop()
-
-	if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
-		t.Fatalf("Could not run top: err=%v\n%s", err, out)
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
 	}
 
-	if err := d.Restart(); err != nil {
-		t.Fatalf("Could not restart daemon: %v", err)
+	if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil {
+		c.Fatalf("Could not run top: err=%v\n%s", err, out)
 	}
 
-	if out, err := d.Cmd("start", "top"); err != nil {
-		t.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out)
+	if err := s.d.Restart(); err != nil {
+		c.Fatalf("Could not restart daemon: %v", err)
 	}
 
-	out, err := d.Cmd("exec", "top", "echo", "hello")
+	if out, err := s.d.Cmd("start", "top"); err != nil {
+		c.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out)
+	}
+
+	out, err := s.d.Cmd("exec", "top", "echo", "hello")
 	if err != nil {
-		t.Fatalf("Could not exec on container top: err=%v\n%s", err, out)
+		c.Fatalf("Could not exec on container top: err=%v\n%s", err, out)
 	}
 
 	outStr := strings.TrimSpace(string(out))
 	if outStr != "hello" {
-		t.Errorf("container should've printed hello, instead printed %q", outStr)
+		c.Errorf("container should've printed hello, instead printed %q", outStr)
 	}
-
-	logDone("exec - exec running container after daemon restart")
 }
 
 // Regression test for #9155, #9044
-func TestExecEnv(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecEnv(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run",
 		"-e", "LALA=value1",
 		"-e", "LALA=value2",
 		"-d", "--name", "testing", "busybox", "top")
 	if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	execCmd := exec.Command(dockerBinary, "exec", "testing", "env")
 	out, _, err := runCommandWithOutput(execCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if strings.Contains(out, "LALA=value1") ||
 		!strings.Contains(out, "LALA=value2") ||
 		!strings.Contains(out, "HOME=/root") {
-		t.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root")
+		c.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root")
 	}
 
-	logDone("exec - exec inherits correct env")
 }
 
-func TestExecExitStatus(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecExitStatus(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top")
 	if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// Test normal (non-detached) case first
@@ -237,193 +183,191 @@
 	ec, _ := runCommand(cmd)
 
 	if ec != 23 {
-		t.Fatalf("Should have had an ExitCode of 23, not: %d", ec)
+		c.Fatalf("Should have had an ExitCode of 23, not: %d", ec)
 	}
 
-	logDone("exec - exec non-zero ExitStatus")
 }
 
-func TestExecPausedContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecPausedContainer(c *check.C) {
 	defer unpauseAllContainers()
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	ContainerID := stripTrailingCharacters(out)
+	ContainerID := strings.TrimSpace(out)
 
 	pausedCmd := exec.Command(dockerBinary, "pause", "testing")
 	out, _, _, err = runCommandWithStdoutStderr(pausedCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	execCmd := exec.Command(dockerBinary, "exec", "-i", "-t", ContainerID, "echo", "hello")
 	out, _, err = runCommandWithOutput(execCmd)
 	if err == nil {
-		t.Fatal("container should fail to exec new command if it is paused")
+		c.Fatal("container should fail to exec new command if it is paused")
 	}
 
 	expected := ContainerID + " is paused, unpause the container before exec"
 	if !strings.Contains(out, expected) {
-		t.Fatal("container should not exec new command if it is paused")
+		c.Fatal("container should not exec new command if it is paused")
 	}
 
-	logDone("exec - exec should not exec a pause container")
 }
 
 // regression test for #9476
-func TestExecTtyCloseStdin(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecTtyCloseStdin(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	cmd = exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat")
 	stdinRw, err := cmd.StdinPipe()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	stdinRw.Write([]byte("test"))
 	stdinRw.Close()
 
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	cmd = exec.Command(dockerBinary, "top", "exec_tty_stdin")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	outArr := strings.Split(out, "\n")
 	if len(outArr) > 3 || strings.Contains(out, "nsenter-exec") {
 		// This is the really bad part
 		if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "exec_tty_stdin")); err != nil {
-			t.Fatal(out, err)
+			c.Fatal(out, err)
 		}
 
-		t.Fatalf("exec process left running\n\t %s", out)
+		c.Fatalf("exec process left running\n\t %s", out)
 	}
 
-	logDone("exec - stdin is closed properly with tty enabled")
 }
 
-func TestExecTtyWithoutStdin(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecTtyWithoutStdin(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to start container: %v (%v)", out, err)
+		c.Fatalf("failed to start container: %v (%v)", out, err)
 	}
 
 	id := strings.TrimSpace(out)
 	if err := waitRun(id); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	defer func() {
 		cmd := exec.Command(dockerBinary, "kill", id)
 		if out, _, err := runCommandWithOutput(cmd); err != nil {
-			t.Fatalf("failed to kill container: %v (%v)", out, err)
+			c.Fatalf("failed to kill container: %v (%v)", out, err)
 		}
 	}()
 
-	done := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
-		defer close(done)
+		defer close(errChan)
 
 		cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true")
 		if _, err := cmd.StdinPipe(); err != nil {
-			t.Fatal(err)
+			errChan <- err
+			return
 		}
 
 		expected := "cannot enable tty mode"
 		if out, _, err := runCommandWithOutput(cmd); err == nil {
-			t.Fatal("exec should have failed")
+			errChan <- fmt.Errorf("exec should have failed")
+			return
 		} else if !strings.Contains(out, expected) {
-			t.Fatalf("exec failed with error %q: expected %q", out, expected)
+			errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected)
+			return
 		}
 	}()
 
 	select {
-	case <-done:
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
 	case <-time.After(3 * time.Second):
-		t.Fatal("exec is running but should have failed")
+		c.Fatal("exec is running but should have failed")
 	}
 
-	logDone("exec - forbid piped stdin to tty enabled container")
 }
 
-func TestExecParseError(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecParseError(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// Test normal (non-detached) case first
 	cmd := exec.Command(dockerBinary, "exec", "top")
 	if _, stderr, code, err := runCommandWithStdoutStderr(cmd); err == nil || !strings.Contains(stderr, "See '"+dockerBinary+" exec --help'") || code == 0 {
-		t.Fatalf("Should have thrown error & point to help: %s", stderr)
+		c.Fatalf("Should have thrown error & point to help: %s", stderr)
 	}
-	logDone("exec - error on parseExec should point to help")
 }
 
-func TestExecStopNotHanging(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecStopNotHanging(c *check.C) {
 	if out, err := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top").CombinedOutput(); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if err := exec.Command(dockerBinary, "exec", "testing", "top").Start(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	wait := make(chan struct{})
+	type dstop struct {
+		out []byte
+		err error
+	}
+
+	ch := make(chan dstop)
 	go func() {
-		if out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput(); err != nil {
-			t.Fatal(out, err)
-		}
-		close(wait)
+		out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput()
+		ch <- dstop{out, err}
+		close(ch)
 	}()
 	select {
 	case <-time.After(3 * time.Second):
-		t.Fatal("Container stop timed out")
-	case <-wait:
+		c.Fatal("Container stop timed out")
+	case s := <-ch:
+		c.Assert(s.err, check.IsNil)
 	}
-	logDone("exec - container with exec not hanging on stop")
 }
 
-func TestExecCgroup(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestExecCgroup(c *check.C) {
 	var cmd *exec.Cmd
 
 	cmd = exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top")
 	_, err := runCommand(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "exec", "testing", "cat", "/proc/1/cgroup")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerCgroups := sort.StringSlice(strings.Split(string(out), "\n"))
 
 	var wg sync.WaitGroup
-	var s sync.Mutex
+	var mu sync.Mutex
 	execCgroups := []sort.StringSlice{}
+	errChan := make(chan error)
 	// exec a few times concurrently to get consistent failure
 	for i := 0; i < 5; i++ {
 		wg.Add(1)
@@ -431,17 +375,23 @@
 			cmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/proc/self/cgroup")
 			out, _, err := runCommandWithOutput(cmd)
 			if err != nil {
-				t.Fatal(out, err)
+				errChan <- err
+				return
 			}
 			cg := sort.StringSlice(strings.Split(string(out), "\n"))
 
-			s.Lock()
+			mu.Lock()
 			execCgroups = append(execCgroups, cg)
-			s.Unlock()
+			mu.Unlock()
 			wg.Done()
 		}()
 	}
 	wg.Wait()
+	close(errChan)
+
+	for err := range errChan {
+		c.Assert(err, check.IsNil)
+	}
 
 	for _, cg := range execCgroups {
 		if !reflect.DeepEqual(cg, containerCgroups) {
@@ -454,86 +404,81 @@
 			for _, name := range containerCgroups {
 				fmt.Printf(" %s\n", name)
 			}
-			t.Fatal("cgroups mismatched")
+			c.Fatal("cgroups mismatched")
 		}
 	}
 
-	logDone("exec - exec has the container cgroups")
 }
 
-func TestInspectExecID(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestInspectExecID(c *check.C) {
 
 	out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "top"))
 	if exitCode != 0 || err != nil {
-		t.Fatalf("failed to run container: %s, %v", out, err)
+		c.Fatalf("failed to run container: %s, %v", out, err)
 	}
 	id := strings.TrimSuffix(out, "\n")
 
 	out, err = inspectField(id, "ExecIDs")
 	if err != nil {
-		t.Fatalf("failed to inspect container: %s, %v", out, err)
+		c.Fatalf("failed to inspect container: %s, %v", out, err)
 	}
-	if out != "<no value>" {
-		t.Fatalf("ExecIDs should be empty, got: %s", out)
+	if out != "[]" {
+		c.Fatalf("ExecIDs should be empty, got: %s", out)
 	}
 
 	exitCode, err = runCommand(exec.Command(dockerBinary, "exec", "-d", id, "ls", "/"))
 	if exitCode != 0 || err != nil {
-		t.Fatalf("failed to exec in container: %s, %v", out, err)
+		c.Fatalf("failed to exec in container: %s, %v", out, err)
 	}
 
 	out, err = inspectField(id, "ExecIDs")
 	if err != nil {
-		t.Fatalf("failed to inspect container: %s, %v", out, err)
+		c.Fatalf("failed to inspect container: %s, %v", out, err)
 	}
 
 	out = strings.TrimSuffix(out, "\n")
 	if out == "[]" || out == "<no value>" {
-		t.Fatalf("ExecIDs should not be empty, got: %s", out)
+		c.Fatalf("ExecIDs should not be empty, got: %s", out)
 	}
 
-	logDone("inspect - inspect a container with ExecIDs")
 }
 
-func TestLinksPingLinkedContainersOnRename(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) {
 
 	var out string
-	out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
-	idA := stripTrailingCharacters(out)
+	out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top")
+	idA := strings.TrimSpace(out)
 	if idA == "" {
-		t.Fatal(out, "id should not be nil")
+		c.Fatal(out, "id should not be nil")
 	}
-	out, _, _ = dockerCmd(t, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "sleep", "10")
-	idB := stripTrailingCharacters(out)
+	out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top")
+	idB := strings.TrimSpace(out)
 	if idB == "" {
-		t.Fatal(out, "id should not be nil")
+		c.Fatal(out, "id should not be nil")
 	}
 
 	execCmd := exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
 	out, _, err := runCommandWithOutput(execCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	dockerCmd(t, "rename", "container1", "container_new")
+	dockerCmd(c, "rename", "container1", "container_new")
 
 	execCmd = exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1")
 	out, _, err = runCommandWithOutput(execCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	logDone("links - ping linked container upon rename")
 }
 
-func TestRunExecDir(t *testing.T) {
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunExecDir(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	id := strings.TrimSpace(out)
 	execDir := filepath.Join(execDriverPath, id)
@@ -542,92 +487,90 @@
 	{
 		fi, err := os.Stat(execDir)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		if !fi.IsDir() {
-			t.Fatalf("%q must be a directory", execDir)
+			c.Fatalf("%q must be a directory", execDir)
 		}
 		fi, err = os.Stat(stateFile)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 	}
 
 	stopCmd := exec.Command(dockerBinary, "stop", id)
 	out, _, err = runCommandWithOutput(stopCmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	{
 		_, err := os.Stat(execDir)
 		if err == nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		if err == nil {
-			t.Fatalf("Exec directory %q exists for removed container!", execDir)
+			c.Fatalf("Exec directory %q exists for removed container!", execDir)
 		}
 		if !os.IsNotExist(err) {
-			t.Fatalf("Error should be about non-existing, got %s", err)
+			c.Fatalf("Error should be about non-existing, got %s", err)
 		}
 	}
 	startCmd := exec.Command(dockerBinary, "start", id)
 	out, _, err = runCommandWithOutput(startCmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	{
 		fi, err := os.Stat(execDir)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		if !fi.IsDir() {
-			t.Fatalf("%q must be a directory", execDir)
+			c.Fatalf("%q must be a directory", execDir)
 		}
 		fi, err = os.Stat(stateFile)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 	}
 	rmCmd := exec.Command(dockerBinary, "rm", "-f", id)
 	out, _, err = runCommandWithOutput(rmCmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	{
 		_, err := os.Stat(execDir)
 		if err == nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		if err == nil {
-			t.Fatalf("Exec directory %q is exists for removed container!", execDir)
+			c.Fatalf("Exec directory %q is exists for removed container!", execDir)
 		}
 		if !os.IsNotExist(err) {
-			t.Fatalf("Error should be about non-existing, got %s", err)
+			c.Fatalf("Error should be about non-existing, got %s", err)
 		}
 	}
 
-	logDone("run - check execdriver dir behavior")
 }
 
-func TestRunMutableNetworkFiles(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	for _, fn := range []string{"resolv.conf", "hosts"} {
 		deleteAllContainers()
 
 		content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn)))
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 
 		if strings.TrimSpace(string(content)) != "success" {
-			t.Fatal("Content was not what was modified in the container", string(content))
+			c.Fatal("Content was not what was modified in the container", string(content))
 		}
 
 		out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "top"))
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 
 		contID := strings.TrimSpace(out)
@@ -636,32 +579,58 @@
 
 		f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 
 		if _, err := f.Seek(0, 0); err != nil {
 			f.Close()
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 
 		if err := f.Truncate(0); err != nil {
 			f.Close()
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 
 		if _, err := f.Write([]byte("success2\n")); err != nil {
 			f.Close()
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		f.Close()
 
 		res, err := exec.Command(dockerBinary, "exec", contID, "cat", "/etc/"+fn).CombinedOutput()
 		if err != nil {
-			t.Fatalf("Output: %s, error: %s", res, err)
+			c.Fatalf("Output: %s, error: %s", res, err)
 		}
 		if string(res) != "success2\n" {
-			t.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res)
+			c.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res)
 		}
 	}
-	logDone("run - mutable network files")
+}
+
+func (s *DockerSuite) TestExecWithUser(c *check.C) {
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top")
+	if out, _, err := runCommandWithOutput(runCmd); err != nil {
+		c.Fatal(out, err)
+	}
+
+	cmd := exec.Command(dockerBinary, "exec", "-u", "1", "parent", "id")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out)
+	}
+	if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") {
+		c.Fatalf("exec with user by id expected daemon user got %s", out)
+	}
+
+	cmd = exec.Command(dockerBinary, "exec", "-u", "root", "parent", "id")
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out)
+	}
+	if !strings.Contains(out, "uid=0(root) gid=0(root)") {
+		c.Fatalf("exec with user by root expected root user got %s", out)
+	}
+
 }
diff --git a/integration-cli/docker_cli_exec_unix_test.go b/integration-cli/docker_cli_exec_unix_test.go
new file mode 100644
index 0000000..bee44b9
--- /dev/null
+++ b/integration-cli/docker_cli_exec_unix_test.go
@@ -0,0 +1,47 @@
+// +build !windows,!test_no_exec
+
+package main
+
+import (
+	"bytes"
+	"io"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/go-check/check"
+	"github.com/kr/pty"
+)
+
+// regression test for #12546
+func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "busybox", "/bin/cat"))
+	if err != nil {
+		c.Fatal(err)
+	}
+	contId := strings.TrimSpace(out)
+
+	cmd := exec.Command(dockerBinary, "exec", "-i", contId, "echo", "-n", "hello")
+	p, err := pty.Start(cmd)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	b := bytes.NewBuffer(nil)
+	go io.Copy(b, p)
+
+	ch := make(chan error)
+	go func() { ch <- cmd.Wait() }()
+
+	select {
+	case err := <-ch:
+		if err != nil {
+			c.Errorf("cmd finished with error %v", err)
+		}
+		if output := b.String(); strings.TrimSpace(output) != "hello" {
+			c.Fatalf("Unexpected output %s", output)
+		}
+	case <-time.After(1 * time.Second):
+		c.Fatal("timed out running docker exec")
+	}
+}
diff --git a/integration-cli/docker_cli_experimental_test.go b/integration-cli/docker_cli_experimental_test.go
new file mode 100644
index 0000000..4cf05c9
--- /dev/null
+++ b/integration-cli/docker_cli_experimental_test.go
@@ -0,0 +1,24 @@
+// +build experimental
+
+package main
+
+import (
+	"os/exec"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestExperimentalVersion(c *check.C) {
+	versionCmd := exec.Command(dockerBinary, "version")
+	out, _, err := runCommandWithOutput(versionCmd)
+	if err != nil {
+		c.Fatalf("failed to execute docker version: %s, %v", out, err)
+	}
+
+	for _, line := range strings.Split(out, "\n") {
+		if strings.HasPrefix(line, "Client version:") || strings.HasPrefix(line, "Server version:") {
+			c.Assert(line, check.Matches, "*-experimental")
+		}
+	}
+}
diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go
index 5b2a016..2277270 100644
--- a/integration-cli/docker_cli_export_import_test.go
+++ b/integration-cli/docker_cli_export_import_test.go
@@ -4,96 +4,69 @@
 	"os"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // export an image and try to import it into a new one
-func TestExportContainerAndImportImage(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) {
+	containerID := "testexportcontainerandimportimage"
+
+	runCmd := exec.Command(dockerBinary, "run", "--name", containerID, "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal("failed to create a container", out, err)
+		c.Fatal("failed to create a container", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	out, _, err = runCommandWithOutput(inspectCmd)
-	if err != nil {
-		t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err)
-	}
-
-	exportCmd := exec.Command(dockerBinary, "export", cleanedContainerID)
+	exportCmd := exec.Command(dockerBinary, "export", containerID)
 	if out, _, err = runCommandWithOutput(exportCmd); err != nil {
-		t.Fatalf("failed to export container: %s, %v", out, err)
+		c.Fatalf("failed to export container: %s, %v", out, err)
 	}
 
 	importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1")
 	importCmd.Stdin = strings.NewReader(out)
 	out, _, err = runCommandWithOutput(importCmd)
 	if err != nil {
-		t.Fatalf("failed to import image: %s, %v", out, err)
+		c.Fatalf("failed to import image: %s, %v", out, err)
 	}
 
-	cleanedImageID := stripTrailingCharacters(out)
-
-	inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID)
-	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("output should've been an image id: %s, %v", out, err)
+	cleanedImageID := strings.TrimSpace(out)
+	if cleanedImageID == "" {
+		c.Fatalf("output should have been an image id, got: %s", out)
 	}
-
-	deleteContainer(cleanedContainerID)
-	deleteImages("repo/testexp:v1")
-
-	logDone("export - export a container")
-	logDone("import - import an image")
 }
 
 // Used to test output flag in the export command
-func TestExportContainerWithOutputAndImportImage(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) {
+	containerID := "testexportcontainerwithoutputandimportimage"
+
+	runCmd := exec.Command(dockerBinary, "run", "--name", containerID, "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal("failed to create a container", out, err)
+		c.Fatal("failed to create a container", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	defer os.Remove("testexp.tar")
 
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	out, _, err = runCommandWithOutput(inspectCmd)
-	if err != nil {
-		t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err)
-	}
-
-	exportCmd := exec.Command(dockerBinary, "export", "--output=testexp.tar", cleanedContainerID)
+	exportCmd := exec.Command(dockerBinary, "export", "--output=testexp.tar", containerID)
 	if out, _, err = runCommandWithOutput(exportCmd); err != nil {
-		t.Fatalf("failed to export container: %s, %v", out, err)
+		c.Fatalf("failed to export container: %s, %v", out, err)
 	}
 
 	out, _, err = runCommandWithOutput(exec.Command("cat", "testexp.tar"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1")
 	importCmd.Stdin = strings.NewReader(out)
 	out, _, err = runCommandWithOutput(importCmd)
 	if err != nil {
-		t.Fatalf("failed to import image: %s, %v", out, err)
+		c.Fatalf("failed to import image: %s, %v", out, err)
 	}
 
-	cleanedImageID := stripTrailingCharacters(out)
-
-	inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID)
-	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("output should've been an image id: %s, %v", out, err)
+	cleanedImageID := strings.TrimSpace(out)
+	if cleanedImageID == "" {
+		c.Fatalf("output should have been an image id, got: %s", out)
 	}
-
-	deleteContainer(cleanedContainerID)
-	deleteImages("repo/testexp:v1")
-
-	os.Remove("/tmp/testexp.tar")
-
-	logDone("export - export a container with output flag")
-	logDone("import - import an image with output flag")
 }
diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go
index 8fc5cd1..86b0b3b 100644
--- a/integration-cli/docker_cli_help_test.go
+++ b/integration-cli/docker_cli_help_test.go
@@ -5,13 +5,13 @@
 	"os/exec"
 	"runtime"
 	"strings"
-	"testing"
 	"unicode"
 
 	"github.com/docker/docker/pkg/homedir"
+	"github.com/go-check/check"
 )
 
-func TestHelpTextVerify(t *testing.T) {
+func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
 	// Make sure main help text fits within 80 chars and that
 	// on non-windows system we use ~ when possible (to shorten things).
 	// Test for HOME set to its default value and set to "/" on linux
@@ -51,26 +51,26 @@
 		helpCmd.Env = newEnvs
 		out, ec, err := runCommandWithOutput(helpCmd)
 		if err != nil || ec != 0 {
-			t.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec)
+			c.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec)
 		}
 		lines := strings.Split(out, "\n")
 		for _, line := range lines {
 			if len(line) > 80 {
-				t.Fatalf("Line is too long(%d chars):\n%s", len(line), line)
+				c.Fatalf("Line is too long(%d chars):\n%s", len(line), line)
 			}
 
 			// All lines should not end with a space
 			if strings.HasSuffix(line, " ") {
-				t.Fatalf("Line should not end with a space: %s", line)
+				c.Fatalf("Line should not end with a space: %s", line)
 			}
 
 			if scanForHome && strings.Contains(line, `=`+home) {
-				t.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line)
+				c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line)
 			}
 			if runtime.GOOS != "windows" {
 				i := strings.Index(line, homedir.GetShortcutString())
 				if i >= 0 && i != len(line)-1 && line[i+1] != '/' {
-					t.Fatalf("Main help should not have used home shortcut:\n%s", line)
+					c.Fatalf("Main help should not have used home shortcut:\n%s", line)
 				}
 			}
 		}
@@ -82,11 +82,11 @@
 		helpCmd.Env = newEnvs
 		out, ec, err = runCommandWithOutput(helpCmd)
 		if err != nil || ec != 0 {
-			t.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec)
+			c.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec)
 		}
 		i := strings.Index(out, "Commands:")
 		if i < 0 {
-			t.Fatalf("Missing 'Commands:' in:\n%s", out)
+			c.Fatalf("Missing 'Commands:' in:\n%s", out)
 		}
 
 		// Grab all chars starting at "Commands:"
@@ -106,39 +106,39 @@
 			helpCmd.Env = newEnvs
 			out, ec, err := runCommandWithOutput(helpCmd)
 			if err != nil || ec != 0 {
-				t.Fatalf("Error on %q help: %s\nexit code:%d", cmd, out, ec)
+				c.Fatalf("Error on %q help: %s\nexit code:%d", cmd, out, ec)
 			}
 			lines := strings.Split(out, "\n")
 			for _, line := range lines {
 				if len(line) > 80 {
-					t.Fatalf("Help for %q is too long(%d chars):\n%s", cmd,
+					c.Fatalf("Help for %q is too long(%d chars):\n%s", cmd,
 						len(line), line)
 				}
 
 				if scanForHome && strings.Contains(line, `"`+home) {
-					t.Fatalf("Help for %q should use ~ instead of %q on:\n%s",
+					c.Fatalf("Help for %q should use ~ instead of %q on:\n%s",
 						cmd, home, line)
 				}
 				i := strings.Index(line, "~")
 				if i >= 0 && i != len(line)-1 && line[i+1] != '/' {
-					t.Fatalf("Help for %q should not have used ~:\n%s", cmd, line)
+					c.Fatalf("Help for %q should not have used ~:\n%s", cmd, line)
 				}
 
 				// If a line starts with 4 spaces then assume someone
 				// added a multi-line description for an option and we need
 				// to flag it
 				if strings.HasPrefix(line, "    ") {
-					t.Fatalf("Help for %q should not have a multi-line option: %s", cmd, line)
+					c.Fatalf("Help for %q should not have a multi-line option: %s", cmd, line)
 				}
 
 				// Options should NOT end with a period
 				if strings.HasPrefix(line, "  -") && strings.HasSuffix(line, ".") {
-					t.Fatalf("Help for %q should not end with a period: %s", cmd, line)
+					c.Fatalf("Help for %q should not end with a period: %s", cmd, line)
 				}
 
 				// Options should NOT end with a space
 				if strings.HasSuffix(line, " ") {
-					t.Fatalf("Help for %q should not end with a space: %s", cmd, line)
+					c.Fatalf("Help for %q should not end with a space: %s", cmd, line)
 				}
 
 			}
@@ -146,10 +146,38 @@
 
 		expected := 39
 		if len(cmds) != expected {
-			t.Fatalf("Wrong # of cmds(%d), it should be: %d\nThe list:\n%q",
+			c.Fatalf("Wrong # of cmds(%d), it should be: %d\nThe list:\n%q",
 				len(cmds), expected, cmds)
 		}
 	}
 
-	logDone("help - verify text")
+}
+
+func (s *DockerSuite) TestHelpErrorStderr(c *check.C) {
+	// If we had a generic CLI test file this one shoudl go in there
+
+	cmd := exec.Command(dockerBinary, "boogie")
+	out, ec, err := runCommandWithOutput(cmd)
+	if err == nil || ec == 0 {
+		c.Fatalf("Boogie command should have failed")
+	}
+
+	expected := "docker: 'boogie' is not a docker command. See 'docker --help'.\n"
+	if out != expected {
+		c.Fatalf("Bad output from boogie\nGot:%s\nExpected:%s", out, expected)
+	}
+
+	cmd = exec.Command(dockerBinary, "rename", "foo", "bar")
+	out, ec, err = runCommandWithOutput(cmd)
+	if err == nil || ec == 0 {
+		c.Fatalf("Rename should have failed")
+	}
+
+	expected = `Error response from daemon: no such id: foo
+Error: failed to rename container named foo
+`
+	if out != expected {
+		c.Fatalf("Bad output from rename\nGot:%s\nExpected:%s", out, expected)
+	}
+
 }
diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go
index ecb0a3a..d229f1a 100644
--- a/integration-cli/docker_cli_history_test.go
+++ b/integration-cli/docker_cli_history_test.go
@@ -3,15 +3,17 @@
 import (
 	"fmt"
 	"os/exec"
+	"regexp"
+	"strconv"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // This is a heisen-test.  Because the created timestamp of images and the behavior of
 // sort is not predictable it doesn't always fail.
-func TestBuildHistory(t *testing.T) {
+func (s *DockerSuite) TestBuildHistory(c *check.C) {
 	name := "testbuildhistory"
-	defer deleteImages(name)
 	_, err := buildImage(name, `FROM busybox
 RUN echo "A"
 RUN echo "B"
@@ -42,12 +44,12 @@
 		true)
 
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", "testbuildhistory"))
 	if err != nil || exitCode != 0 {
-		t.Fatalf("failed to get image history: %s, %v", out, err)
+		c.Fatalf("failed to get image history: %s, %v", out, err)
 	}
 
 	actualValues := strings.Split(out, "\n")[1:27]
@@ -58,27 +60,101 @@
 		actualValue := actualValues[i]
 
 		if !strings.Contains(actualValue, echoValue) {
-			t.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue)
+			c.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue)
 		}
 	}
 
-	logDone("history - build history")
 }
 
-func TestHistoryExistentImage(t *testing.T) {
+func (s *DockerSuite) TestHistoryExistentImage(c *check.C) {
 	historyCmd := exec.Command(dockerBinary, "history", "busybox")
 	_, exitCode, err := runCommandWithOutput(historyCmd)
 	if err != nil || exitCode != 0 {
-		t.Fatal("failed to get image history")
+		c.Fatal("failed to get image history")
 	}
-	logDone("history - history on existent image must pass")
 }
 
-func TestHistoryNonExistentImage(t *testing.T) {
+func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) {
 	historyCmd := exec.Command(dockerBinary, "history", "testHistoryNonExistentImage")
 	_, exitCode, err := runCommandWithOutput(historyCmd)
 	if err == nil || exitCode == 0 {
-		t.Fatal("history on a non-existent image didn't result in a non-zero exit status")
+		c.Fatal("history on a non-existent image didn't result in a non-zero exit status")
 	}
-	logDone("history - history on non-existent image must pass")
+}
+
+func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) {
+	name := "testhistoryimagewithcomment"
+
+	// make a image through docker commit <container id> [ -m messages ]
+	//runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo")
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("failed to run container: %s, %v", out, err)
+	}
+
+	waitCmd := exec.Command(dockerBinary, "wait", name)
+	if out, _, err := runCommandWithOutput(waitCmd); err != nil {
+		c.Fatalf("error thrown while waiting for container: %s, %v", out, err)
+	}
+
+	comment := "This_is_a_comment"
+
+	commitCmd := exec.Command(dockerBinary, "commit", "-m="+comment, name, name)
+	if out, _, err := runCommandWithOutput(commitCmd); err != nil {
+		c.Fatalf("failed to commit container to image: %s, %v", out, err)
+	}
+
+	// test docker history <image id> to check comment messages
+	historyCmd := exec.Command(dockerBinary, "history", name)
+	out, exitCode, err := runCommandWithOutput(historyCmd)
+	if err != nil || exitCode != 0 {
+		c.Fatalf("failed to get image history: %s, %v", out, err)
+	}
+
+	outputTabs := strings.Fields(strings.Split(out, "\n")[1])
+	//outputTabs := regexp.MustCompile("  +").Split(outputLine, -1)
+	actualValue := outputTabs[len(outputTabs)-1]
+
+	if !strings.Contains(actualValue, comment) {
+		c.Fatalf("Expected comments %q, but found %q", comment, actualValue)
+	}
+
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) {
+	out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "history", "--human=false", "busybox"))
+	lines := strings.Split(out, "\n")
+	sizeColumnRegex, _ := regexp.Compile("SIZE +")
+	indices := sizeColumnRegex.FindStringIndex(lines[0])
+	startIndex := indices[0]
+	endIndex := indices[1]
+	for i := 1; i < len(lines)-1; i++ {
+		if endIndex > len(lines[i]) {
+			endIndex = len(lines[i])
+		}
+		sizeString := lines[i][startIndex:endIndex]
+		if _, err := strconv.Atoi(strings.TrimSpace(sizeString)); err != nil {
+			c.Fatalf("The size '%s' was not an Integer", sizeString)
+		}
+	}
+}
+
+func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) {
+	out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "history", "--human=true", "busybox"))
+	lines := strings.Split(out, "\n")
+	sizeColumnRegex, _ := regexp.Compile("SIZE +")
+	humanSizeRegex, _ := regexp.Compile("^\\d+.*B$") // Matches human sizes like 10 MB, 3.2 KB, etc
+	indices := sizeColumnRegex.FindStringIndex(lines[0])
+	startIndex := indices[0]
+	endIndex := indices[1]
+	for i := 1; i < len(lines)-1; i++ {
+		if endIndex > len(lines[i]) {
+			endIndex = len(lines[i])
+		}
+		sizeString := lines[i][startIndex:endIndex]
+		if matchSuccess := humanSizeRegex.MatchString(strings.TrimSpace(sizeString)); !matchSuccess {
+			c.Fatalf("The size '%s' was not in human format", sizeString)
+		}
+	}
 }
diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go
index 6949711..0ab6462 100644
--- a/integration-cli/docker_cli_images_test.go
+++ b/integration-cli/docker_cli_images_test.go
@@ -6,137 +6,124 @@
 	"reflect"
 	"sort"
 	"strings"
-	"testing"
 	"time"
 
-	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/go-check/check"
 )
 
-func TestImagesEnsureImageIsListed(t *testing.T) {
+func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) {
 	imagesCmd := exec.Command(dockerBinary, "images")
 	out, _, err := runCommandWithOutput(imagesCmd)
 	if err != nil {
-		t.Fatalf("listing images failed with errors: %s, %v", out, err)
+		c.Fatalf("listing images failed with errors: %s, %v", out, err)
 	}
 
 	if !strings.Contains(out, "busybox") {
-		t.Fatal("images should've listed busybox")
+		c.Fatal("images should've listed busybox")
 	}
 
-	logDone("images - busybox should be listed")
 }
 
-func TestImagesOrderedByCreationDate(t *testing.T) {
-	defer deleteImages("order:test_a")
-	defer deleteImages("order:test_c")
-	defer deleteImages("order:test_b")
+func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) {
 	id1, err := buildImage("order:test_a",
 		`FROM scratch
 		MAINTAINER dockerio1`, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	time.Sleep(time.Second)
 	id2, err := buildImage("order:test_c",
 		`FROM scratch
 		MAINTAINER dockerio2`, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	time.Sleep(time.Second)
 	id3, err := buildImage("order:test_b",
 		`FROM scratch
 		MAINTAINER dockerio3`, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc"))
 	if err != nil {
-		t.Fatalf("listing images failed with errors: %s, %v", out, err)
+		c.Fatalf("listing images failed with errors: %s, %v", out, err)
 	}
 	imgs := strings.Split(out, "\n")
 	if imgs[0] != id3 {
-		t.Fatalf("First image must be %s, got %s", id3, imgs[0])
+		c.Fatalf("First image must be %s, got %s", id3, imgs[0])
 	}
 	if imgs[1] != id2 {
-		t.Fatalf("Second image must be %s, got %s", id2, imgs[1])
+		c.Fatalf("Second image must be %s, got %s", id2, imgs[1])
 	}
 	if imgs[2] != id1 {
-		t.Fatalf("Third image must be %s, got %s", id1, imgs[2])
+		c.Fatalf("Third image must be %s, got %s", id1, imgs[2])
 	}
 
-	logDone("images - ordering by creation date")
 }
 
-func TestImagesErrorWithInvalidFilterNameTest(t *testing.T) {
+func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) {
 	imagesCmd := exec.Command(dockerBinary, "images", "-f", "FOO=123")
 	out, _, err := runCommandWithOutput(imagesCmd)
 	if !strings.Contains(out, "Invalid filter") {
-		t.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err)
+		c.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err)
 	}
 
-	logDone("images - invalid filter name check working")
 }
 
-func TestImagesFilterLabel(t *testing.T) {
+func (s *DockerSuite) TestImagesFilterLabel(c *check.C) {
 	imageName1 := "images_filter_test1"
 	imageName2 := "images_filter_test2"
 	imageName3 := "images_filter_test3"
-	defer deleteAllContainers()
-	defer deleteImages(imageName1)
-	defer deleteImages(imageName2)
-	defer deleteImages(imageName3)
 	image1ID, err := buildImage(imageName1,
 		`FROM scratch
 		 LABEL match me`, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	image2ID, err := buildImage(imageName2,
 		`FROM scratch
 		 LABEL match="me too"`, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	image3ID, err := buildImage(imageName3,
 		`FROM scratch
 		 LABEL nomatch me`, true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "images", "--no-trunc", "-q", "-f", "label=match")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	out = strings.TrimSpace(out)
 
 	if (!strings.Contains(out, image1ID) && !strings.Contains(out, image2ID)) || strings.Contains(out, image3ID) {
-		t.Fatalf("Expected ids %s,%s got %s", image1ID, image2ID, out)
+		c.Fatalf("Expected ids %s,%s got %s", image1ID, image2ID, out)
 	}
 
 	cmd = exec.Command(dockerBinary, "images", "--no-trunc", "-q", "-f", "label=match=me too")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	out = strings.TrimSpace(out)
 
 	if out != image2ID {
-		t.Fatalf("Expected %s got %s", image2ID, out)
+		c.Fatalf("Expected %s got %s", image2ID, out)
 	}
 
-	logDone("images - filter label")
 }
 
-func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) {
+func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) {
 	imageName := "images_filter_test"
-	defer deleteAllContainers()
-	defer deleteImages(imageName)
 	buildImage(imageName,
 		`FROM scratch
 		 RUN touch /test/foo
@@ -156,7 +143,7 @@
 		cmd := exec.Command(dockerBinary, "images", "-q", "-f", filter)
 		out, _, err := runCommandWithOutput(cmd)
 		if err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 		listing := strings.Split(out, "\n")
 		sort.Strings(listing)
@@ -172,50 +159,45 @@
 				}
 				fmt.Print("")
 			}
-			t.Fatalf("All output must be the same")
+			c.Fatalf("All output must be the same")
 		}
 	}
 
-	logDone("images - white space trimming and lower casing")
 }
 
-func TestImagesEnsureDanglingImageOnlyListedOnce(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) {
 
 	// create container 1
-	c := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
-	out, _, err := runCommandWithOutput(c)
+	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error running busybox: %s, %v", out, err)
+		c.Fatalf("error running busybox: %s, %v", out, err)
 	}
 	containerId1 := strings.TrimSpace(out)
 
 	// tag as foobox
-	c = exec.Command(dockerBinary, "commit", containerId1, "foobox")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "commit", containerId1, "foobox")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error tagging foobox: %s", err)
+		c.Fatalf("error tagging foobox: %s", err)
 	}
-	imageId := common.TruncateID(strings.TrimSpace(out))
-	defer deleteImages(imageId)
+	imageId := stringid.TruncateID(strings.TrimSpace(out))
 
 	// overwrite the tag, making the previous image dangling
-	c = exec.Command(dockerBinary, "tag", "-f", "busybox", "foobox")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "tag", "-f", "busybox", "foobox")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("error tagging foobox: %s", err)
+		c.Fatalf("error tagging foobox: %s", err)
 	}
-	defer deleteImages("foobox")
 
-	c = exec.Command(dockerBinary, "images", "-q", "-f", "dangling=true")
-	out, _, err = runCommandWithOutput(c)
+	cmd = exec.Command(dockerBinary, "images", "-q", "-f", "dangling=true")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("listing images failed with errors: %s, %v", out, err)
+		c.Fatalf("listing images failed with errors: %s, %v", out, err)
 	}
 
 	if e, a := 1, strings.Count(out, imageId); e != a {
-		t.Fatalf("expected 1 dangling image, got %d: %s", a, out)
+		c.Fatalf("expected 1 dangling image, got %d: %s", a, out)
 	}
 
-	logDone("images - dangling image only listed once")
 }
diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go
index 1a7ee6f..f4bd085 100644
--- a/integration-cli/docker_cli_import_test.go
+++ b/integration-cli/docker_cli_import_test.go
@@ -3,41 +3,50 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestImportDisplay(t *testing.T) {
+func (s *DockerSuite) TestImportDisplay(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal("failed to create a container", out, err)
+		c.Fatal("failed to create a container", out, err)
 	}
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	out, _, err = runCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "export", cleanedContainerID),
 		exec.Command(dockerBinary, "import", "-"),
 	)
 	if err != nil {
-		t.Errorf("import failed with errors: %v, output: %q", err, out)
+		c.Errorf("import failed with errors: %v, output: %q", err, out)
 	}
 
 	if n := strings.Count(out, "\n"); n != 1 {
-		t.Fatalf("display is messed up: %d '\\n' instead of 1:\n%s", n, out)
+		c.Fatalf("display is messed up: %d '\\n' instead of 1:\n%s", n, out)
 	}
 	image := strings.TrimSpace(out)
-	defer deleteImages(image)
 
 	runCmd = exec.Command(dockerBinary, "run", "--rm", image, "true")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal("failed to create a container", out, err)
+		c.Fatal("failed to create a container", out, err)
 	}
 
 	if out != "" {
-		t.Fatalf("command output should've been nothing, was %q", out)
+		c.Fatalf("command output should've been nothing, was %q", out)
 	}
 
-	logDone("import - display is fine, imported image runs")
+}
+
+func (s *DockerSuite) TestImportBadURL(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "import", "http://nourl/bad")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err == nil {
+		c.Fatal("import was supposed to fail but didn't")
+	}
+	if !strings.Contains(out, "dial tcp") {
+		c.Fatalf("expected an error msg but didn't get one:\n%s", out)
+	}
 }
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
index 2e8239a..cf7738d 100644
--- a/integration-cli/docker_cli_info_test.go
+++ b/integration-cli/docker_cli_info_test.go
@@ -3,24 +3,40 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/docker/docker/utils"
+	"github.com/go-check/check"
 )
 
 // ensure docker info succeeds
-func TestInfoEnsureSucceeds(t *testing.T) {
+func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) {
 	versionCmd := exec.Command(dockerBinary, "info")
 	out, exitCode, err := runCommandWithOutput(versionCmd)
 	if err != nil || exitCode != 0 {
-		t.Fatalf("failed to execute docker info: %s, %v", out, err)
+		c.Fatalf("failed to execute docker info: %s, %v", out, err)
 	}
 
-	stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"}
+	// always shown fields
+	stringsToCheck := []string{
+		"ID:",
+		"Containers:",
+		"Images:",
+		"Execution Driver:",
+		"Logging Driver:",
+		"Operating System:",
+		"CPUs:",
+		"Total Memory:",
+		"Kernel Version:",
+		"Storage Driver:",
+	}
+
+	if utils.ExperimentalBuild() {
+		stringsToCheck = append(stringsToCheck, "Experimental: true")
+	}
 
 	for _, linePrefix := range stringsToCheck {
 		if !strings.Contains(out, linePrefix) {
-			t.Errorf("couldn't find string %v in output", linePrefix)
+			c.Errorf("couldn't find string %v in output", linePrefix)
 		}
 	}
-
-	logDone("info - verify that it works")
 }
diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go
index cf42217..066e014 100644
--- a/integration-cli/docker_cli_inspect_test.go
+++ b/integration-cli/docker_cli_inspect_test.go
@@ -1,23 +1,91 @@
 package main
 
 import (
+	"fmt"
 	"os/exec"
+	"strconv"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestInspectImage(t *testing.T) {
+func (s *DockerSuite) TestInspectImage(c *check.C) {
 	imageTest := "emptyfs"
 	imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
-	imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest)
+	id, err := inspectField(imageTest, "Id")
+	c.Assert(err, check.IsNil)
+
+	if id != imageTestID {
+		c.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id)
+	}
+
+}
+
+func (s *DockerSuite) TestInspectInt64(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-m=300M", "busybox", "true")
+	out, _, _, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil {
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
+	}
+
+	out = strings.TrimSpace(out)
+
+	inspectOut, err := inspectField(out, "HostConfig.Memory")
+	c.Assert(err, check.IsNil)
+
+	if inspectOut != "314572800" {
+		c.Fatalf("inspect got wrong value, got: %q, expected: 314572800", inspectOut)
+	}
+}
+
+func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) {
+	imageTest := "emptyfs"
+	out, err := inspectField(imageTest, "Size")
+	c.Assert(err, check.IsNil)
+
+	size, err := strconv.Atoi(out)
+	if err != nil {
+		c.Fatalf("failed to inspect size of the image: %s, %v", out, err)
+	}
+
+	//now see if the size turns out to be the same
+	formatStr := fmt.Sprintf("--format='{{eq .Size %d}}'", size)
+	imagesCmd := exec.Command(dockerBinary, "inspect", formatStr, imageTest)
 	out, exitCode, err := runCommandWithOutput(imagesCmd)
 	if exitCode != 0 || err != nil {
-		t.Fatalf("failed to inspect image: %s, %v", out, err)
+		c.Fatalf("failed to inspect image: %s, %v", out, err)
+	}
+	if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result {
+		c.Fatalf("Expected size: %d for image: %s but received size: %s", size, imageTest, strings.TrimSuffix(out, "\n"))
+	}
+}
+
+func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat")
+	runCmd.Stdin = strings.NewReader("blahblah")
+	out, _, _, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil {
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
-	if id := strings.TrimSuffix(out, "\n"); id != imageTestID {
-		t.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id)
+	id := strings.TrimSpace(out)
+
+	out, err = inspectField(id, "State.ExitCode")
+	c.Assert(err, check.IsNil)
+
+	exitCode, err := strconv.Atoi(out)
+	if err != nil {
+		c.Fatalf("failed to inspect exitcode of the container: %s, %v", out, err)
 	}
 
-	logDone("inspect - inspect an image")
+	//now get the exit code to verify
+	formatStr := fmt.Sprintf("--format='{{eq .State.ExitCode %d}}'", exitCode)
+	runCmd = exec.Command(dockerBinary, "inspect", formatStr, id)
+	out, _, err = runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("failed to inspect container: %s, %v", out, err)
+	}
+	if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result {
+		c.Fatalf("Expected exitcode: %d for container: %s", exitCode, id)
+	}
 }
diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go
index 33135a3..1371a0b 100644
--- a/integration-cli/docker_cli_kill_test.go
+++ b/integration-cli/docker_cli_kill_test.go
@@ -3,73 +3,58 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestKillContainer(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10")
+func (s *DockerSuite) TestKillContainer(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("out should've been a container id: %s, %v", out, err)
-	}
+	cleanedContainerID := strings.TrimSpace(out)
+	c.Assert(waitRun(cleanedContainerID), check.IsNil)
 
 	killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(killCmd); err != nil {
-		t.Fatalf("failed to kill container: %s, %v", out, err)
+		c.Fatalf("failed to kill container: %s, %v", out, err)
 	}
 
 	listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q")
 	out, _, err = runCommandWithOutput(listRunningContainersCmd)
 	if err != nil {
-		t.Fatalf("failed to list running containers: %s, %v", out, err)
+		c.Fatalf("failed to list running containers: %s, %v", out, err)
 	}
 
 	if strings.Contains(out, cleanedContainerID) {
-		t.Fatal("killed container is still running")
+		c.Fatal("killed container is still running")
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("kill - kill container running sleep 10")
 }
 
-func TestKillDifferentUserContainer(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "sh", "-c", "sleep 10")
+func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("out should've been a container id: %s, %v", out, err)
-	}
+	cleanedContainerID := strings.TrimSpace(out)
+	c.Assert(waitRun(cleanedContainerID), check.IsNil)
 
 	killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(killCmd); err != nil {
-		t.Fatalf("failed to kill container: %s, %v", out, err)
+		c.Fatalf("failed to kill container: %s, %v", out, err)
 	}
 
 	listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q")
 	out, _, err = runCommandWithOutput(listRunningContainersCmd)
 	if err != nil {
-		t.Fatalf("failed to list running containers: %s, %v", out, err)
+		c.Fatalf("failed to list running containers: %s, %v", out, err)
 	}
 
 	if strings.Contains(out, cleanedContainerID) {
-		t.Fatal("killed container is still running")
+		c.Fatal("killed container is still running")
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("kill - kill container running sleep 10 from a different user")
 }
diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go
index efee8d0..1f7432a 100644
--- a/integration-cli/docker_cli_links_test.go
+++ b/integration-cli/docker_cli_links_test.go
@@ -8,89 +8,79 @@
 	"reflect"
 	"regexp"
 	"strings"
-	"testing"
 	"time"
 
-	"github.com/docker/docker/pkg/iptables"
+	"github.com/go-check/check"
 )
 
-func TestLinksEtcHostsRegularFile(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !strings.HasPrefix(out, "-") {
-		t.Errorf("/etc/hosts should be a regular file")
+		c.Errorf("/etc/hosts should be a regular file")
 	}
-	logDone("link - /etc/hosts is a regular file")
 }
 
-func TestLinksEtcHostsContentMatch(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	hosts, err := ioutil.ReadFile("/etc/hosts")
 	if os.IsNotExist(err) {
-		t.Skip("/etc/hosts does not exist, skip this test")
+		c.Skip("/etc/hosts does not exist, skip this test")
 	}
 
 	if out != string(hosts) {
-		t.Errorf("container")
+		c.Errorf("container")
 	}
 
-	logDone("link - /etc/hosts matches hosts copy")
 }
 
-func TestLinksPingUnlinkedContainers(t *testing.T) {
+func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
 	exitCode, err := runCommand(runCmd)
 
 	if exitCode == 0 {
-		t.Fatal("run ping did not fail")
+		c.Fatal("run ping did not fail")
 	} else if exitCode != 1 {
-		t.Fatalf("run ping failed with errors: %v", err)
+		c.Fatalf("run ping failed with errors: %v", err)
 	}
 
-	logDone("links - ping unlinked container")
 }
 
 // Test for appropriate error when calling --link with an invalid target container
-func TestLinksInvalidContainerTarget(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "--link", "bogus:alias", "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 
 	if err == nil {
-		t.Fatal("an invalid container target should produce an error")
+		c.Fatal("an invalid container target should produce an error")
 	}
 	if !strings.Contains(out, "Could not get container") {
-		t.Fatal("error output expected 'Could not get container', but got %q instead; err: %v", out, err)
+		c.Fatalf("error output expected 'Could not get container', but got %q instead; err: %v", out, err)
 	}
 
-	logDone("links - linking to non-existent container should not work")
 }
 
-func TestLinksPingLinkedContainers(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "container1", "--hostname", "fred", "busybox", "top")
 	if _, err := runCommand(runCmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	runCmd = exec.Command(dockerBinary, "run", "-d", "--name", "container2", "--hostname", "wilma", "busybox", "top")
 	if _, err := runCommand(runCmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	runArgs := []string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c"}
@@ -98,74 +88,43 @@
 
 	// test ping by alias, ping by name, and ping by hostname
 	// 1. Ping by alias
-	dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...)
+	dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...)
 	// 2. Ping by container name
-	dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...)
+	dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...)
 	// 3. Ping by hostname
-	dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...)
+	dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...)
 
-	logDone("links - ping linked container")
 }
 
-func TestLinksPingLinkedContainersAfterRename(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) {
 
-	out, _, _ := dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
-	idA := stripTrailingCharacters(out)
-	out, _, _ = dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
-	idB := stripTrailingCharacters(out)
-	dockerCmd(t, "rename", "container1", "container_new")
-	dockerCmd(t, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
-	dockerCmd(t, "kill", idA)
-	dockerCmd(t, "kill", idB)
+	out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top")
+	idA := strings.TrimSpace(out)
+	out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top")
+	idB := strings.TrimSpace(out)
+	dockerCmd(c, "rename", "container1", "container_new")
+	dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
+	dockerCmd(c, "kill", idA)
+	dockerCmd(c, "kill", idB)
 
-	logDone("links - ping linked container after rename")
 }
 
-func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
-
-	dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10")
-	dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10")
-
-	childIP := findContainerIP(t, "child")
-	parentIP := findContainerIP(t, "parent")
-
-	sourceRule := []string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"}
-	destinationRule := []string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"}
-	if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) {
-		t.Fatal("Iptables rules not found")
-	}
-
-	dockerCmd(t, "rm", "--link", "parent/http")
-	if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) {
-		t.Fatal("Iptables rules should be removed when unlink")
-	}
-
-	dockerCmd(t, "kill", "child")
-	dockerCmd(t, "kill", "parent")
-
-	logDone("link - verify iptables when link and unlink")
-}
-
-func TestLinksInspectLinksStarted(t *testing.T) {
+func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) {
 	var (
 		expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}}
 		result   []string
 	)
-	defer deleteAllContainers()
-	dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
-	dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
-	dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10")
+	dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top")
 	links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	err = unmarshalJSON([]byte(links), &result)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	output := convertSliceOfStringsToMap(result)
@@ -173,28 +132,26 @@
 	equal := reflect.DeepEqual(output, expected)
 
 	if !equal {
-		t.Fatalf("Links %s, expected %s", result, expected)
+		c.Fatalf("Links %s, expected %s", result, expected)
 	}
-	logDone("link - links in started container inspect")
 }
 
-func TestLinksInspectLinksStopped(t *testing.T) {
+func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) {
 	var (
 		expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}}
 		result   []string
 	)
-	defer deleteAllContainers()
-	dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
-	dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
-	dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true")
+	dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true")
 	links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	err = unmarshalJSON([]byte(links), &result)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	output := convertSliceOfStringsToMap(result)
@@ -202,47 +159,42 @@
 	equal := reflect.DeepEqual(output, expected)
 
 	if !equal {
-		t.Fatalf("Links %s, but expected %s", result, expected)
+		c.Fatalf("Links %s, but expected %s", result, expected)
 	}
 
-	logDone("link - links in stopped container inspect")
 }
 
-func TestLinksNotStartedParentNotFail(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "create", "--name=first", "busybox", "top")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	runCmd = exec.Command(dockerBinary, "create", "--name=second", "--link=first:first", "busybox", "top")
 	out, _, _, err = runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	runCmd = exec.Command(dockerBinary, "start", "first")
 	out, _, _, err = runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	logDone("link - container start successfully updating stopped parent links")
 }
 
-func TestLinksHostsFilesInject(t *testing.T) {
-	testRequires(t, SameHostDaemon, ExecSupport)
-
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) {
+	testRequires(c, SameHostDaemon, ExecSupport)
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "one", "busybox", "top"))
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	idOne := strings.TrimSpace(out)
 
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top"))
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	idTwo := strings.TrimSpace(out)
@@ -251,89 +203,124 @@
 
 	contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts")
 	if err != nil {
-		t.Fatal(err, string(contentOne))
+		c.Fatal(err, string(contentOne))
 	}
 
 	contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts")
 	if err != nil {
-		t.Fatal(err, string(contentTwo))
+		c.Fatal(err, string(contentTwo))
 	}
 
 	if !strings.Contains(string(contentTwo), "onetwo") {
-		t.Fatal("Host is not present in updated hosts file", string(contentTwo))
+		c.Fatal("Host is not present in updated hosts file", string(contentTwo))
 	}
 
-	logDone("link - ensure containers hosts files are updated with the link alias.")
 }
 
-func TestLinksNetworkHostContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) {
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top"))
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true"))
-	if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior.") {
-		t.Fatalf("Running container linking to a container with --net host should have failed: %s", out)
+	if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior") {
+		c.Fatalf("Running container linking to a container with --net host should have failed: %s", out)
 	}
 
-	logDone("link - error thrown when linking to container with --net host")
 }
 
-func TestLinksUpdateOnRestart(t *testing.T) {
-	testRequires(t, SameHostDaemon, ExecSupport)
-
-	defer deleteAllContainers()
+func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) {
+	testRequires(c, SameHostDaemon, ExecSupport)
 
 	if out, err := exec.Command(dockerBinary, "run", "-d", "--name", "one", "busybox", "top").CombinedOutput(); err != nil {
-		t.Fatal(err, string(out))
+		c.Fatal(err, string(out))
 	}
 	out, err := exec.Command(dockerBinary, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top").CombinedOutput()
 	if err != nil {
-		t.Fatal(err, string(out))
+		c.Fatal(err, string(out))
 	}
 	id := strings.TrimSpace(string(out))
 
 	realIP, err := inspectField("one", "NetworkSettings.IPAddress")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	content, err := readContainerFileWithExec(id, "/etc/hosts")
 	if err != nil {
-		t.Fatal(err, string(content))
+		c.Fatal(err, string(content))
 	}
 	getIP := func(hosts []byte, hostname string) string {
 		re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname)))
 		matches := re.FindSubmatch(hosts)
 		if matches == nil {
-			t.Fatalf("Hostname %s have no matches in hosts", hostname)
+			c.Fatalf("Hostname %s have no matches in hosts", hostname)
 		}
 		return string(matches[1])
 	}
 	if ip := getIP(content, "one"); ip != realIP {
-		t.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip)
+		c.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip)
 	}
 	if ip := getIP(content, "onetwo"); ip != realIP {
-		t.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip)
+		c.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip)
 	}
 	if out, err := exec.Command(dockerBinary, "restart", "one").CombinedOutput(); err != nil {
-		t.Fatal(err, string(out))
+		c.Fatal(err, string(out))
 	}
 	realIP, err = inspectField("one", "NetworkSettings.IPAddress")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	content, err = readContainerFileWithExec(id, "/etc/hosts")
 	if err != nil {
-		t.Fatal(err, string(content))
+		c.Fatal(err, string(content))
 	}
 	if ip := getIP(content, "one"); ip != realIP {
-		t.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip)
+		c.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip)
 	}
 	if ip := getIP(content, "onetwo"); ip != realIP {
-		t.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip)
+		c.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip)
 	}
-	logDone("link - ensure containers hosts files are updated on restart")
+}
+
+func (s *DockerSuite) TestLinksEnvs(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top")
+	out, _, _, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil {
+		c.Fatalf("Run of first failed: %s\n%s", out, err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--name=second", "--link=first:first", "busybox", "env")
+
+	out, stde, rc, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil || rc != 0 {
+		c.Fatalf("run of 2nd failed: rc: %d, out: %s\n err: %s", rc, out, stde)
+	}
+
+	if !strings.Contains(out, "FIRST_ENV_e1=\n") ||
+		!strings.Contains(out, "FIRST_ENV_e2=v2") ||
+		!strings.Contains(out, "FIRST_ENV_e3=v3=v3") {
+		c.Fatalf("Incorrect output: %s", out)
+	}
+}
+
+func (s *DockerSuite) TestLinkShortDefinition(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "shortlinkdef", "busybox", "top")
+	out, _, err := runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	cid := strings.TrimSpace(out)
+	c.Assert(waitRun(cid), check.IsNil)
+
+	runCmd = exec.Command(dockerBinary, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top")
+	out, _, err = runCommandWithOutput(runCmd)
+	c.Assert(err, check.IsNil)
+
+	cid2 := strings.TrimSpace(out)
+	c.Assert(waitRun(cid2), check.IsNil)
+
+	links, err := inspectFieldJSON(cid2, "HostConfig.Links")
+	c.Assert(err, check.IsNil)
+	c.Assert(links, check.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]")
 }
diff --git a/integration-cli/docker_cli_login_test.go b/integration-cli/docker_cli_login_test.go
index 9bf90f3..3b4431d 100644
--- a/integration-cli/docker_cli_login_test.go
+++ b/integration-cli/docker_cli_login_test.go
@@ -3,10 +3,11 @@
 import (
 	"bytes"
 	"os/exec"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestLoginWithoutTTY(t *testing.T) {
+func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) {
 	cmd := exec.Command(dockerBinary, "login")
 
 	// Send to stdin so the process does not get the TTY
@@ -14,8 +15,7 @@
 
 	// run the command and block until it's done
 	if err := cmd.Run(); err == nil {
-		t.Fatal("Expected non nil err when loginning in & TTY not available")
+		c.Fatal("Expected non nil err when loginning in & TTY not available")
 	}
 
-	logDone("login - login without TTY")
 }
diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go
index b86a504..2e41f8a 100644
--- a/integration-cli/docker_cli_logs_test.go
+++ b/integration-cli/docker_cli_logs_test.go
@@ -1,119 +1,110 @@
 package main
 
 import (
+	"encoding/json"
 	"fmt"
+	"io"
 	"os/exec"
 	"regexp"
+	"strconv"
 	"strings"
-	"testing"
 	"time"
 
 	"github.com/docker/docker/pkg/timeutils"
+	"github.com/go-check/check"
 )
 
 // This used to work, it test a log of PageSize-1 (gh#4851)
-func TestLogsContainerSmallerThanPage(t *testing.T) {
+func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) {
 	testLen := 32767
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	if len(out) != testLen+1 {
-		t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+		c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("logs - logs container running echo smaller than page size")
 }
 
 // Regression test: When going over the PageSize, it used to panic (gh#4851)
-func TestLogsContainerBiggerThanPage(t *testing.T) {
+func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) {
 	testLen := 32768
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	if len(out) != testLen+1 {
-		t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+		c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("logs - logs container running echo bigger than page size")
 }
 
 // Regression test: When going much over the PageSize, it used to block (gh#4851)
-func TestLogsContainerMuchBiggerThanPage(t *testing.T) {
+func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) {
 	testLen := 33000
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	if len(out) != testLen+1 {
-		t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+		c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("logs - logs container running echo much bigger than page size")
 }
 
-func TestLogsTimestamps(t *testing.T) {
+func (s *DockerSuite) TestLogsTimestamps(c *check.C) {
 	testLen := 100
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen))
 
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	lines := strings.Split(out, "\n")
 
 	if len(lines) != testLen+1 {
-		t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
+		c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
 	}
 
 	ts := regexp.MustCompile(`^.* `)
@@ -122,180 +113,235 @@
 		if l != "" {
 			_, err := time.Parse(timeutils.RFC3339NanoFixed+" ", ts.FindString(l))
 			if err != nil {
-				t.Fatalf("Failed to parse timestamp from %v: %v", l, err)
+				c.Fatalf("Failed to parse timestamp from %v: %v", l, err)
 			}
 			if l[29] != 'Z' { // ensure we have padded 0's
-				t.Fatalf("Timestamp isn't padded properly: %s", l)
+				c.Fatalf("Timestamp isn't padded properly: %s", l)
 			}
 		}
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("logs - logs with timestamps")
 }
 
-func TestLogsSeparateStderr(t *testing.T) {
+func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) {
 	msg := "stderr_log"
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg))
 
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
 	stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	if stdout != "" {
-		t.Fatalf("Expected empty stdout stream, got %v", stdout)
+		c.Fatalf("Expected empty stdout stream, got %v", stdout)
 	}
 
 	stderr = strings.TrimSpace(stderr)
 	if stderr != msg {
-		t.Fatalf("Expected %v in stderr stream, got %v", msg, stderr)
+		c.Fatalf("Expected %v in stderr stream, got %v", msg, stderr)
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("logs - separate stderr (without pseudo-tty)")
 }
 
-func TestLogsStderrInStdout(t *testing.T) {
+func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) {
 	msg := "stderr_log"
 	runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg))
 
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
 	stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	if stderr != "" {
-		t.Fatalf("Expected empty stderr stream, got %v", stdout)
+		c.Fatalf("Expected empty stderr stream, got %v", stdout)
 	}
 
 	stdout = strings.TrimSpace(stdout)
 	if stdout != msg {
-		t.Fatalf("Expected %v in stdout stream, got %v", msg, stdout)
+		c.Fatalf("Expected %v in stdout stream, got %v", msg, stdout)
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("logs - stderr in stdout (with pseudo-tty)")
 }
 
-func TestLogsTail(t *testing.T) {
+func (s *DockerSuite) TestLogsTail(c *check.C) {
 	testLen := 100
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen))
 
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	lines := strings.Split(out, "\n")
 
 	if len(lines) != 6 {
-		t.Fatalf("Expected log %d lines, received %d\n", 6, len(lines))
+		c.Fatalf("Expected log %d lines, received %d\n", 6, len(lines))
 	}
 
 	logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	lines = strings.Split(out, "\n")
 
 	if len(lines) != testLen+1 {
-		t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
+		c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
 	}
 
 	logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID)
 	out, _, _, err = runCommandWithStdoutStderr(logsCmd)
 	if err != nil {
-		t.Fatalf("failed to log container: %s, %v", out, err)
+		c.Fatalf("failed to log container: %s, %v", out, err)
 	}
 
 	lines = strings.Split(out, "\n")
 
 	if len(lines) != testLen+1 {
-		t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
+		c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
 	}
-
-	deleteContainer(cleanedContainerID)
-	logDone("logs - logs tail")
 }
 
-func TestLogsFollowStopped(t *testing.T) {
+func (s *DockerSuite) TestLogsFollowStopped(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello")
 
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 	exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
 
 	logsCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID)
 	if err := logsCmd.Start(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	c := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
-		if err := logsCmd.Wait(); err != nil {
-			t.Fatal(err)
-		}
-		close(c)
+		errChan <- logsCmd.Wait()
+		close(errChan)
 	}()
 
 	select {
-	case <-c:
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
 	case <-time.After(1 * time.Second):
-		t.Fatal("Following logs is hanged")
+		c.Fatal("Following logs is hanged")
+	}
+}
+
+func (s *DockerSuite) TestLogsSince(c *check.C) {
+	name := "testlogssince"
+	runCmd := exec.Command(dockerBinary, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo `date +%s` log$i; done")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	deleteContainer(cleanedContainerID)
-	logDone("logs - logs follow stopped container")
+	log2Line := strings.Split(strings.Split(out, "\n")[1], " ")
+	t, err := strconv.ParseInt(log2Line[0], 10, 64) // the timestamp log2 is writen
+	c.Assert(err, check.IsNil)
+	since := t + 1 // add 1s so log1 & log2 doesn't show up
+	logsCmd := exec.Command(dockerBinary, "logs", "-t", fmt.Sprintf("--since=%v", since), name)
+
+	out, _, err = runCommandWithOutput(logsCmd)
+	if err != nil {
+		c.Fatalf("failed to log container: %s, %v", out, err)
+	}
+
+	// Skip 2 seconds
+	unexpected := []string{"log1", "log2"}
+	for _, v := range unexpected {
+		if strings.Contains(out, v) {
+			c.Fatalf("unexpected log message returned=%v, since=%v\nout=%v", v, since, out)
+		}
+	}
+
+	// Test with default value specified and parameter omitted
+	expected := []string{"log1", "log2", "log3"}
+	for _, cmd := range []*exec.Cmd{
+		exec.Command(dockerBinary, "logs", "-t", name),
+		exec.Command(dockerBinary, "logs", "-t", "--since=0", name),
+	} {
+		out, _, err = runCommandWithOutput(cmd)
+		if err != nil {
+			c.Fatalf("failed to log container: %s, %v", out, err)
+		}
+		for _, v := range expected {
+			if !strings.Contains(out, v) {
+				c.Fatalf("'%v' does not contain=%v\nout=%s", cmd.Args, v, out)
+			}
+		}
+	}
+}
+
+func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do date +%s; sleep 1; done`)
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("run failed with errors: %s, %v", out, err)
+	}
+	cleanedContainerID := strings.TrimSpace(out)
+
+	now := daemonTime(c).Unix()
+	since := now + 2
+	logCmd := exec.Command(dockerBinary, "logs", "-f", fmt.Sprintf("--since=%v", since), cleanedContainerID)
+	out, _, err = runCommandWithOutput(logCmd)
+	if err != nil {
+		c.Fatalf("failed to log container: %s, %v", out, err)
+	}
+	lines := strings.Split(strings.TrimSpace(out), "\n")
+	if len(lines) == 0 {
+		c.Fatal("got no log lines")
+	}
+	for _, v := range lines {
+		ts, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			c.Fatalf("cannot parse timestamp output from log: '%v'\nout=%s", v, out)
+		}
+		if ts < since {
+			c.Fatalf("earlier log found. since=%v logdate=%v", since, ts)
+		}
+	}
 }
 
 // Regression test for #8832
-func TestLogsFollowSlowStdoutConsumer(t *testing.T) {
+func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 200000;yes X | head -c 200000`)
 
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("run failed with errors: %s, %v", out, err)
+		c.Fatalf("run failed with errors: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	stopSlowRead := make(chan bool)
 
@@ -307,31 +353,75 @@
 	logCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID)
 
 	stdout, err := logCmd.StdoutPipe()
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 
 	if err := logCmd.Start(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// First read slowly
 	bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead)
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 
 	// After the container has finished we can continue reading fast
 	bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 
 	actual := bytes1 + bytes2
 	expected := 200000
 	if actual != expected {
-		t.Fatalf("Invalid bytes read: %d, expected %d", actual, expected)
+		c.Fatalf("Invalid bytes read: %d, expected %d", actual, expected)
 	}
 
-	logDone("logs - follow slow consumer")
+}
+
+func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done")
+	id := strings.TrimSpace(out)
+	c.Assert(waitRun(id), check.IsNil)
+
+	type info struct {
+		NGoroutines int
+	}
+	getNGoroutines := func() int {
+		var i info
+		status, b, err := sockRequest("GET", "/info", nil)
+		c.Assert(err, check.IsNil)
+		c.Assert(status, check.Equals, 200)
+		c.Assert(json.Unmarshal(b, &i), check.IsNil)
+		return i.NGoroutines
+	}
+
+	nroutines := getNGoroutines()
+
+	cmd := exec.Command(dockerBinary, "logs", "-f", id)
+	r, w := io.Pipe()
+	cmd.Stdout = w
+	c.Assert(cmd.Start(), check.IsNil)
+
+	// Make sure pipe is written to
+	chErr := make(chan error)
+	go func() {
+		b := make([]byte, 1)
+		_, err := r.Read(b)
+		chErr <- err
+	}()
+	c.Assert(<-chErr, check.IsNil)
+	c.Assert(cmd.Process.Kill(), check.IsNil)
+
+	// NGoroutines is not updated right away, so we need to wait before failing
+	t := time.After(30 * time.Second)
+	for {
+		select {
+		case <-t:
+			if n := getNGoroutines(); n > nroutines {
+				c.Fatalf("leaked goroutines: expected less than or equal to %d, got: %d", nroutines, n)
+			}
+		default:
+			if n := getNGoroutines(); n <= nroutines {
+				return
+			}
+			time.Sleep(200 * time.Millisecond)
+		}
+	}
 }
diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go
index 729977b..2f8bd66 100644
--- a/integration-cli/docker_cli_nat_test.go
+++ b/integration-cli/docker_cli_nat_test.go
@@ -5,58 +5,107 @@
 	"net"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestNetworkNat(t *testing.T) {
-	testRequires(t, SameHostDaemon, NativeExecDriver)
-	defer deleteAllContainers()
+func startServerContainer(c *check.C, proto string, port int) string {
+	pStr := fmt.Sprintf("%d:%d", port, port)
+	bCmd := fmt.Sprintf("nc -lp %d && echo bye", port)
+	cmd := []string{"-d", "-p", pStr, "busybox", "sh", "-c", bCmd}
+	if proto == "udp" {
+		cmd = append(cmd, "-u")
+	}
 
+	name := "server"
+	if err := waitForContainer(name, cmd...); err != nil {
+		c.Fatalf("Failed to launch server container: %v", err)
+	}
+	return name
+}
+
+func getExternalAddress(c *check.C) net.IP {
 	iface, err := net.InterfaceByName("eth0")
 	if err != nil {
-		t.Skipf("Test not running with `make test`. Interface eth0 not found: %s", err)
+		c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err))
 	}
 
 	ifaceAddrs, err := iface.Addrs()
 	if err != nil || len(ifaceAddrs) == 0 {
-		t.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs))
+		c.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs))
 	}
 
 	ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String())
 	if err != nil {
-		t.Fatalf("Error retrieving the up for eth0: %s", err)
+		c.Fatalf("Error retrieving the up for eth0: %s", err)
 	}
 
-	runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080")
+	return ifaceIP
+}
+
+func getContainerLogs(c *check.C, containerID string) string {
+	runCmd := exec.Command(dockerBinary, "logs", containerID)
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
+	}
+	return strings.Trim(out, "\r\n")
+}
+
+func getContainerStatus(c *check.C, containerID string) string {
+	out, err := inspectField(containerID, "State.Running")
+	c.Assert(err, check.IsNil)
+	return out
+}
+
+func (s *DockerSuite) TestNetworkNat(c *check.C) {
+	testRequires(c, SameHostDaemon, NativeExecDriver)
+
+	srv := startServerContainer(c, "tcp", 8080)
+
+	// Spawn a new container which connects to the server through the
+	// interface address.
+	endpoint := getExternalAddress(c)
+	runCmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", endpoint))
+	if out, _, err := runCommandWithOutput(runCmd); err != nil {
+		c.Fatalf("Failed to connect to server: %v (output: %q)", err, string(out))
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	result := getContainerLogs(c, srv)
 
-	runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP))
-	out, _, err = runCommandWithOutput(runCmd)
+	// Ideally we'd like to check for "hello world" but sometimes
+	// nc doesn't show the data it received so instead let's look for
+	// the output of the 'echo bye' that should be printed once
+	// the nc command gets a connection
+	expected := "bye"
+	if !strings.Contains(result, expected) {
+		c.Fatalf("Unexpected output. Expected: %q, received: %q", expected, result)
+	}
+}
+
+func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) {
+	testRequires(c, SameHostDaemon, NativeExecDriver)
+
+	srv := startServerContainer(c, "tcp", 8081)
+
+	// Attempt to connect from the host to the listening container.
+	conn, err := net.Dial("tcp", "localhost:8081")
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatalf("Failed to connect to container (%v)", err)
 	}
-
-	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
-	out, _, err = runCommandWithOutput(runCmd)
-	if err != nil {
-		t.Fatalf("failed to retrieve logs for container: %s, %v", out, err)
+	if _, err := conn.Write([]byte("hello world\n")); err != nil {
+		c.Fatal(err)
 	}
+	conn.Close()
 
-	out = strings.Trim(out, "\r\n")
+	result := getContainerLogs(c, srv)
 
-	if expected := "hello world"; out != expected {
-		t.Fatalf("Unexpected output. Expected: %q, received: %q for iface %s", expected, out, ifaceIP)
+	// Ideally we'd like to check for "hello world" but sometimes
+	// nc doesn't show the data it received so instead let's look for
+	// the output of the 'echo bye' that should be printed once
+	// the nc command gets a connection
+	expected := "bye"
+	if !strings.Contains(result, expected) {
+		c.Fatalf("Unexpected output. Expected: %q, received: %q", expected, result)
 	}
-
-	killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
-	if out, _, err = runCommandWithOutput(killCmd); err != nil {
-		t.Fatalf("failed to kill container: %s, %v", out, err)
-	}
-
-	logDone("network - make sure nat works through the host")
 }
diff --git a/integration-cli/docker_cli_pause_test.go b/integration-cli/docker_cli_pause_test.go
index f1ccde9..0256fb9 100644
--- a/integration-cli/docker_cli_pause_test.go
+++ b/integration-cli/docker_cli_pause_test.go
@@ -4,83 +4,76 @@
 	"fmt"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestPause(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPause(c *check.C) {
 	defer unpauseAllContainers()
 
 	name := "testeventpause"
-	out, _, _ := dockerCmd(t, "images", "-q")
+	out, _ := dockerCmd(c, "images", "-q")
 	image := strings.Split(out, "\n")[0]
-	dockerCmd(t, "run", "-d", "--name", name, image, "sleep", "2")
+	dockerCmd(c, "run", "-d", "--name", name, image, "top")
 
-	dockerCmd(t, "pause", name)
+	dockerCmd(c, "pause", name)
 	pausedContainers, err := getSliceOfPausedContainers()
 	if err != nil {
-		t.Fatalf("error thrown while checking if containers were paused: %v", err)
+		c.Fatalf("error thrown while checking if containers were paused: %v", err)
 	}
 	if len(pausedContainers) != 1 {
-		t.Fatalf("there should be one paused container and not", len(pausedContainers))
+		c.Fatalf("there should be one paused container and not %d", len(pausedContainers))
 	}
 
-	dockerCmd(t, "unpause", name)
+	dockerCmd(c, "unpause", name)
 
-	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
 	out, _, _ = runCommandWithOutput(eventsCmd)
 	events := strings.Split(out, "\n")
 	if len(events) <= 1 {
-		t.Fatalf("Missing expected event")
+		c.Fatalf("Missing expected event")
 	}
 
 	pauseEvent := strings.Fields(events[len(events)-3])
 	unpauseEvent := strings.Fields(events[len(events)-2])
 
 	if pauseEvent[len(pauseEvent)-1] != "pause" {
-		t.Fatalf("event should be pause, not %#v", pauseEvent)
+		c.Fatalf("event should be pause, not %#v", pauseEvent)
 	}
 	if unpauseEvent[len(unpauseEvent)-1] != "unpause" {
-		t.Fatalf("event should be unpause, not %#v", unpauseEvent)
+		c.Fatalf("event should be unpause, not %#v", unpauseEvent)
 	}
 
-	waitCmd := exec.Command(dockerBinary, "wait", name)
-	if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
-	}
-
-	logDone("pause - pause/unpause is logged")
 }
 
-func TestPauseMultipleContainers(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) {
 	defer unpauseAllContainers()
 
 	containers := []string{
 		"testpausewithmorecontainers1",
 		"testpausewithmorecontainers2",
 	}
-	out, _, _ := dockerCmd(t, "images", "-q")
+	out, _ := dockerCmd(c, "images", "-q")
 	image := strings.Split(out, "\n")[0]
 	for _, name := range containers {
-		dockerCmd(t, "run", "-d", "--name", name, image, "sleep", "2")
+		dockerCmd(c, "run", "-d", "--name", name, image, "top")
 	}
-	dockerCmd(t, append([]string{"pause"}, containers...)...)
+	dockerCmd(c, append([]string{"pause"}, containers...)...)
 	pausedContainers, err := getSliceOfPausedContainers()
 	if err != nil {
-		t.Fatalf("error thrown while checking if containers were paused: %v", err)
+		c.Fatalf("error thrown while checking if containers were paused: %v", err)
 	}
 	if len(pausedContainers) != len(containers) {
-		t.Fatalf("there should be %d paused container and not %d", len(containers), len(pausedContainers))
+		c.Fatalf("there should be %d paused container and not %d", len(containers), len(pausedContainers))
 	}
 
-	dockerCmd(t, append([]string{"unpause"}, containers...)...)
+	dockerCmd(c, append([]string{"unpause"}, containers...)...)
 
-	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix()))
+	eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix()))
 	out, _, _ = runCommandWithOutput(eventsCmd)
 	events := strings.Split(out, "\n")
 	if len(events) <= len(containers)*3-2 {
-		t.Fatalf("Missing expected event")
+		c.Fatalf("Missing expected event")
 	}
 
 	pauseEvents := make([][]string, len(containers))
@@ -92,21 +85,13 @@
 
 	for _, pauseEvent := range pauseEvents {
 		if pauseEvent[len(pauseEvent)-1] != "pause" {
-			t.Fatalf("event should be pause, not %#v", pauseEvent)
+			c.Fatalf("event should be pause, not %#v", pauseEvent)
 		}
 	}
 	for _, unpauseEvent := range unpauseEvents {
 		if unpauseEvent[len(unpauseEvent)-1] != "unpause" {
-			t.Fatalf("event should be unpause, not %#v", unpauseEvent)
+			c.Fatalf("event should be unpause, not %#v", unpauseEvent)
 		}
 	}
 
-	for _, name := range containers {
-		waitCmd := exec.Command(dockerBinary, "wait", name)
-		if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil {
-			t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
-		}
-	}
-
-	logDone("pause - multi pause/unpause is logged")
 }
diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go
index 6b68346..f0cb663 100644
--- a/integration-cli/docker_cli_port_test.go
+++ b/integration-cli/docker_cli_port_test.go
@@ -1,45 +1,46 @@
 package main
 
 import (
+	"net"
 	"os/exec"
 	"sort"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestPortList(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPortList(c *check.C) {
 
 	// one port
 	runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	firstID := stripTrailingCharacters(out)
+	firstID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "port", firstID, "80")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	if !assertPortList(t, out, []string{"0.0.0.0:9876"}) {
-		t.Error("Port list is not correct")
+	if !assertPortList(c, out, []string{"0.0.0.0:9876"}) {
+		c.Error("Port list is not correct")
 	}
 
 	runCmd = exec.Command(dockerBinary, "port", firstID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	if !assertPortList(t, out, []string{"80/tcp -> 0.0.0.0:9876"}) {
-		t.Error("Port list is not correct")
+	if !assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) {
+		c.Error("Port list is not correct")
 	}
 	runCmd = exec.Command(dockerBinary, "rm", "-f", firstID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// three port
@@ -50,36 +51,36 @@
 		"busybox", "top")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	ID := stripTrailingCharacters(out)
+	ID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "port", ID, "80")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	if !assertPortList(t, out, []string{"0.0.0.0:9876"}) {
-		t.Error("Port list is not correct")
+	if !assertPortList(c, out, []string{"0.0.0.0:9876"}) {
+		c.Error("Port list is not correct")
 	}
 
 	runCmd = exec.Command(dockerBinary, "port", ID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	if !assertPortList(t, out, []string{
+	if !assertPortList(c, out, []string{
 		"80/tcp -> 0.0.0.0:9876",
 		"81/tcp -> 0.0.0.0:9877",
 		"82/tcp -> 0.0.0.0:9878"}) {
-		t.Error("Port list is not correct")
+		c.Error("Port list is not correct")
 	}
 	runCmd = exec.Command(dockerBinary, "rm", "-f", ID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// more and one port mapped to the same container port
@@ -91,46 +92,45 @@
 		"busybox", "top")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	ID = stripTrailingCharacters(out)
+	ID = strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "port", ID, "80")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	if !assertPortList(t, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) {
-		t.Error("Port list is not correct")
+	if !assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) {
+		c.Error("Port list is not correct")
 	}
 
 	runCmd = exec.Command(dockerBinary, "port", ID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	if !assertPortList(t, out, []string{
+	if !assertPortList(c, out, []string{
 		"80/tcp -> 0.0.0.0:9876",
 		"80/tcp -> 0.0.0.0:9999",
 		"81/tcp -> 0.0.0.0:9877",
 		"82/tcp -> 0.0.0.0:9878"}) {
-		t.Error("Port list is not correct\n", out)
+		c.Error("Port list is not correct\n", out)
 	}
 	runCmd = exec.Command(dockerBinary, "rm", "-f", ID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	logDone("port - test port list")
 }
 
-func assertPortList(t *testing.T, out string, expected []string) bool {
+func assertPortList(c *check.C, out string, expected []string) bool {
 	//lines := strings.Split(out, "\n")
 	lines := strings.Split(strings.Trim(out, "\n "), "\n")
 	if len(lines) != len(expected) {
-		t.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected))
+		c.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected))
 		return false
 	}
 	sort.Strings(lines)
@@ -138,10 +138,86 @@
 
 	for i := 0; i < len(expected); i++ {
 		if lines[i] != expected[i] {
-			t.Error("|" + lines[i] + "!=" + expected[i] + "|")
+			c.Error("|" + lines[i] + "!=" + expected[i] + "|")
 			return false
 		}
 	}
 
 	return true
 }
+
+func (s *DockerSuite) TestPortHostBinding(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox",
+		"nc", "-l", "-p", "80")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	firstID := strings.TrimSpace(out)
+
+	runCmd = exec.Command(dockerBinary, "port", firstID, "80")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+
+	if !assertPortList(c, out, []string{"0.0.0.0:9876"}) {
+		c.Error("Port list is not correct")
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox",
+		"nc", "localhost", "9876")
+	if out, _, err = runCommandWithOutput(runCmd); err != nil {
+		c.Fatal(out, err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "rm", "-f", firstID)
+	if out, _, err = runCommandWithOutput(runCmd); err != nil {
+		c.Fatal(out, err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox",
+		"nc", "localhost", "9876")
+	if out, _, err = runCommandWithOutput(runCmd); err == nil {
+		c.Error("Port is still bound after the Container is removed")
+	}
+}
+
+func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--expose", "80", "busybox",
+		"nc", "-l", "-p", "80")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	firstID := strings.TrimSpace(out)
+
+	runCmd = exec.Command(dockerBinary, "port", firstID, "80")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+
+	_, exposedPort, err := net.SplitHostPort(out)
+
+	if err != nil {
+		c.Fatal(out, err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox",
+		"nc", "localhost", strings.TrimSpace(exposedPort))
+	if out, _, err = runCommandWithOutput(runCmd); err != nil {
+		c.Fatal(out, err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "rm", "-f", firstID)
+	if out, _, err = runCommandWithOutput(runCmd); err != nil {
+		c.Fatal(out, err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox",
+		"nc", "localhost", strings.TrimSpace(exposedPort))
+	if out, _, err = runCommandWithOutput(runCmd); err == nil {
+		c.Error("Port is still bound after the Container is removed")
+	}
+}
diff --git a/integration-cli/docker_cli_proxy_test.go b/integration-cli/docker_cli_proxy_test.go
index b39dd56..8b55c67 100644
--- a/integration-cli/docker_cli_proxy_test.go
+++ b/integration-cli/docker_cli_proxy_test.go
@@ -4,30 +4,30 @@
 	"net"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestCliProxyDisableProxyUnixSock(t *testing.T) {
-	testRequires(t, SameHostDaemon) // test is valid when DOCKER_HOST=unix://..
+func (s *DockerSuite) TestCliProxyDisableProxyUnixSock(c *check.C) {
+	testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://..
 
 	cmd := exec.Command(dockerBinary, "info")
 	cmd.Env = appendBaseEnv([]string{"HTTP_PROXY=http://127.0.0.1:9999"})
 
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
-	logDone("cli proxy - HTTP_PROXY is not used when connecting to unix sock")
 }
 
 // Can't use localhost here since go has a special case to not use proxy if connecting to localhost
-// See http://golang.org/pkg/net/http/#ProxyFromEnvironment
-func TestCliProxyProxyTCPSock(t *testing.T) {
-	testRequires(t, SameHostDaemon)
+// See https://golang.org/pkg/net/http/#ProxyFromEnvironment
+func (s *DockerDaemonSuite) TestCliProxyProxyTCPSock(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	// get the IP to use to connect since we can't use localhost
 	addrs, err := net.InterfaceAddrs()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	var ip string
 	for _, addr := range addrs {
@@ -40,25 +40,23 @@
 	}
 
 	if ip == "" {
-		t.Fatal("could not find ip to connect to")
+		c.Fatal("could not find ip to connect to")
 	}
 
-	d := NewDaemon(t)
-	if err := d.Start("-H", "tcp://"+ip+":2375"); err != nil {
-		t.Fatal(err)
+	if err := s.d.Start("-H", "tcp://"+ip+":2375"); err != nil {
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "info")
 	cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"}
 	if out, _, err := runCommandWithOutput(cmd); err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	// Test with no_proxy
 	cmd.Env = append(cmd.Env, "NO_PROXY="+ip)
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "info")); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
-	logDone("cli proxy - HTTP_PROXY is used for TCP sock")
 }
diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go
index c2e1085..a9d36be 100644
--- a/integration-cli/docker_cli_ps_test.go
+++ b/integration-cli/docker_cli_ps_test.go
@@ -6,68 +6,78 @@
 	"reflect"
 	"strconv"
 	"strings"
-	"testing"
 	"time"
+
+	"github.com/go-check/check"
 )
 
-func TestPsListContainers(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPsListContainers(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	firstID := stripTrailingCharacters(out)
+	firstID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	secondID := stripTrailingCharacters(out)
+	secondID := strings.TrimSpace(out)
 
 	// not long running
 	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	thirdID := stripTrailingCharacters(out)
+	thirdID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	fourthID := stripTrailingCharacters(out)
+	fourthID := strings.TrimSpace(out)
+
+	// make sure the second is running
+	if err := waitRun(secondID); err != nil {
+		c.Fatalf("waiting for container failed: %v", err)
+	}
 
 	// make sure third one is not running
 	runCmd = exec.Command(dockerBinary, "wait", thirdID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
+	}
+
+	// make sure the forth is running
+	if err := waitRun(fourthID); err != nil {
+		c.Fatalf("waiting for container failed: %v", err)
 	}
 
 	// all
 	runCmd = exec.Command(dockerBinary, "ps", "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// running
 	runCmd = exec.Command(dockerBinary, "ps")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, []string{fourthID, secondID, firstID}) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// from here all flag '-a' is ignored
@@ -76,156 +86,155 @@
 	runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	expected := []string{fourthID, thirdID}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "-n=2")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// since
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	expected = []string{fourthID, thirdID, secondID}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// before
 	runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	expected = []string{secondID, firstID}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// since & before
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	expected = []string{thirdID, secondID}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// since & limit
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2", "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	expected = []string{fourthID, thirdID}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// before & limit
 	runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	expected = []string{thirdID}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	// since & before & limit
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	expected = []string{thirdID}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if !assertContainerList(out, expected) {
-		t.Error("Container list is not in the correct order")
+		c.Errorf("Container list is not in the correct order: %s", out)
 	}
 
-	logDone("ps - test ps options")
 }
 
 func assertContainerList(out string, expected []string) bool {
@@ -245,30 +254,28 @@
 	return true
 }
 
-func TestPsListContainersSize(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestPsListContainersSize(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello")
 	runCommandWithOutput(cmd)
 	cmd = exec.Command(dockerBinary, "ps", "-s", "-n=1")
-	base_out, _, err := runCommandWithOutput(cmd)
-	base_lines := strings.Split(strings.Trim(base_out, "\n "), "\n")
-	base_sizeIndex := strings.Index(base_lines[0], "SIZE")
-	base_foundSize := base_lines[1][base_sizeIndex:]
-	base_bytes, err := strconv.Atoi(strings.Split(base_foundSize, " ")[0])
+	baseOut, _, err := runCommandWithOutput(cmd)
+	baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n")
+	baseSizeIndex := strings.Index(baseLines[0], "SIZE")
+	baseFoundsize := baseLines[1][baseSizeIndex:]
+	baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0])
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	name := "test_size"
 	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	id, err := getIDByName(name)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "-s", "-n=1")
@@ -280,346 +287,351 @@
 	select {
 	case <-wait:
 	case <-time.After(3 * time.Second):
-		t.Fatalf("Calling \"docker ps -s\" timed out!")
+		c.Fatalf("Calling \"docker ps -s\" timed out!")
 	}
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	lines := strings.Split(strings.Trim(out, "\n "), "\n")
+	if len(lines) != 2 {
+		c.Fatalf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))
+	}
 	sizeIndex := strings.Index(lines[0], "SIZE")
 	idIndex := strings.Index(lines[0], "CONTAINER ID")
 	foundID := lines[1][idIndex : idIndex+12]
 	if foundID != id[:12] {
-		t.Fatalf("Expected id %s, got %s", id[:12], foundID)
+		c.Fatalf("Expected id %s, got %s", id[:12], foundID)
 	}
-	expectedSize := fmt.Sprintf("%d B", (2 + base_bytes))
+	expectedSize := fmt.Sprintf("%d B", (2 + baseBytes))
 	foundSize := lines[1][sizeIndex:]
-	if foundSize != expectedSize {
-		t.Fatalf("Expected size %q, got %q", expectedSize, foundSize)
+	if !strings.Contains(foundSize, expectedSize) {
+		c.Fatalf("Expected size %q, got %q", expectedSize, foundSize)
 	}
 
-	logDone("ps - test ps size")
 }
 
-func TestPsListContainersFilterStatus(t *testing.T) {
+func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) {
 	// FIXME: this should test paused, but it makes things hang and its wonky
 	// this is because paused containers can't be controlled by signals
-	defer deleteAllContainers()
 
 	// start exited container
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	firstID := stripTrailingCharacters(out)
+	firstID := strings.TrimSpace(out)
 
 	// make sure the exited cintainer is not running
 	runCmd = exec.Command(dockerBinary, "wait", firstID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// start running container
 	runCmd = exec.Command(dockerBinary, "run", "-itd", "busybox")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	secondID := stripTrailingCharacters(out)
+	secondID := strings.TrimSpace(out)
 
 	// filter containers by exited
 	runCmd = exec.Command(dockerBinary, "ps", "-q", "--filter=status=exited")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut := strings.TrimSpace(out)
 	if containerOut != firstID[:12] {
-		t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
+		c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=running")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut = strings.TrimSpace(out)
 	if containerOut != secondID[:12] {
-		t.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out)
+		c.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out)
 	}
 
-	logDone("ps - test ps filter status")
 }
 
-func TestPsListContainersFilterID(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) {
 
 	// start container
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	firstID := stripTrailingCharacters(out)
+	firstID := strings.TrimSpace(out)
 
 	// start another container
-	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360")
+	runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// filter containers by id
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=id="+firstID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut := strings.TrimSpace(out)
 	if containerOut != firstID[:12] {
-		t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
+		c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
 	}
 
-	logDone("ps - test ps filter id")
 }
 
-func TestPsListContainersFilterName(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) {
 
 	// start container
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name=a_name_to_match", "busybox")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	firstID := stripTrailingCharacters(out)
+	firstID := strings.TrimSpace(out)
 
 	// start another container
-	runCmd = exec.Command(dockerBinary, "run", "-d", "--name=b_name_to_match", "busybox", "sh", "-c", "sleep 360")
+	runCmd = exec.Command(dockerBinary, "run", "-d", "--name=b_name_to_match", "busybox", "top")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// filter containers by name
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=name=a_name_to_match")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut := strings.TrimSpace(out)
 	if containerOut != firstID[:12] {
-		t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
+		c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)
 	}
 
-	logDone("ps - test ps filter name")
 }
 
-func TestPsListContainersFilterLabel(t *testing.T) {
+func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) {
 	// start container
 	runCmd := exec.Command(dockerBinary, "run", "-d", "-l", "match=me", "-l", "second=tag", "busybox")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	firstID := stripTrailingCharacters(out)
+	firstID := strings.TrimSpace(out)
 
 	// start another container
 	runCmd = exec.Command(dockerBinary, "run", "-d", "-l", "match=me too", "busybox")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	secondID := stripTrailingCharacters(out)
+	secondID := strings.TrimSpace(out)
 
 	// start third container
 	runCmd = exec.Command(dockerBinary, "run", "-d", "-l", "nomatch=me", "busybox")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	thirdID := stripTrailingCharacters(out)
+	thirdID := strings.TrimSpace(out)
 
 	// filter containers by exact match
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut := strings.TrimSpace(out)
 	if containerOut != firstID {
-		t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)
+		c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)
 	}
 
 	// filter containers by two labels
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut = strings.TrimSpace(out)
 	if containerOut != firstID {
-		t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)
+		c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)
 	}
 
 	// filter containers by two labels, but expect not found because of AND behavior
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut = strings.TrimSpace(out)
 	if containerOut != "" {
-		t.Fatalf("Expected nothing, got %s for exited filter, output: %q", containerOut, out)
+		c.Fatalf("Expected nothing, got %s for exited filter, output: %q", containerOut, out)
 	}
 
 	// filter containers by exact key
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match")
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	containerOut = strings.TrimSpace(out)
 	if (!strings.Contains(containerOut, firstID) || !strings.Contains(containerOut, secondID)) || strings.Contains(containerOut, thirdID) {
-		t.Fatalf("Expected ids %s,%s, got %s for exited filter, output: %q", firstID, secondID, containerOut, out)
+		c.Fatalf("Expected ids %s,%s, got %s for exited filter, output: %q", firstID, secondID, containerOut, out)
 	}
-
-	deleteAllContainers()
-
-	logDone("ps - test ps filter label")
 }
 
-func TestPsListContainersFilterExited(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "run", "--name", "zero1", "busybox", "true")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	firstZero, err := getIDByName("zero1")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "run", "--name", "zero2", "busybox", "true")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	secondZero, err := getIDByName("zero2")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero1", "busybox", "false")
 	if out, _, err := runCommandWithOutput(runCmd); err == nil {
-		t.Fatal("Should fail.", out, err)
+		c.Fatal("Should fail.", out, err)
 	}
 	firstNonZero, err := getIDByName("nonzero1")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero2", "busybox", "false")
 	if out, _, err := runCommandWithOutput(runCmd); err == nil {
-		t.Fatal("Should fail.", out, err)
+		c.Fatal("Should fail.", out, err)
 	}
 	secondNonZero, err := getIDByName("nonzero2")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// filter containers by exited=0
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	ids := strings.Split(strings.TrimSpace(out), "\n")
 	if len(ids) != 2 {
-		t.Fatalf("Should be 2 zero exited containerst got %d", len(ids))
+		c.Fatalf("Should be 2 zero exited containers got %d: %s", len(ids), out)
 	}
 	if ids[0] != secondZero {
-		t.Fatalf("First in list should be %q, got %q", secondZero, ids[0])
+		c.Fatalf("First in list should be %q, got %q", secondZero, ids[0])
 	}
 	if ids[1] != firstZero {
-		t.Fatalf("Second in list should be %q, got %q", firstZero, ids[1])
+		c.Fatalf("Second in list should be %q, got %q", firstZero, ids[1])
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	ids = strings.Split(strings.TrimSpace(out), "\n")
 	if len(ids) != 2 {
-		t.Fatalf("Should be 2 zero exited containerst got %d", len(ids))
+		c.Fatalf("Should be 2 zero exited containers got %d", len(ids))
 	}
 	if ids[0] != secondNonZero {
-		t.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0])
+		c.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0])
 	}
 	if ids[1] != firstNonZero {
-		t.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1])
+		c.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1])
 	}
 
-	logDone("ps - test ps filter exited")
 }
 
-func TestPsRightTagName(t *testing.T) {
+func (s *DockerSuite) TestPsRightTagName(c *check.C) {
 	tag := "asybox:shmatest"
-	defer deleteAllContainers()
-	defer deleteImages(tag)
 	if out, err := exec.Command(dockerBinary, "tag", "busybox", tag).CombinedOutput(); err != nil {
-		t.Fatalf("Failed to tag image: %s, out: %q", err, out)
+		c.Fatalf("Failed to tag image: %s, out: %q", err, out)
 	}
 
 	var id1 string
 	if out, err := exec.Command(dockerBinary, "run", "-d", "busybox", "top").CombinedOutput(); err != nil {
-		t.Fatalf("Failed to run container: %s, out: %q", err, out)
+		c.Fatalf("Failed to run container: %s, out: %q", err, out)
 	} else {
 		id1 = strings.TrimSpace(string(out))
 	}
 
 	var id2 string
 	if out, err := exec.Command(dockerBinary, "run", "-d", tag, "top").CombinedOutput(); err != nil {
-		t.Fatalf("Failed to run container: %s, out: %q", err, out)
+		c.Fatalf("Failed to run container: %s, out: %q", err, out)
 	} else {
 		id2 = strings.TrimSpace(string(out))
 	}
+
+	var imageID string
+	if out, err := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox").CombinedOutput(); err != nil {
+		c.Fatalf("failed to get the image ID of busybox: %s, %v", out, err)
+	} else {
+		imageID = strings.TrimSpace(string(out))
+	}
+
+	var id3 string
+	if out, err := exec.Command(dockerBinary, "run", "-d", imageID, "top").CombinedOutput(); err != nil {
+		c.Fatalf("Failed to run container: %s, out: %q", err, out)
+	} else {
+		id3 = strings.TrimSpace(string(out))
+	}
+
 	out, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput()
 	if err != nil {
-		t.Fatalf("Failed to run 'ps': %s, out: %q", err, out)
+		c.Fatalf("Failed to run 'ps': %s, out: %q", err, out)
 	}
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
 	// skip header
 	lines = lines[1:]
-	if len(lines) != 2 {
-		t.Fatalf("There should be 2 running container, got %d", len(lines))
+	if len(lines) != 3 {
+		c.Fatalf("There should be 3 running container, got %d", len(lines))
 	}
 	for _, line := range lines {
 		f := strings.Fields(line)
 		switch f[0] {
 		case id1:
-			if f[1] != "busybox:latest" {
-				t.Fatalf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])
+			if f[1] != "busybox" {
+				c.Fatalf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])
 			}
 		case id2:
 			if f[1] != tag {
-				t.Fatalf("Expected %s tag for id %s, got %s", tag, id1, f[1])
+				c.Fatalf("Expected %s tag for id %s, got %s", tag, id2, f[1])
+			}
+		case id3:
+			if f[1] != imageID {
+				c.Fatalf("Expected %s imageID for id %s, got %s", tag, id3, f[1])
 			}
 		default:
-			t.Fatalf("Unexpected id %s, expected %s and %s", f[0], id1, id2)
+			c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3)
 		}
 	}
-	logDone("ps - right tags for containers")
 }
 
-func TestPsLinkedWithNoTrunc(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) {
 	if out, err := exec.Command(dockerBinary, "run", "--name=first", "-d", "busybox", "top").CombinedOutput(); err != nil {
-		t.Fatalf("Output: %s, err: %s", out, err)
+		c.Fatalf("Output: %s, err: %s", out, err)
 	}
 	if out, err := exec.Command(dockerBinary, "run", "--name=second", "--link=first:first", "-d", "busybox", "top").CombinedOutput(); err != nil {
-		t.Fatalf("Output: %s, err: %s", out, err)
+		c.Fatalf("Output: %s, err: %s", out, err)
 	}
 	out, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput()
 	if err != nil {
-		t.Fatalf("Output: %s, err: %s", out, err)
+		c.Fatalf("Output: %s, err: %s", out, err)
 	}
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
 	// strip header
@@ -631,28 +643,40 @@
 		names = append(names, fields[len(fields)-1])
 	}
 	if !reflect.DeepEqual(expected, names) {
-		t.Fatalf("Expected array: %v, got: %v", expected, names)
+		c.Fatalf("Expected array: %v, got: %v", expected, names)
 	}
 }
 
-func TestPsGroupPortRange(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestPsGroupPortRange(c *check.C) {
 
-	portRange := "3300-3900"
+	portRange := "3800-3900"
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "ps"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// check that the port range is in the output
 	if !strings.Contains(string(out), portRange) {
-		t.Fatalf("docker ps output should have had the port range %q: %s", portRange, string(out))
+		c.Fatalf("docker ps output should have had the port range %q: %s", portRange, string(out))
 	}
 
-	logDone("ps - port range")
+}
+
+func (s *DockerSuite) TestPsWithSize(c *check.C) {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "sizetest", "busybox", "top"))
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "ps", "--size"))
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	if !strings.Contains(out, "virtual") {
+		c.Fatalf("docker ps with --size should show virtual size of container")
+	}
 }
diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go
index 926e763..a3ded8f 100644
--- a/integration-cli/docker_cli_pull_test.go
+++ b/integration-cli/docker_cli_pull_test.go
@@ -4,15 +4,13 @@
 	"fmt"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // See issue docker/docker#8141
-func TestPullImageWithAliases(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
-	defer deleteImages(repoName)
 
 	repos := []string{}
 	for _, tag := range []string{"recent", "fresh"} {
@@ -22,90 +20,92 @@
 	// Tag and push the same image multiple times.
 	for _, repo := range repos {
 		if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", "busybox", repo)); err != nil {
-			t.Fatalf("Failed to tag image %v: error %v, output %q", repos, err, out)
+			c.Fatalf("Failed to tag image %v: error %v, output %q", repos, err, out)
 		}
-		defer deleteImages(repo)
 		if out, err := exec.Command(dockerBinary, "push", repo).CombinedOutput(); err != nil {
-			t.Fatalf("Failed to push image %v: error %v, output %q", repo, err, string(out))
+			c.Fatalf("Failed to push image %v: error %v, output %q", repo, err, string(out))
 		}
 	}
 
 	// Clear local images store.
 	args := append([]string{"rmi"}, repos...)
 	if out, err := exec.Command(dockerBinary, args...).CombinedOutput(); err != nil {
-		t.Fatalf("Failed to clean images: error %v, output %q", err, string(out))
+		c.Fatalf("Failed to clean images: error %v, output %q", err, string(out))
 	}
 
 	// Pull a single tag and verify it doesn't bring down all aliases.
 	pullCmd := exec.Command(dockerBinary, "pull", repos[0])
 	if out, _, err := runCommandWithOutput(pullCmd); err != nil {
-		t.Fatalf("Failed to pull %v: error %v, output %q", repoName, err, out)
+		c.Fatalf("Failed to pull %v: error %v, output %q", repoName, err, out)
 	}
 	if err := exec.Command(dockerBinary, "inspect", repos[0]).Run(); err != nil {
-		t.Fatalf("Image %v was not pulled down", repos[0])
+		c.Fatalf("Image %v was not pulled down", repos[0])
 	}
 	for _, repo := range repos[1:] {
 		if err := exec.Command(dockerBinary, "inspect", repo).Run(); err == nil {
-			t.Fatalf("Image %v shouldn't have been pulled down", repo)
+			c.Fatalf("Image %v shouldn't have been pulled down", repo)
 		}
 	}
-
-	logDone("pull - image with aliases")
 }
 
 // pulling library/hello-world should show verified message
-func TestPullVerified(t *testing.T) {
+func (s *DockerSuite) TestPullVerified(c *check.C) {
+	c.Skip("Skipping hub dependent test")
+
 	// Image must be pulled from central repository to get verified message
 	// unless keychain is manually updated to contain the daemon's sign key.
 
 	verifiedName := "hello-world"
-	defer deleteImages(verifiedName)
 
 	// pull it
 	expected := "The image you are pulling has been verified"
 	pullCmd := exec.Command(dockerBinary, "pull", verifiedName)
 	if out, exitCode, err := runCommandWithOutput(pullCmd); err != nil || !strings.Contains(out, expected) {
 		if err != nil || exitCode != 0 {
-			t.Skipf("pulling the '%s' image from the registry has failed: %s", verifiedName, err)
+			c.Skip(fmt.Sprintf("pulling the '%s' image from the registry has failed: %v", verifiedName, err))
 		}
-		t.Fatalf("pulling a verified image failed. expected: %s\ngot: %s, %v", expected, out, err)
+		c.Fatalf("pulling a verified image failed. expected: %s\ngot: %s, %v", expected, out, err)
 	}
 
 	// pull it again
 	pullCmd = exec.Command(dockerBinary, "pull", verifiedName)
 	if out, exitCode, err := runCommandWithOutput(pullCmd); err != nil || strings.Contains(out, expected) {
 		if err != nil || exitCode != 0 {
-			t.Skipf("pulling the '%s' image from the registry has failed: %s", verifiedName, err)
+			c.Skip(fmt.Sprintf("pulling the '%s' image from the registry has failed: %v", verifiedName, err))
 		}
-		t.Fatalf("pulling a verified image failed. unexpected verify message\ngot: %s, %v", out, err)
+		c.Fatalf("pulling a verified image failed. unexpected verify message\ngot: %s, %v", out, err)
 	}
 
-	logDone("pull - pull verified")
 }
 
 // pulling an image from the central registry should work
-func TestPullImageFromCentralRegistry(t *testing.T) {
-	defer deleteImages("hello-world")
+func (s *DockerSuite) TestPullImageFromCentralRegistry(c *check.C) {
+	testRequires(c, Network)
 
 	pullCmd := exec.Command(dockerBinary, "pull", "hello-world")
 	if out, _, err := runCommandWithOutput(pullCmd); err != nil {
-		t.Fatalf("pulling the hello-world image from the registry has failed: %s, %v", out, err)
+		c.Fatalf("pulling the hello-world image from the registry has failed: %s, %v", out, err)
 	}
-	logDone("pull - pull hello-world")
 }
 
 // pulling a non-existing image from the central registry should return a non-zero exit code
-func TestPullNonExistingImage(t *testing.T) {
-	pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234")
-	if out, _, err := runCommandWithOutput(pullCmd); err == nil {
-		t.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out)
+func (s *DockerSuite) TestPullNonExistingImage(c *check.C) {
+	testRequires(c, Network)
+
+	name := "sadfsadfasdf"
+	pullCmd := exec.Command(dockerBinary, "pull", name)
+	out, _, err := runCommandWithOutput(pullCmd)
+
+	if err == nil || !strings.Contains(out, fmt.Sprintf("Error: image library/%s:latest not found", name)) {
+		c.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out)
 	}
-	logDone("pull - pull fooblahblah1234 (non-existing image)")
 }
 
 // pulling an image from the central registry using official names should work
 // ensure all pulls result in the same image
-func TestPullImageOfficialNames(t *testing.T) {
+func (s *DockerSuite) TestPullImageOfficialNames(c *check.C) {
+	testRequires(c, Network)
+
 	names := []string{
 		"docker.io/hello-world",
 		"index.docker.io/hello-world",
@@ -117,7 +117,7 @@
 		pullCmd := exec.Command(dockerBinary, "pull", name)
 		out, exitCode, err := runCommandWithOutput(pullCmd)
 		if err != nil || exitCode != 0 {
-			t.Errorf("pulling the '%s' image from the registry has failed: %s", name, err)
+			c.Errorf("pulling the '%s' image from the registry has failed: %s", name, err)
 			continue
 		}
 
@@ -125,10 +125,28 @@
 		imagesCmd := exec.Command(dockerBinary, "images")
 		out, _, err = runCommandWithOutput(imagesCmd)
 		if err != nil {
-			t.Errorf("listing images failed with errors: %v", err)
+			c.Errorf("listing images failed with errors: %v", err)
 		} else if strings.Contains(out, name) {
-			t.Errorf("images should not have listed '%s'", name)
+			c.Errorf("images should not have listed '%s'", name)
 		}
 	}
-	logDone("pull - pull official names")
+}
+
+func (s *DockerSuite) TestPullScratchNotAllowed(c *check.C) {
+	testRequires(c, Network)
+
+	pullCmd := exec.Command(dockerBinary, "pull", "scratch")
+	out, exitCode, err := runCommandWithOutput(pullCmd)
+	if err == nil {
+		c.Fatal("expected pull of scratch to fail, but it didn't")
+	}
+	if exitCode != 1 {
+		c.Fatalf("pulling scratch expected exit code 1, got %d", exitCode)
+	}
+	if strings.Contains(out, "Pulling repository scratch") {
+		c.Fatalf("pulling scratch should not have begun: %s", out)
+	}
+	if !strings.Contains(out, "'scratch' is a reserved name") {
+		c.Fatalf("unexpected output pulling scratch: %s", out)
+	}
 }
diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
index f1274ba..ca97180 100644
--- a/integration-cli/docker_cli_push_test.go
+++ b/integration-cli/docker_cli_push_test.go
@@ -1,158 +1,140 @@
 package main
 
 import (
+	"archive/tar"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"os/exec"
 	"strings"
-	"testing"
 	"time"
 
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/go-check/check"
 )
 
 // pulling an image from the central registry should work
-func TestPushBusyboxImage(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	// tag the image to upload it to the private registry
 	tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName)
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatalf("image tagging failed: %s, %v", out, err)
+		c.Fatalf("image tagging failed: %s, %v", out, err)
 	}
-	defer deleteImages(repoName)
 
 	pushCmd := exec.Command(dockerBinary, "push", repoName)
 	if out, _, err := runCommandWithOutput(pushCmd); err != nil {
-		t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
+		c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
 	}
-	logDone("push - busybox to private registry")
 }
 
 // pushing an image without a prefix should throw an error
-func TestPushUnprefixedRepo(t *testing.T) {
+func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) {
 	pushCmd := exec.Command(dockerBinary, "push", "busybox")
 	if out, _, err := runCommandWithOutput(pushCmd); err == nil {
-		t.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)
+		c.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)
 	}
-	logDone("push - unprefixed busybox repo must not pass")
 }
 
-func TestPushUntagged(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 
 	expected := "Repository does not exist"
 	pushCmd := exec.Command(dockerBinary, "push", repoName)
 	if out, _, err := runCommandWithOutput(pushCmd); err == nil {
-		t.Fatalf("pushing the image to the private registry should have failed: outuput %q", out)
+		c.Fatalf("pushing the image to the private registry should have failed: output %q", out)
 	} else if !strings.Contains(out, expected) {
-		t.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out)
+		c.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out)
 	}
-	logDone("push - untagged image")
 }
 
-func TestPushBadTag(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL)
 
 	expected := "does not exist"
 	pushCmd := exec.Command(dockerBinary, "push", repoName)
 	if out, _, err := runCommandWithOutput(pushCmd); err == nil {
-		t.Fatalf("pushing the image to the private registry should have failed: outuput %q", out)
+		c.Fatalf("pushing the image to the private registry should have failed: output %q", out)
 	} else if !strings.Contains(out, expected) {
-		t.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out)
+		c.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out)
 	}
-	logDone("push - image with bad tag")
 }
 
-func TestPushMultipleTags(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL)
 	repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL)
-	// tag the image to upload it tot he private registry
+	// tag the image and upload it to the private registry
 	tagCmd1 := exec.Command(dockerBinary, "tag", "busybox", repoTag1)
 	if out, _, err := runCommandWithOutput(tagCmd1); err != nil {
-		t.Fatalf("image tagging failed: %s, %v", out, err)
+		c.Fatalf("image tagging failed: %s, %v", out, err)
 	}
-	defer deleteImages(repoTag1)
 	tagCmd2 := exec.Command(dockerBinary, "tag", "busybox", repoTag2)
 	if out, _, err := runCommandWithOutput(tagCmd2); err != nil {
-		t.Fatalf("image tagging failed: %s, %v", out, err)
+		c.Fatalf("image tagging failed: %s, %v", out, err)
 	}
-	defer deleteImages(repoTag2)
 
 	pushCmd := exec.Command(dockerBinary, "push", repoName)
 	if out, _, err := runCommandWithOutput(pushCmd); err != nil {
-		t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
+		c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
 	}
-	logDone("push - multiple tags to private registry")
 }
 
-func TestPushInterrupt(t *testing.T) {
-	defer setupRegistry(t)()
-
+func (s *DockerRegistrySuite) TestPushInterrupt(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
-	// tag the image to upload it tot he private registry
-	tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName)
-	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatalf("image tagging failed: %s, %v", out, err)
+	// tag the image and upload it to the private registry
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", "busybox", repoName)); err != nil {
+		c.Fatalf("image tagging failed: %s, %v", out, err)
 	}
-	defer deleteImages(repoName)
 
 	pushCmd := exec.Command(dockerBinary, "push", repoName)
 	if err := pushCmd.Start(); err != nil {
-		t.Fatalf("Failed to start pushing to private registry: %v", err)
+		c.Fatalf("Failed to start pushing to private registry: %v", err)
 	}
 
 	// Interrupt push (yes, we have no idea at what point it will get killed).
 	time.Sleep(200 * time.Millisecond)
 	if err := pushCmd.Process.Kill(); err != nil {
-		t.Fatalf("Failed to kill push process: %v", err)
+		c.Fatalf("Failed to kill push process: %v", err)
 	}
-	// Try agin
-	pushCmd = exec.Command(dockerBinary, "push", repoName)
-	if err := pushCmd.Start(); err != nil {
-		t.Fatalf("Failed to start pushing to private registry: %v", err)
+	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repoName)); err == nil {
+		str := string(out)
+		if !strings.Contains(str, "already in progress") {
+			c.Fatalf("Push should be continued on daemon side, but seems ok: %v, %s", err, out)
+		}
 	}
-
-	logDone("push - interrupted")
+	// now wait until all this pushes will complete
+	// if it failed with timeout - there would be some error,
+	// so no logic about it here
+	for exec.Command(dockerBinary, "push", repoName).Run() != nil {
+	}
 }
 
-func TestPushEmptyLayer(t *testing.T) {
-	defer setupRegistry(t)()
+func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL)
 	emptyTarball, err := ioutil.TempFile("", "empty_tarball")
 	if err != nil {
-		t.Fatalf("Unable to create test file: %v", err)
+		c.Fatalf("Unable to create test file: %v", err)
 	}
 	tw := tar.NewWriter(emptyTarball)
 	err = tw.Close()
 	if err != nil {
-		t.Fatalf("Error creating empty tarball: %v", err)
+		c.Fatalf("Error creating empty tarball: %v", err)
 	}
 	freader, err := os.Open(emptyTarball.Name())
 	if err != nil {
-		t.Fatalf("Could not open test tarball: %v", err)
+		c.Fatalf("Could not open test tarball: %v", err)
 	}
 
 	importCmd := exec.Command(dockerBinary, "import", "-", repoName)
 	importCmd.Stdin = freader
 	out, _, err := runCommandWithOutput(importCmd)
 	if err != nil {
-		t.Errorf("import failed with errors: %v, output: %q", err, out)
+		c.Errorf("import failed with errors: %v, output: %q", err, out)
 	}
 
 	// Now verify we can push it
 	pushCmd := exec.Command(dockerBinary, "push", repoName)
 	if out, _, err := runCommandWithOutput(pushCmd); err != nil {
-		t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
+		c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
 	}
-	logDone("push - empty layer config to private registry")
 }
diff --git a/integration-cli/docker_cli_rename_test.go b/integration-cli/docker_cli_rename_test.go
index 3aaf795..156ea6e 100644
--- a/integration-cli/docker_cli_rename_test.go
+++ b/integration-cli/docker_cli_rename_test.go
@@ -3,119 +3,110 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/go-check/check"
 )
 
-func TestRenameStoppedContainer(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
 	name, err := inspectField(cleanedContainerID, "Name")
 
-	runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name")
+	newName := "new_name" + stringid.GenerateRandomID()
+	runCmd = exec.Command(dockerBinary, "rename", "first_name", newName)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
 	name, err = inspectField(cleanedContainerID, "Name")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	if name != "/new_name" {
-		t.Fatal("Failed to rename container ", name)
+	if name != "/"+newName {
+		c.Fatal("Failed to rename container ", name)
 	}
 
-	logDone("rename - stopped container")
 }
 
-func TestRenameRunningContainer(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRenameRunningContainer(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name")
+	newName := "new_name" + stringid.GenerateRandomID()
+	cleanedContainerID := strings.TrimSpace(out)
+	runCmd = exec.Command(dockerBinary, "rename", "first_name", newName)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
 	name, err := inspectField(cleanedContainerID, "Name")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	if name != "/new_name" {
-		t.Fatal("Failed to rename container ")
+	if name != "/"+newName {
+		c.Fatal("Failed to rename container ")
 	}
-
-	logDone("rename - running container")
 }
 
-func TestRenameCheckNames(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRenameCheckNames(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
-	runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name")
+	newName := "new_name" + stringid.GenerateRandomID()
+	runCmd = exec.Command(dockerBinary, "rename", "first_name", newName)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
-	name, err := inspectField("new_name", "Name")
+	name, err := inspectField(newName, "Name")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	if name != "/new_name" {
-		t.Fatal("Failed to rename container ")
+	if name != "/"+newName {
+		c.Fatal("Failed to rename container ")
 	}
 
 	name, err = inspectField("first_name", "Name")
 	if err == nil && !strings.Contains(err.Error(), "No such image or container: first_name") {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-
-	logDone("rename - running container")
 }
 
-func TestRenameInvalidName(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRenameInvalidName(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--name", "myname", "-d", "busybox", "top")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "rename", "myname", "new:invalid")
 	if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Invalid container name") {
-		t.Fatalf("Renaming container to invalid name should have failed: %s\n%v", out, err)
+		c.Fatalf("Renaming container to invalid name should have failed: %s\n%v", out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "ps", "-a")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "myname") {
-		t.Fatalf("Output of docker ps should have included 'myname': %s\n%v", out, err)
+		c.Fatalf("Output of docker ps should have included 'myname': %s\n%v", out, err)
 	}
-
-	logDone("rename - invalid container name")
 }
diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go
index 7b97c07..fd95fd0 100644
--- a/integration-cli/docker_cli_restart_test.go
+++ b/integration-cli/docker_cli_restart_test.go
@@ -3,244 +3,221 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
 	"time"
+
+	"github.com/go-check/check"
 )
 
-func TestRestartStoppedContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if out != "foobar\n" {
-		t.Errorf("container should've printed 'foobar'")
+		c.Errorf("container should've printed 'foobar'")
 	}
 
 	runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if out != "foobar\nfoobar\n" {
-		t.Errorf("container should've printed 'foobar' twice")
+		c.Errorf("container should've printed 'foobar' twice")
 	}
 
-	logDone("restart - echo foobar for stopped container")
 }
 
-func TestRestartRunningContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRestartRunningContainer(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	time.Sleep(1 * time.Second)
 
 	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if out != "foobar\n" {
-		t.Errorf("container should've printed 'foobar'")
+		c.Errorf("container should've printed 'foobar'")
 	}
 
 	runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	time.Sleep(1 * time.Second)
 
 	if out != "foobar\nfoobar\n" {
-		t.Errorf("container should've printed 'foobar' twice")
+		c.Errorf("container should've printed 'foobar' twice")
 	}
 
-	logDone("restart - echo foobar for running container")
 }
 
 // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
-func TestRestartWithVolumes(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRestartWithVolumes(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/test", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if out = strings.Trim(out, " \n\r"); out != "1" {
-		t.Errorf("expect 1 volume received %s", out)
+		c.Errorf("expect 1 volume received %s", out)
 	}
 
-	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID)
-	volumes, _, err := runCommandWithOutput(runCmd)
-	if err != nil {
-		t.Fatal(volumes, err)
-	}
+	volumes, err := inspectField(cleanedContainerID, ".Volumes")
+	c.Assert(err, check.IsNil)
 
 	runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID)
 	out, _, err = runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	if out = strings.Trim(out, " \n\r"); out != "1" {
-		t.Errorf("expect 1 volume after restart received %s", out)
+		c.Errorf("expect 1 volume after restart received %s", out)
 	}
 
-	runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID)
-	volumesAfterRestart, _, err := runCommandWithOutput(runCmd)
-	if err != nil {
-		t.Fatal(volumesAfterRestart, err)
-	}
+	volumesAfterRestart, err := inspectField(cleanedContainerID, ".Volumes")
+	c.Assert(err, check.IsNil)
 
 	if volumes != volumesAfterRestart {
-		volumes = strings.Trim(volumes, " \n\r")
-		volumesAfterRestart = strings.Trim(volumesAfterRestart, " \n\r")
-		t.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart)
+		c.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart)
 	}
 
-	logDone("restart - does not create a new volume on restart")
 }
 
-func TestRestartPolicyNO(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRestartPolicyNO(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "--restart=no", "busybox", "false")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	id := strings.TrimSpace(string(out))
 	name, err := inspectField(id, "HostConfig.RestartPolicy.Name")
-	if err != nil {
-		t.Fatal(err, out)
-	}
+	c.Assert(err, check.IsNil)
 	if name != "no" {
-		t.Fatalf("Container restart policy name is %s, expected %s", name, "no")
+		c.Fatalf("Container restart policy name is %s, expected %s", name, "no")
 	}
 
-	logDone("restart - recording restart policy name for --restart=no")
 }
 
-func TestRestartPolicyAlways(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "--restart=always", "busybox", "false")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	id := strings.TrimSpace(string(out))
 	name, err := inspectField(id, "HostConfig.RestartPolicy.Name")
-	if err != nil {
-		t.Fatal(err, out)
-	}
+	c.Assert(err, check.IsNil)
 	if name != "always" {
-		t.Fatalf("Container restart policy name is %s, expected %s", name, "always")
+		c.Fatalf("Container restart policy name is %s, expected %s", name, "always")
 	}
 
-	logDone("restart - recording restart policy name for --restart=always")
+	MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount")
+	c.Assert(err, check.IsNil)
+
+	// MaximumRetryCount=0 if the restart policy is always
+	if MaximumRetryCount != "0" {
+		c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "0")
+	}
+
 }
 
-func TestRestartPolicyOnFailure(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:1", "busybox", "false")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	id := strings.TrimSpace(string(out))
 	name, err := inspectField(id, "HostConfig.RestartPolicy.Name")
-	if err != nil {
-		t.Fatal(err, out)
-	}
+	c.Assert(err, check.IsNil)
 	if name != "on-failure" {
-		t.Fatalf("Container restart policy name is %s, expected %s", name, "on-failure")
+		c.Fatalf("Container restart policy name is %s, expected %s", name, "on-failure")
 	}
 
-	logDone("restart - recording restart policy name for --restart=on-failure")
 }
 
 // a good container with --restart=on-failure:3
 // MaximumRetryCount!=0; RestartCount=0
-func TestContainerRestartwithGoodContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestContainerRestartwithGoodContainer(c *check.C) {
 	out, err := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:3", "busybox", "true").CombinedOutput()
 	if err != nil {
-		t.Fatal(string(out), err)
+		c.Fatal(string(out), err)
 	}
 	id := strings.TrimSpace(string(out))
 	if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	count, err := inspectField(id, "RestartCount")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if count != "0" {
-		t.Fatalf("Container was restarted %s times, expected %d", count, 0)
+		c.Fatalf("Container was restarted %s times, expected %d", count, 0)
 	}
 	MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if MaximumRetryCount != "3" {
-		t.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
+		c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
 	}
 
-	logDone("restart - for a good container with restart policy, MaximumRetryCount is not 0 and RestartCount is 0")
 }
diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go
index d01b36d..f5884dc 100644
--- a/integration-cli/docker_cli_rm_test.go
+++ b/integration-cli/docker_cli_rm_test.go
@@ -4,94 +4,68 @@
 	"os"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestRmContainerWithRemovedVolume(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	cmd := exec.Command(dockerBinary, "run", "--name", "losemyvolumes", "-v", "/tmp/testing:/test", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err := os.Remove("/tmp/testing"); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "rm", "-v", "losemyvolumes")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	logDone("rm - removed volume")
 }
 
-func TestRmContainerWithVolume(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) {
 
 	cmd := exec.Command(dockerBinary, "run", "--name", "foo", "-v", "/srv", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "rm", "-v", "foo")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("rm - volume")
 }
 
-func TestRmRunningContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmRunningContainer(c *check.C) {
 
-	createRunningContainer(t, "foo")
+	createRunningContainer(c, "foo")
 
 	// Test cannot remove running container
 	cmd := exec.Command(dockerBinary, "rm", "foo")
 	if _, err := runCommand(cmd); err == nil {
-		t.Fatalf("Expected error, can't rm a running container")
+		c.Fatalf("Expected error, can't rm a running container")
 	}
 
-	logDone("rm - running container")
 }
 
-func TestRmRunningContainerCheckError409(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmForceRemoveRunningContainer(c *check.C) {
 
-	createRunningContainer(t, "foo")
-
-	endpoint := "/containers/foo"
-	_, err := sockRequest("DELETE", endpoint, nil)
-
-	if err == nil {
-		t.Fatalf("Expected error, can't rm a running container")
-	}
-	if !strings.Contains(err.Error(), "409 Conflict") {
-		t.Fatalf("Expected error to contain '409 Conflict' but found %s", err)
-	}
-
-	logDone("rm - running container")
-}
-
-func TestRmForceRemoveRunningContainer(t *testing.T) {
-	defer deleteAllContainers()
-
-	createRunningContainer(t, "foo")
+	createRunningContainer(c, "foo")
 
 	// Stop then remove with -s
 	cmd := exec.Command(dockerBinary, "rm", "-f", "foo")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	logDone("rm - running container with --force=true")
 }
 
-func TestRmContainerOrphaning(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) {
 
 	dockerfile1 := `FROM busybox:latest
 	ENTRYPOINT ["/bin/true"]`
@@ -102,47 +76,44 @@
 
 	// build first dockerfile
 	img1, err := buildImage(img, dockerfile1, true)
-	defer deleteImages(img1)
 	if err != nil {
-		t.Fatalf("Could not build image %s: %v", img, err)
+		c.Fatalf("Could not build image %s: %v", img, err)
 	}
 	// run container on first image
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", img)); err != nil {
-		t.Fatalf("Could not run image %s: %v: %s", img, err, out)
+		c.Fatalf("Could not run image %s: %v: %s", img, err, out)
 	}
 	// rebuild dockerfile with a small addition at the end
 	if _, err := buildImage(img, dockerfile2, true); err != nil {
-		t.Fatalf("Could not rebuild image %s: %v", img, err)
+		c.Fatalf("Could not rebuild image %s: %v", img, err)
 	}
 	// try to remove the image, should error out.
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", img)); err == nil {
-		t.Fatalf("Expected to error out removing the image, but succeeded: %s", out)
+		c.Fatalf("Expected to error out removing the image, but succeeded: %s", out)
 	}
 	// check if we deleted the first image
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc"))
 	if err != nil {
-		t.Fatalf("%v: %s", err, out)
+		c.Fatalf("%v: %s", err, out)
 	}
 	if !strings.Contains(out, img1) {
-		t.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out)
+		c.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out)
 	}
 
-	logDone("rm - container orphaning")
 }
 
-func TestRmInvalidContainer(t *testing.T) {
+func (s *DockerSuite) TestRmInvalidContainer(c *check.C) {
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil {
-		t.Fatal("Expected error on rm unknown container, got none")
-	} else if !strings.Contains(out, "failed to remove one or more containers") {
-		t.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out)
+		c.Fatal("Expected error on rm unknown container, got none")
+	} else if !strings.Contains(out, "failed to remove containers") {
+		c.Fatalf("Expected output to contain 'failed to remove containers', got %q", out)
 	}
 
-	logDone("rm - delete unknown container")
 }
 
-func createRunningContainer(t *testing.T, name string) {
+func createRunningContainer(c *check.C, name string) {
 	cmd := exec.Command(dockerBinary, "run", "-dt", "--name", name, "busybox", "top")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 }
diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go
index fd34c22..c7d0ca8 100644
--- a/integration-cli/docker_cli_rmi_test.go
+++ b/integration-cli/docker_cli_rmi_test.go
@@ -1,107 +1,145 @@
 package main
 
 import (
+	"fmt"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestRmiWithContainerFails(t *testing.T) {
+func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) {
 	errSubstr := "is using it"
 
 	// create a container
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to create a container: %s, %v", out, err)
+		c.Fatalf("failed to create a container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	// try to delete the image
 	runCmd = exec.Command(dockerBinary, "rmi", "busybox")
 	out, _, err = runCommandWithOutput(runCmd)
 	if err == nil {
-		t.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out)
+		c.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out)
 	}
 	if !strings.Contains(out, errSubstr) {
-		t.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out)
+		c.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out)
 	}
 
 	// make sure it didn't delete the busybox name
-	images, _, _ := dockerCmd(t, "images")
+	images, _ := dockerCmd(c, "images")
 	if !strings.Contains(images, "busybox") {
-		t.Fatalf("The name 'busybox' should not have been removed from images: %q", images)
+		c.Fatalf("The name 'busybox' should not have been removed from images: %q", images)
 	}
-
-	deleteContainer(cleanedContainerID)
-
-	logDone("rmi- container using image while rmi, should not remove image name")
 }
 
-func TestRmiTag(t *testing.T) {
-	imagesBefore, _, _ := dockerCmd(t, "images", "-a")
-	dockerCmd(t, "tag", "busybox", "utest:tag1")
-	dockerCmd(t, "tag", "busybox", "utest/docker:tag2")
-	dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3")
+func (s *DockerSuite) TestRmiTag(c *check.C) {
+	imagesBefore, _ := dockerCmd(c, "images", "-a")
+	dockerCmd(c, "tag", "busybox", "utest:tag1")
+	dockerCmd(c, "tag", "busybox", "utest/docker:tag2")
+	dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3")
 	{
-		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
+		imagesAfter, _ := dockerCmd(c, "images", "-a")
 		if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+3 {
-			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
+			c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
 		}
 	}
-	dockerCmd(t, "rmi", "utest/docker:tag2")
+	dockerCmd(c, "rmi", "utest/docker:tag2")
 	{
-		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
+		imagesAfter, _ := dockerCmd(c, "images", "-a")
 		if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 {
-			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
+			c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
 		}
 
 	}
-	dockerCmd(t, "rmi", "utest:5000/docker:tag3")
+	dockerCmd(c, "rmi", "utest:5000/docker:tag3")
 	{
-		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
+		imagesAfter, _ := dockerCmd(c, "images", "-a")
 		if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+1 {
-			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
+			c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
 		}
 
 	}
-	dockerCmd(t, "rmi", "utest:tag1")
+	dockerCmd(c, "rmi", "utest:tag1")
 	{
-		imagesAfter, _, _ := dockerCmd(t, "images", "-a")
+		imagesAfter, _ := dockerCmd(c, "images", "-a")
 		if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+0 {
-			t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
+			c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)
 		}
 
 	}
-	logDone("rmi - tag,rmi- tagging the same images multiple times then removing tags")
 }
 
-func TestRmiTagWithExistingContainers(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmiImgIDForce(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("failed to create a container:%s, %v", out, err)
+	}
+	containerID := strings.TrimSpace(out)
+	runCmd = exec.Command(dockerBinary, "commit", containerID, "busybox-test")
+	out, _, err = runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatalf("failed to commit a new busybox-test:%s, %v", out, err)
+	}
+
+	imagesBefore, _ := dockerCmd(c, "images", "-a")
+	dockerCmd(c, "tag", "busybox-test", "utest:tag1")
+	dockerCmd(c, "tag", "busybox-test", "utest:tag2")
+	dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3")
+	dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4")
+	{
+		imagesAfter, _ := dockerCmd(c, "images", "-a")
+		if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+4 {
+			c.Fatalf("tag busybox to create 4 more images with same imageID; docker images shows: %q\n", imagesAfter)
+		}
+	}
+	imgID, err := inspectField("busybox-test", "Id")
+	c.Assert(err, check.IsNil)
+
+	// first checkout without force it fails
+	runCmd = exec.Command(dockerBinary, "rmi", imgID)
+	out, _, err = runCommandWithOutput(runCmd)
+	if err == nil || !strings.Contains(out, fmt.Sprintf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", imgID)) {
+		c.Fatalf("rmi tagged in multiple repos should have failed without force:%s, %v", out, err)
+	}
+
+	dockerCmd(c, "rmi", "-f", imgID)
+	{
+		imagesAfter, _ := dockerCmd(c, "images", "-a")
+		if strings.Contains(imagesAfter, imgID[:12]) {
+			c.Fatalf("rmi -f %s failed, image still exists: %q\n\n", imgID, imagesAfter)
+		}
+
+	}
+}
+
+func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) {
 
 	container := "test-delete-tag"
 	newtag := "busybox:newtag"
 	bb := "busybox:latest"
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", bb, newtag)); err != nil {
-		t.Fatalf("Could not tag busybox: %v: %s", err, out)
+		c.Fatalf("Could not tag busybox: %v: %s", err, out)
 	}
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", container, bb, "/bin/true")); err != nil {
-		t.Fatalf("Could not run busybox: %v: %s", err, out)
+		c.Fatalf("Could not run busybox: %v: %s", err, out)
 	}
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", newtag))
 	if err != nil {
-		t.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out)
+		c.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out)
 	}
 	if d := strings.Count(out, "Untagged: "); d != 1 {
-		t.Fatalf("Expected 1 untagged entry got %d: %q", d, out)
+		c.Fatalf("Expected 1 untagged entry got %d: %q", d, out)
 	}
 
-	logDone("rmi - delete tag with existing containers")
 }
 
-func TestRmiForceWithExistingContainers(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) {
 
 	image := "busybox-clone"
 
@@ -110,64 +148,60 @@
 MAINTAINER foo`)
 
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatalf("Could not build %s: %s, %v", image, out, err)
+		c.Fatalf("Could not build %s: %s, %v", image, out, err)
 	}
 
 	if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "test-force-rmi", image, "/bin/true")); err != nil {
-		t.Fatalf("Could not run container: %s, %v", out, err)
+		c.Fatalf("Could not run container: %s, %v", out, err)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", "-f", image))
 	if err != nil {
-		t.Fatalf("Could not remove image %s:  %s, %v", image, out, err)
+		c.Fatalf("Could not remove image %s:  %s, %v", image, out, err)
 	}
 
-	logDone("rmi - force delete with existing containers")
 }
 
-func TestRmiWithMultipleRepositories(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) {
 	newRepo := "127.0.0.1:5000/busybox"
 	oldRepo := "busybox"
 	newTag := "busybox:test"
 	cmd := exec.Command(dockerBinary, "tag", oldRepo, newRepo)
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("Could not tag busybox: %v: %s", err, out)
+		c.Fatalf("Could not tag busybox: %v: %s", err, out)
 	}
 	cmd = exec.Command(dockerBinary, "run", "--name", "test", oldRepo, "touch", "/home/abcd")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %s", err, out)
+		c.Fatalf("failed to run container: %v, output: %s", err, out)
 	}
 	cmd = exec.Command(dockerBinary, "commit", "test", newTag)
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to commit container: %v, output: %s", err, out)
+		c.Fatalf("failed to commit container: %v, output: %s", err, out)
 	}
 	cmd = exec.Command(dockerBinary, "rmi", newTag)
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to remove image: %v, output: %s", err, out)
+		c.Fatalf("failed to remove image: %v, output: %s", err, out)
 	}
 	if !strings.Contains(out, "Untagged: "+newTag) {
-		t.Fatalf("Could not remove image %s: %s, %v", newTag, out, err)
+		c.Fatalf("Could not remove image %s: %s, %v", newTag, out, err)
 	}
 
-	logDone("rmi - delete a image which its dependency tagged to multiple repositories success")
 }
 
-func TestRmiBlank(t *testing.T) {
+func (s *DockerSuite) TestRmiBlank(c *check.C) {
 	// try to delete a blank image name
 	runCmd := exec.Command(dockerBinary, "rmi", "")
 	out, _, err := runCommandWithOutput(runCmd)
 
 	if err == nil {
-		t.Fatal("Should have failed to delete '' image")
+		c.Fatal("Should have failed to delete '' image")
 	}
 
 	if strings.Contains(out, "No such image") {
-		t.Fatalf("Wrong error message generated: %s", out)
+		c.Fatalf("Wrong error message generated: %s", out)
 	}
-	logDone("rmi- blank image name")
 }
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index 7fda0a4..20e8c6e 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -16,688 +16,571 @@
 	"strconv"
 	"strings"
 	"sync"
-	"testing"
 	"time"
 
 	"github.com/docker/docker/nat"
-	"github.com/docker/docker/pkg/networkfs/resolvconf"
+	"github.com/docker/libnetwork/resolvconf"
+	"github.com/go-check/check"
 )
 
 // "test123" should be printed by docker run
-func TestRunEchoStdout(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunEchoStdout(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "busybox", "echo", "test123")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	if out != "test123\n" {
-		t.Errorf("container should've printed 'test123'")
+		c.Fatalf("container should've printed 'test123'")
 	}
-
-	logDone("run - echo test123")
 }
 
 // "test" should be printed
-func TestRunEchoStdoutWithMemoryLimit(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunEchoStdoutWithMemoryLimit(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "busybox", "echo", "test")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	out = strings.Trim(out, "\r\n")
 
 	if expected := "test"; out != expected {
-		t.Errorf("container should've printed %q but printed %q", expected, out)
-
+		c.Fatalf("container should've printed %q but printed %q", expected, out)
 	}
-
-	logDone("run - echo with memory limit")
 }
 
 // should run without memory swap
-func TestRunWithoutMemoryswapLimit(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "--memory-swap", "-1", "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container, output: %q", out)
+		c.Fatalf("failed to run container, output: %q", out)
 	}
-
-	logDone("run - without memory swap limit")
 }
 
 // "test" should be printed
-func TestRunEchoStdoutWitCPULimit(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunEchoStdoutWitCPULimit(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "busybox", "echo", "test")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	if out != "test\n" {
-		t.Errorf("container should've printed 'test'")
+		c.Errorf("container should've printed 'test'")
 	}
-
-	logDone("run - echo with CPU limit")
 }
 
 // "test" should be printed
-func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunEchoStdoutWithCPUAndMemoryLimit(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	if out != "test\n" {
-		t.Errorf("container should've printed 'test', got %q instead", out)
+		c.Errorf("container should've printed 'test', got %q instead", out)
 	}
-
-	logDone("run - echo with CPU and memory limit")
 }
 
 // "test" should be printed
-func TestRunEchoNamedContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunEchoStdoutWithCPUQuota(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "echo", "test")
+	out, _, _, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil {
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
+	}
+	out = strings.TrimSpace(out)
+	if strings.Contains(out, "Your kernel does not support CPU cfs quota") {
+		c.Skip("Your kernel does not support CPU cfs quota, skip this test")
+	}
+	if out != "test" {
+		c.Errorf("container should've printed 'test'")
+	}
 
+	out, err = inspectField("test", "HostConfig.CpuQuota")
+	c.Assert(err, check.IsNil)
+
+	if out != "8000" {
+		c.Errorf("setting the CPU CFS quota failed")
+	}
+}
+
+// "test" should be printed
+func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	if out != "test\n" {
-		t.Errorf("container should've printed 'test'")
+		c.Errorf("container should've printed 'test'")
 	}
-
-	if err := deleteContainer("testfoonamedcontainer"); err != nil {
-		t.Errorf("failed to remove the named container: %v", err)
-	}
-
-	logDone("run - echo with named container")
 }
 
 // docker run should not leak file descriptors
-func TestRunLeakyFileDescriptors(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "busybox", "ls", "-C", "/proc/self/fd")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	// normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory
 	if out != "0  1  2  3\n" {
-		t.Errorf("container should've printed '0  1  2  3', not: %s", out)
+		c.Errorf("container should've printed '0  1  2  3', not: %s", out)
 	}
-
-	logDone("run - check file descriptor leakage")
 }
 
 // it should be possible to lookup Google DNS
 // this will fail when Internet access is unavailable
-func TestRunLookupGoogleDns(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunLookupGoogleDns(c *check.C) {
+	testRequires(c, Network)
 
 	out, _, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "run", "busybox", "nslookup", "google.com"))
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
-
-	logDone("run - nslookup google.com")
 }
 
 // the exit code should be 0
 // some versions of lxc might make this test fail
-func TestRunExitCodeZero(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunExitCodeZero(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "busybox", "true")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Errorf("container should've exited with exit code 0: %s, %v", out, err)
+		c.Errorf("container should've exited with exit code 0: %s, %v", out, err)
 	}
-
-	logDone("run - exit with 0")
 }
 
 // the exit code should be 1
 // some versions of lxc might make this test fail
-func TestRunExitCodeOne(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunExitCodeOne(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "busybox", "false")
 	exitCode, err := runCommand(runCmd)
 	if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if exitCode != 1 {
-		t.Errorf("container should've exited with exit code 1")
+		c.Errorf("container should've exited with exit code 1")
 	}
-
-	logDone("run - exit with 1")
 }
 
 // it should be possible to pipe in data via stdin to a process running in a container
 // some versions of lxc might make this test fail
-func TestRunStdinPipe(t *testing.T) {
-	defer deleteAllContainers()
-
-	runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`)
+func (s *DockerSuite) TestRunStdinPipe(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat")
+	runCmd.Stdin = strings.NewReader("blahblah")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
-	out = stripTrailingCharacters(out)
-
-	inspectCmd := exec.Command(dockerBinary, "inspect", out)
-	if out, _, err := runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("out should've been a container id: %s %v", out, err)
-	}
-
+	out = strings.TrimSpace(out)
 	waitCmd := exec.Command(dockerBinary, "wait", out)
 	if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
+		c.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
 	}
 
 	logsCmd := exec.Command(dockerBinary, "logs", out)
 	logsOut, _, err := runCommandWithOutput(logsCmd)
 	if err != nil {
-		t.Fatalf("error thrown while trying to get container logs: %s, %v", logsOut, err)
+		c.Fatalf("error thrown while trying to get container logs: %s, %v", logsOut, err)
 	}
 
-	containerLogs := stripTrailingCharacters(logsOut)
+	containerLogs := strings.TrimSpace(logsOut)
 
 	if containerLogs != "blahblah" {
-		t.Errorf("logs didn't print the container's logs %s", containerLogs)
+		c.Errorf("logs didn't print the container's logs %s", containerLogs)
 	}
 
 	rmCmd := exec.Command(dockerBinary, "rm", out)
 	if out, _, err = runCommandWithOutput(rmCmd); err != nil {
-		t.Fatalf("rm failed to remove container: %s, %v", out, err)
+		c.Fatalf("rm failed to remove container: %s, %v", out, err)
 	}
-
-	logDone("run - pipe in with -i -a stdin")
 }
 
 // the container's ID should be printed when starting a container in detached mode
-func TestRunDetachedContainerIDPrinting(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
-	out = stripTrailingCharacters(out)
-
-	inspectCmd := exec.Command(dockerBinary, "inspect", out)
-	if inspectOut, _, err := runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("out should've been a container id: %s %v", inspectOut, err)
-	}
-
+	out = strings.TrimSpace(out)
 	waitCmd := exec.Command(dockerBinary, "wait", out)
 	if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
+		c.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err)
 	}
 
 	rmCmd := exec.Command(dockerBinary, "rm", out)
 	rmOut, _, err := runCommandWithOutput(rmCmd)
 	if err != nil {
-		t.Fatalf("rm failed to remove container: %s, %v", rmOut, err)
+		c.Fatalf("rm failed to remove container: %s, %v", rmOut, err)
 	}
 
-	rmOut = stripTrailingCharacters(rmOut)
+	rmOut = strings.TrimSpace(rmOut)
 	if rmOut != out {
-		t.Errorf("rm didn't print the container ID %s %s", out, rmOut)
+		c.Errorf("rm didn't print the container ID %s %s", out, rmOut)
 	}
-
-	logDone("run - print container ID in detached mode")
 }
 
 // the working directory should be set correctly
-func TestRunWorkingDirectory(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-w", "/root", "busybox", "pwd")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
-	out = stripTrailingCharacters(out)
+	out = strings.TrimSpace(out)
 
 	if out != "/root" {
-		t.Errorf("-w failed to set working directory")
+		c.Errorf("-w failed to set working directory")
 	}
 
 	runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd")
 	out, _, _, err = runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
-	out = stripTrailingCharacters(out)
+	out = strings.TrimSpace(out)
 
 	if out != "/root" {
-		t.Errorf("--workdir failed to set working directory")
+		c.Errorf("--workdir failed to set working directory")
 	}
-
-	logDone("run - run with working directory set by -w")
-	logDone("run - run with working directory set by --workdir")
 }
 
 // pinging Google's DNS resolver should fail when we disable the networking
-func TestRunWithoutNetworking(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8")
 	out, _, exitCode, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil && exitCode != 1 {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	if exitCode != 1 {
-		t.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
+		c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
 	}
 
 	runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8")
 	out, _, exitCode, err = runCommandWithStdoutStderr(runCmd)
 	if err != nil && exitCode != 1 {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	if exitCode != 1 {
-		t.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
+		c.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
 	}
-
-	logDone("run - disable networking with --net=none")
-	logDone("run - disable networking with -n=false")
 }
 
 //test --link use container name to link target
-func TestRunLinksContainerWithContainerName(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-i", "-t", "-d", "--name", "parent", "busybox")
 	out, _, _, err := runCommandWithStdoutStderr(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
-	cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", "parent")
-	ip, _, _, err := runCommandWithStdoutStderr(cmd)
-	if err != nil {
-		t.Fatalf("failed to inspect container: %v, output: %q", err, ip)
-	}
-	ip = strings.TrimSpace(ip)
+	ip, err := inspectField("parent", "NetworkSettings.IPAddress")
+	c.Assert(err, check.IsNil)
 	cmd = exec.Command(dockerBinary, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	if !strings.Contains(out, ip+"	test") {
-		t.Fatalf("use a container name to link target failed")
+		c.Fatalf("use a container name to link target failed")
 	}
-
-	logDone("run - use a container name to link target work")
 }
 
 //test --link use container id to link target
-func TestRunLinksContainerWithContainerId(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunLinksContainerWithContainerId(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-i", "-t", "-d", "busybox")
 	cID, _, _, err := runCommandWithStdoutStderr(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, cID)
+		c.Fatalf("failed to run container: %v, output: %q", err, cID)
 	}
 	cID = strings.TrimSpace(cID)
-	cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", cID)
-	ip, _, _, err := runCommandWithStdoutStderr(cmd)
-	if err != nil {
-		t.Fatalf("faild to inspect container: %v, output: %q", err, ip)
-	}
-	ip = strings.TrimSpace(ip)
+	ip, err := inspectField(cID, "NetworkSettings.IPAddress")
+	c.Assert(err, check.IsNil)
 	cmd = exec.Command(dockerBinary, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	if !strings.Contains(out, ip+"	test") {
-		t.Fatalf("use a container id to link target failed")
+		c.Fatalf("use a container id to link target failed")
 	}
-
-	logDone("run - use a container id to link target work")
 }
 
-func TestRunLinkToContainerNetMode(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--name", "test", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	cmd = exec.Command(dockerBinary, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	cmd = exec.Command(dockerBinary, "run", "-d", "--link=parent:parent", "busybox", "top")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	cmd = exec.Command(dockerBinary, "run", "-d", "--link=child:child", "busybox", "top")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
-
-	logDone("run - link to a container which net mode is container success")
 }
 
-func TestRunModeNetContainerHostname(t *testing.T) {
-	testRequires(t, ExecSupport)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunContainerNetModeWithDnsMacHosts(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--dns", "1.2.3.4", "--net=container:parent", "busybox")
+	out, _, err = runCommandWithOutput(cmd)
+	if err == nil || !strings.Contains(out, "Conflicting options: --dns and the network mode") {
+		c.Fatalf("run --net=container with --dns should error out")
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox")
+	out, _, err = runCommandWithOutput(cmd)
+	if err == nil || !strings.Contains(out, "--mac-address and the network mode") {
+		c.Fatalf("run --net=container with --mac-address should error out")
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox")
+	out, _, err = runCommandWithOutput(cmd)
+	if err == nil || !strings.Contains(out, "--add-host and the network mode") {
+		c.Fatalf("run --net=container with --add-host should error out")
+	}
+
+}
+
+func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) {
+	testRequires(c, ExecSupport)
 	cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	cmd = exec.Command(dockerBinary, "exec", "parent", "cat", "/etc/hostname")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to exec command: %v, output: %q", err, out)
+		c.Fatalf("failed to exec command: %v, output: %q", err, out)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname")
 	out1, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out1)
+		c.Fatalf("failed to run container: %v, output: %q", err, out1)
 	}
 	if out1 != out {
-		t.Fatal("containers with shared net namespace should have same hostname")
+		c.Fatal("containers with shared net namespace should have same hostname")
 	}
-
-	logDone("run - containers with shared net namespace have same hostname")
-}
-
-// Regression test for #4741
-func TestRunWithVolumesAsFiles(t *testing.T) {
-	defer deleteAllContainers()
-
-	runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true")
-	out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
-	if err != nil && exitCode != 0 {
-		t.Fatal("1", out, stderr, err)
-	}
-
-	runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file")
-	out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
-	if err != nil && exitCode != 0 {
-		t.Fatal("2", out, stderr, err)
-	}
-
-	logDone("run - regression test for #4741 - volumes from as files")
 }
 
 // Regression test for #4979
-func TestRunWithVolumesFromExited(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
 	out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil && exitCode != 0 {
-		t.Fatal("1", out, stderr, err)
+		c.Fatal("1", out, stderr, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file")
 	out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
 	if err != nil && exitCode != 0 {
-		t.Fatal("2", out, stderr, err)
+		c.Fatal("2", out, stderr, err)
 	}
-
-	logDone("run - regression test for #4979 - volumes-from on exited container")
 }
 
 // Volume path is a symlink which also exists on the host, and the host side is a file not a dir
 // But the volume call is just a normal volume, not a bind mount
-func TestRunCreateVolumesInSymlinkDir(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	testRequires(t, NativeExecDriver)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) {
+	testRequires(c, SameHostDaemon)
+	testRequires(c, NativeExecDriver)
 	name := "test-volume-symlink"
 
 	dir, err := ioutil.TempDir("", name)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(dir)
 
 	f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	f.Close()
 
 	dockerFile := fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir)
 	if _, err := buildImage(name, dockerFile, false); err != nil {
-		t.Fatal(err)
-	}
-	defer deleteImages(name)
-
-	if out, _, err := dockerCmd(t, "run", "-v", "/test/test", name); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err)
 	}
 
-	logDone("run - create volume in symlink directory")
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-v", "/test/test", name))
+	if err != nil {
+		c.Fatalf("Failed with errors: %s, %v", out, err)
+	}
 }
 
-// Regression test for #4830
-func TestRunWithRelativePath(t *testing.T) {
-	defer deleteAllContainers()
-
-	runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true")
-	if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil {
-		t.Fatalf("relative path should result in an error")
-	}
-
-	logDone("run - volume with relative path")
-}
-
-func TestRunVolumesMountedAsReadonly(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile")
 	if code, err := runCommand(cmd); err == nil || code == 0 {
-		t.Fatalf("run should fail because volume is ro: exit code %d", code)
+		c.Fatalf("run should fail because volume is ro: exit code %d", code)
 	}
-
-	logDone("run - volumes as readonly mount")
 }
 
-func TestRunVolumesFromInReadonlyMode(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunVolumesFromInReadonlyMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file")
 	if code, err := runCommand(cmd); err == nil || code == 0 {
-		t.Fatalf("run should fail because volume is ro: exit code %d", code)
+		c.Fatalf("run should fail because volume is ro: exit code %d", code)
 	}
-
-	logDone("run - volumes from as readonly mount")
 }
 
 // Regression test for #1201
-func TestRunVolumesFromInReadWriteMode(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatalf("running --volumes-from parent:rw failed with output: %q\nerror: %v", out, err)
+		c.Fatalf("running --volumes-from parent:rw failed with output: %q\nerror: %v", out, err)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:bar", "busybox", "touch", "/test/file")
 	if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "invalid mode for volumes-from: bar") {
-		t.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out)
+		c.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatalf("running --volumes-from parent failed with output: %q\nerror: %v", out, err)
+		c.Fatalf("running --volumes-from parent failed with output: %q\nerror: %v", out, err)
 	}
-
-	logDone("run - volumes from as read write mount")
 }
 
-func TestVolumesFromGetsProperMode(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	// Expect this "rw" mode to be be ignored since the inheritted volume is "ro"
+	// Expect this "rw" mode to be be ignored since the inherited volume is "ro"
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file")
 	if _, err := runCommand(cmd); err == nil {
-		t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`")
+		c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`")
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	// Expect this to be read-only since both are "ro"
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file")
 	if _, err := runCommand(cmd); err == nil {
-		t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`")
+		c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`")
 	}
-
-	logDone("run - volumes from ignores `rw` if inherrited volume is `ro`")
 }
 
 // Test for GH#10618
-func TestRunNoDupVolumes(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) {
 	mountstr1 := randomUnixTmpDirPath("test1") + ":/someplace"
 	mountstr2 := randomUnixTmpDirPath("test2") + ":/someplace"
 
 	cmd := exec.Command(dockerBinary, "run", "-v", mountstr1, "-v", mountstr2, "busybox", "true")
 	if out, _, err := runCommandWithOutput(cmd); err == nil {
-		t.Fatal("Expected error about duplicate volume definitions")
+		c.Fatal("Expected error about duplicate volume definitions")
 	} else {
-		if !strings.Contains(out, "Duplicate volume") {
-			t.Fatalf("Expected 'duplicate volume' error, got %v", err)
+		if !strings.Contains(out, "Duplicate bind mount") {
+			c.Fatalf("Expected 'duplicate volume' error, got %v", err)
 		}
 	}
-
-	logDone("run - don't allow multiple (bind) volumes on the same container target")
 }
 
 // Test for #1351
-func TestRunApplyVolumesFromBeforeVolumes(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-
-	logDone("run - volumes from mounted first")
 }
 
-func TestRunMultipleVolumesFrom(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent1", "--volumes-from", "parent2",
 		"busybox", "sh", "-c", "cat /test/foo && cat /other/bar")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-
-	logDone("run - multiple volumes from")
 }
 
 // this tests verifies the ID format for the container
-func TestRunVerifyContainerID(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
 	out, exit, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if exit != 0 {
-		t.Fatalf("expected exit code 0 received %d", exit)
+		c.Fatalf("expected exit code 0 received %d", exit)
 	}
 	match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n"))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !match {
-		t.Fatalf("Invalid container ID: %s", out)
+		c.Fatalf("Invalid container ID: %s", out)
 	}
-
-	logDone("run - verify container ID")
 }
 
 // Test that creating a container with a volume doesn't crash. Regression test for #995.
-func TestRunCreateVolume(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCreateVolume(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-v", "/var/lib/data", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-
-	logDone("run - create docker managed volume")
 }
 
 // Test that creating a volume with a symlink in its path works correctly. Test for #5152.
 // Note that this bug happens only with symlinks with a target that starts with '/'.
-func TestRunCreateVolumeWithSymlink(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) {
 	image := "docker-test-createvolumewithsymlink"
-	defer deleteImages(image)
 
 	buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-")
 	buildCmd.Stdin = strings.NewReader(`FROM busybox
@@ -705,43 +588,38 @@
 	buildCmd.Dir = workingDirectory
 	err := buildCmd.Run()
 	if err != nil {
-		t.Fatalf("could not build '%s': %v", image, err)
+		c.Fatalf("could not build '%s': %v", image, err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo")
 	exitCode, err := runCommand(cmd)
 	if err != nil || exitCode != 0 {
-		t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
+		c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
 	}
 
 	var volPath string
 	cmd = exec.Command(dockerBinary, "inspect", "-f", "{{range .Volumes}}{{.}}{{end}}", "test-createvolumewithsymlink")
 	volPath, exitCode, err = runCommandWithOutput(cmd)
 	if err != nil || exitCode != 0 {
-		t.Fatalf("[inspect] err: %v, exitcode: %d", err, exitCode)
+		c.Fatalf("[inspect] err: %v, exitcode: %d", err, exitCode)
 	}
 
 	cmd = exec.Command(dockerBinary, "rm", "-v", "test-createvolumewithsymlink")
 	exitCode, err = runCommand(cmd)
 	if err != nil || exitCode != 0 {
-		t.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode)
+		c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode)
 	}
 
 	f, err := os.Open(volPath)
 	defer f.Close()
 	if !os.IsNotExist(err) {
-		t.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath)
+		c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath)
 	}
-
-	logDone("run - create volume with symlink")
 }
 
 // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`.
-func TestRunVolumesFromSymlinkPath(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) {
 	name := "docker-test-volumesfromsymlinkpath"
-	defer deleteImages(name)
 
 	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
 	buildCmd.Stdin = strings.NewReader(`FROM busybox
@@ -750,166 +628,131 @@
 	buildCmd.Dir = workingDirectory
 	err := buildCmd.Run()
 	if err != nil {
-		t.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err)
+		c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", name)
 	exitCode, err := runCommand(cmd)
 	if err != nil || exitCode != 0 {
-		t.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode)
+		c.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls /foo | grep -q bar")
 	exitCode, err = runCommand(cmd)
 	if err != nil || exitCode != 0 {
-		t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
+		c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode)
 	}
-
-	logDone("run - volumes-from symlink path")
 }
 
-func TestRunExitCode(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunExitCode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72")
 
 	exit, err := runCommand(cmd)
 	if err == nil {
-		t.Fatal("should not have a non nil error")
+		c.Fatal("should not have a non nil error")
 	}
 	if exit != 72 {
-		t.Fatalf("expected exit code 72 received %d", exit)
+		c.Fatalf("expected exit code 72 received %d", exit)
 	}
-
-	logDone("run - correct exit code")
 }
 
-func TestRunUserDefaultsToRoot(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUserDefaultsToRoot(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "id")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "uid=0(root) gid=0(root)") {
-		t.Fatalf("expected root user got %s", out)
+		c.Fatalf("expected root user got %s", out)
 	}
-
-	logDone("run - default user")
 }
 
-func TestRunUserByName(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUserByName(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-u", "root", "busybox", "id")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "uid=0(root) gid=0(root)") {
-		t.Fatalf("expected root user got %s", out)
+		c.Fatalf("expected root user got %s", out)
 	}
-
-	logDone("run - user by name")
 }
 
-func TestRunUserByID(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUserByID(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-u", "1", "busybox", "id")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") {
-		t.Fatalf("expected daemon user got %s", out)
+		c.Fatalf("expected daemon user got %s", out)
 	}
-
-	logDone("run - user by id")
 }
 
-func TestRunUserByIDBig(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUserByIDBig(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-u", "2147483648", "busybox", "id")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal("No error, but must be.", out)
+		c.Fatal("No error, but must be.", out)
 	}
 	if !strings.Contains(out, "Uids and gids must be in range") {
-		t.Fatalf("expected error about uids range, got %s", out)
+		c.Fatalf("expected error about uids range, got %s", out)
 	}
-
-	logDone("run - user by id, id too big")
 }
 
-func TestRunUserByIDNegative(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-u", "-1", "busybox", "id")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal("No error, but must be.", out)
+		c.Fatal("No error, but must be.", out)
 	}
 	if !strings.Contains(out, "Uids and gids must be in range") {
-		t.Fatalf("expected error about uids range, got %s", out)
+		c.Fatalf("expected error about uids range, got %s", out)
 	}
-
-	logDone("run - user by id, id negative")
 }
 
-func TestRunUserByIDZero(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUserByIDZero(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-u", "0", "busybox", "id")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") {
-		t.Fatalf("expected daemon user got %s", out)
+		c.Fatalf("expected daemon user got %s", out)
 	}
-
-	logDone("run - user by id, zero uid")
 }
 
-func TestRunUserNotFound(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUserNotFound(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id")
 	_, err := runCommand(cmd)
 	if err == nil {
-		t.Fatal("unknown user should cause container to fail")
+		c.Fatal("unknown user should cause container to fail")
 	}
-
-	logDone("run - user not found")
 }
 
-func TestRunTwoConcurrentContainers(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) {
 	group := sync.WaitGroup{}
 	group.Add(2)
 
+	errChan := make(chan error, 2)
 	for i := 0; i < 2; i++ {
 		go func() {
 			defer group.Done()
 			cmd := exec.Command(dockerBinary, "run", "busybox", "sleep", "2")
-			if _, err := runCommand(cmd); err != nil {
-				t.Fatal(err)
-			}
+			_, err := runCommand(cmd)
+			errChan <- err
 		}()
 	}
 
 	group.Wait()
+	close(errChan)
 
-	logDone("run - two concurrent containers")
+	for err := range errChan {
+		c.Assert(err, check.IsNil)
+	}
 }
 
-func TestRunEnvironment(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunEnvironment(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env")
 	cmd.Env = append(os.Environ(),
 		"TRUE=false",
@@ -918,7 +761,7 @@
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n")
@@ -942,29 +785,26 @@
 	}
 	sort.Strings(goodEnv)
 	if len(goodEnv) != len(actualEnv) {
-		t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
+		c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
 	}
 	for i := range goodEnv {
 		if actualEnv[i] != goodEnv[i] {
-			t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
+			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
 		}
 	}
-
-	logDone("run - verify environment")
 }
 
-func TestRunEnvironmentErase(t *testing.T) {
+func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) {
 	// Test to make sure that when we use -e on env vars that are
 	// not set in our local env that they're removed (if present) in
 	// the container
-	defer deleteAllContainers()
 
 	cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env")
 	cmd.Env = appendBaseEnv([]string{})
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n")
@@ -982,28 +822,25 @@
 	}
 	sort.Strings(goodEnv)
 	if len(goodEnv) != len(actualEnv) {
-		t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
+		c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
 	}
 	for i := range goodEnv {
 		if actualEnv[i] != goodEnv[i] {
-			t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
+			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
 		}
 	}
-
-	logDone("run - verify environment erase")
 }
 
-func TestRunEnvironmentOverride(t *testing.T) {
+func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) {
 	// Test to make sure that when we use -e on env vars that are
 	// already in the env that we're overriding them
-	defer deleteAllContainers()
 
 	cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env")
 	cmd.Env = appendBaseEnv([]string{"HOSTNAME=bar"})
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n")
@@ -1022,60 +859,47 @@
 	}
 	sort.Strings(goodEnv)
 	if len(goodEnv) != len(actualEnv) {
-		t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
+		c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", "))
 	}
 	for i := range goodEnv {
 		if actualEnv[i] != goodEnv[i] {
-			t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
+			c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i])
 		}
 	}
-
-	logDone("run - verify environment override")
 }
 
-func TestRunContainerNetwork(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunContainerNetwork(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "127.0.0.1")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-
-	logDone("run - test container network via ping")
 }
 
 // Issue #4681
-func TestRunLoopbackWhenNetworkDisabled(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-
-	logDone("run - test container loopback when networking disabled")
 }
 
-func TestRunNetHostNotAllowedWithLinks(t *testing.T) {
-	defer deleteAllContainers()
-
-	_, _, err := dockerCmd(t, "run", "--name", "linked", "busybox", "true")
+func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "linked", "busybox", "true"))
+	if err != nil {
+		c.Fatalf("Failed with errors: %s, %v", out, err)
+	}
 	cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true")
 	_, _, err = runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal("Expected error")
+		c.Fatal("Expected error")
 	}
-
-	logDone("run - don't allow --net=host to be used with links")
 }
 
-func TestRunLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	var (
@@ -1090,14 +914,12 @@
 	}
 
 	if count != 1 {
-		t.Fatalf("Wrong interface count in container %d", count)
+		c.Fatalf("Wrong interface count in container %d", count)
 	}
 
 	if !strings.HasPrefix(out, "1: lo") {
-		t.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out)
+		c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out)
 	}
-
-	logDone("run - test loopback only exists when networking disabled")
 }
 
 // #7851 hostname outside container shows FQDN, inside only shortname
@@ -1105,293 +927,252 @@
 // and use "--net=host" (as the original issue submitter did), as the same
 // codepath is executed with "docker run -h <hostname>".  Both were manually
 // tested, but this testcase takes the simpler path of using "run -h .."
-func TestRunFullHostnameSet(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-h", "foo.bar.baz", "busybox", "hostname")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" {
-		t.Fatalf("expected hostname 'foo.bar.baz', received %s", actual)
+		c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual)
 	}
-
-	logDone("run - test fully qualified hostname set with -h")
 }
 
-func TestRunPrivilegedCanMknod(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
-		t.Fatalf("expected output ok received %s", actual)
+		c.Fatalf("expected output ok received %s", actual)
 	}
-
-	logDone("run - test privileged can mknod")
 }
 
-func TestRunUnPrivilegedCanMknod(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
-		t.Fatalf("expected output ok received %s", actual)
+		c.Fatalf("expected output ok received %s", actual)
 	}
-
-	logDone("run - test un-privileged can mknod")
 }
 
-func TestRunCapDropInvalid(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-
-	logDone("run - test --cap-drop=CHPASS invalid")
 }
 
-func TestRunCapDropCannotMknod(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
-		t.Fatalf("expected output not ok received %s", actual)
+		c.Fatalf("expected output not ok received %s", actual)
 	}
-
-	logDone("run - test --cap-drop=MKNOD cannot mknod")
 }
 
-func TestRunCapDropCannotMknodLowerCase(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
-		t.Fatalf("expected output not ok received %s", actual)
+		c.Fatalf("expected output not ok received %s", actual)
 	}
-
-	logDone("run - test --cap-drop=mknod cannot mknod lowercase")
 }
 
-func TestRunCapDropALLCannotMknod(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
-		t.Fatalf("expected output not ok received %s", actual)
+		c.Fatalf("expected output not ok received %s", actual)
 	}
-
-	logDone("run - test --cap-drop=ALL cannot mknod")
 }
 
-func TestRunCapDropALLAddMknodCanMknod(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
-		t.Fatalf("expected output ok received %s", actual)
+		c.Fatalf("expected output ok received %s", actual)
 	}
-
-	logDone("run - test --cap-drop=ALL --cap-add=MKNOD can mknod")
 }
 
-func TestRunCapAddInvalid(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-
-	logDone("run - test --cap-add=CHPASS invalid")
 }
 
-func TestRunCapAddCanDownInterface(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
-		t.Fatalf("expected output ok received %s", actual)
+		c.Fatalf("expected output ok received %s", actual)
 	}
-
-	logDone("run - test --cap-add=NET_ADMIN can set eth0 down")
 }
 
-func TestRunCapAddALLCanDownInterface(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
-		t.Fatalf("expected output ok received %s", actual)
+		c.Fatalf("expected output ok received %s", actual)
 	}
-
-	logDone("run - test --cap-add=ALL can set eth0 down")
 }
 
-func TestRunCapAddALLDropNetAdminCanDownInterface(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
-		t.Fatalf("expected output not ok received %s", actual)
+		c.Fatalf("expected output not ok received %s", actual)
 	}
-
-	logDone("run - test --cap-add=ALL --cap-drop=NET_ADMIN cannot set eth0 down")
 }
 
-func TestRunPrivilegedCanMount(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "ok" {
-		t.Fatalf("expected output ok received %s", actual)
+		c.Fatalf("expected output ok received %s", actual)
 	}
-
-	logDone("run - test privileged can mount")
 }
 
-func TestRunUnPrivilegedCannotMount(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual == "ok" {
-		t.Fatalf("expected output not ok received %s", actual)
+		c.Fatalf("expected output not ok received %s", actual)
 	}
-
-	logDone("run - test un-privileged cannot mount")
 }
 
-func TestRunSysNotWritableInNonPrivilegedContainers(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/sys/kernel/profiling")
 	if code, err := runCommand(cmd); err == nil || code == 0 {
-		t.Fatal("sys should not be writable in a non privileged container")
+		c.Fatal("sys should not be writable in a non privileged container")
 	}
-
-	logDone("run - sys not writable in non privileged container")
 }
 
-func TestRunSysWritableInPrivilegedContainers(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/sys/kernel/profiling")
 	if code, err := runCommand(cmd); err != nil || code != 0 {
-		t.Fatalf("sys should be writable in privileged container")
+		c.Fatalf("sys should be writable in privileged container")
 	}
-
-	logDone("run - sys writable in privileged container")
 }
 
-func TestRunProcNotWritableInNonPrivilegedContainers(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/proc/sysrq-trigger")
 	if code, err := runCommand(cmd); err == nil || code == 0 {
-		t.Fatal("proc should not be writable in a non privileged container")
+		c.Fatal("proc should not be writable in a non privileged container")
 	}
-
-	logDone("run - proc not writable in non privileged container")
 }
 
-func TestRunProcWritableInPrivilegedContainers(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger")
 	if code, err := runCommand(cmd); err != nil || code != 0 {
-		t.Fatalf("proc should be writable in privileged container")
+		c.Fatalf("proc should be writable in privileged container")
 	}
-	logDone("run - proc writable in privileged container")
 }
 
-func TestRunWithCpuset(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "--cpu-period", "50000", "--name", "test", "busybox", "true")
+	out, _, _, err := runCommandWithStdoutStderr(runCmd)
+	if err != nil {
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
+	}
+	out = strings.TrimSpace(out)
+	if strings.Contains(out, "Your kernel does not support CPU cfs period") {
+		c.Skip("Your kernel does not support CPU cfs period, skip this test")
+	}
 
+	out, err = inspectField("test", "HostConfig.CpuPeriod")
+	c.Assert(err, check.IsNil)
+	if out != "50000" {
+		c.Errorf("setting the CPU CFS period failed")
+	}
+}
+
+func (s *DockerSuite) TestRunWithCpuset(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", "busybox", "true")
 	if code, err := runCommand(cmd); err != nil || code != 0 {
-		t.Fatalf("container should run successfuly with cpuset of 0: %s", err)
+		c.Fatalf("container should run successfully with cpuset of 0: %s", err)
 	}
-
-	logDone("run - cpuset 0")
 }
 
-func TestRunWithCpusetCpus(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--cpuset-cpus", "0", "busybox", "true")
 	if code, err := runCommand(cmd); err != nil || code != 0 {
-		t.Fatalf("container should run successfuly with cpuset-cpus of 0: %s", err)
+		c.Fatalf("container should run successfully with cpuset-cpus of 0: %s", err)
 	}
-
-	logDone("run - cpuset-cpus 0")
 }
 
-func TestRunDeviceNumbers(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "--cpuset-mems", "0", "busybox", "true")
+	if code, err := runCommand(cmd); err != nil || code != 0 {
+		c.Fatalf("container should run successfully with cpuset-mems of 0: %s", err)
+	}
+}
 
+func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "--blkio-weight", "300", "busybox", "true")
+	if code, err := runCommand(cmd); err != nil || code != 0 {
+		c.Fatalf("container should run successfully with blkio-weight of 300: %s", err)
+	}
+}
+
+func (s *DockerSuite) TestRunWithBlkioInvalidWeight(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "--blkio-weight", "5", "busybox", "true")
+	if _, err := runCommand(cmd); err == nil {
+		c.Fatalf("run with invalid blkio-weight should failed")
+	}
+}
+
+func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "ls -l /dev/null")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	deviceLineFields := strings.Fields(out)
 	deviceLineFields[6] = ""
@@ -1400,137 +1181,107 @@
 	expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"}
 
 	if !(reflect.DeepEqual(deviceLineFields, expected)) {
-		t.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out)
+		c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out)
 	}
-
-	logDone("run - test device numbers")
 }
 
-func TestRunThatCharacterDevicesActLikeCharacterDevices(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual[0] == '0' {
-		t.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual)
+		c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual)
 	}
-
-	logDone("run - test that character devices work.")
 }
 
-func TestRunUnprivilegedWithChroot(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-
-	logDone("run - unprivileged with chroot")
 }
 
-func TestRunAddingOptionalDevices(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" {
-		t.Fatalf("expected output /dev/nulo, received %s", actual)
+		c.Fatalf("expected output /dev/nulo, received %s", actual)
 	}
-
-	logDone("run - test --device argument")
 }
 
-func TestRunModeHostname(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunModeHostname(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	cmd := exec.Command(dockerBinary, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actual := strings.Trim(out, "\r\n"); actual != "testhostname" {
-		t.Fatalf("expected 'testhostname', but says: %q", actual)
+		c.Fatalf("expected 'testhostname', but says: %q", actual)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hostname")
 
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	hostname, err := os.Hostname()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if actual := strings.Trim(out, "\r\n"); actual != hostname {
-		t.Fatalf("expected %q, but says: %q", hostname, actual)
+		c.Fatalf("expected %q, but says: %q", hostname, actual)
 	}
-
-	logDone("run - hostname and several network modes")
 }
 
-func TestRunRootWorkdir(t *testing.T) {
-	defer deleteAllContainers()
-
-	s, _, err := dockerCmd(t, "run", "--workdir", "/", "busybox", "pwd")
+func (s *DockerSuite) TestRunRootWorkdir(c *check.C) {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--workdir", "/", "busybox", "pwd"))
 	if err != nil {
-		t.Fatal(s, err)
+		c.Fatalf("Failed with errors: %s, %v", out, err)
 	}
-	if s != "/\n" {
-		t.Fatalf("pwd returned %q (expected /\\n)", s)
+	if out != "/\n" {
+		c.Fatalf("pwd returned %q (expected /\\n)", s)
 	}
-
-	logDone("run - workdir /")
 }
 
-func TestRunAllowBindMountingRoot(t *testing.T) {
-	defer deleteAllContainers()
-
-	s, _, err := dockerCmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host")
+func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-v", "/:/host", "busybox", "ls", "/host"))
 	if err != nil {
-		t.Fatal(s, err)
+		c.Fatalf("Failed with errors: %s, %v", out, err)
 	}
-
-	logDone("run - bind mount / as volume")
 }
 
-func TestRunDisallowBindMountingRootToRoot(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-v", "/:/", "busybox", "ls", "/host")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-
-	logDone("run - bind mount /:/ as volume should not work")
 }
 
 // Verify that a container gets default DNS when only localhost resolvers exist
-func TestRunDnsDefaultOptions(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunDnsDefaultOptions(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	// preserve original resolv.conf for restoring after test
 	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
 	if os.IsNotExist(err) {
-		t.Fatalf("/etc/resolv.conf does not exist")
+		c.Fatalf("/etc/resolv.conf does not exist")
 	}
 	// defer restored original conf
 	defer func() {
 		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 	}()
 
@@ -1539,14 +1290,14 @@
 	// GetNameservers(), leading to a replacement of nameservers with the default set
 	tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1")
 	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf")
 
 	actual, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, actual)
+		c.Fatal(err, actual)
 	}
 
 	// check that the actual defaults are appended to the commented out
@@ -1554,54 +1305,47 @@
 	// NOTE: if we ever change the defaults from google dns, this will break
 	expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4"
 	if actual != expected {
-		t.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual)
+		c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual)
 	}
-
-	logDone("run - dns default options")
 }
 
-func TestRunDnsOptions(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunDnsOptions(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
 
 	out, stderr, _, err := runCommandWithStdoutStderr(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	// The client will get a warning on stderr when setting DNS to a localhost address; verify this:
 	if !strings.Contains(stderr, "Localhost DNS setting") {
-		t.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr)
+		c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr)
 	}
 
 	actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
 	if actual != "nameserver 127.0.0.1 search mydomain" {
-		t.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual)
+		c.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf")
 
 	out, _, _, err = runCommandWithStdoutStderr(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
 	if actual != "nameserver 127.0.0.1" {
-		t.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual)
+		c.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual)
 	}
-
-	logDone("run - dns options")
 }
 
-func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
 	if os.IsNotExist(err) {
-		t.Fatalf("/etc/resolv.conf does not exist")
+		c.Fatalf("/etc/resolv.conf does not exist")
 	}
 
 	hostNamservers := resolvconf.GetNameservers(origResolvConf)
@@ -1610,58 +1354,58 @@
 	var out string
 	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")
 	if out, _, _, err = runCommandWithStdoutStderr(cmd); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" {
-		t.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
+		c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
 	}
 
 	actualSearch := resolvconf.GetSearchDomains([]byte(out))
 	if len(actualSearch) != len(hostSearch) {
-		t.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
+		c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
 	}
 	for i := range actualSearch {
 		if actualSearch[i] != hostSearch[i] {
-			t.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i])
+			c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i])
 		}
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
 
 	if out, _, err = runCommandWithOutput(cmd); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	actualNameservers := resolvconf.GetNameservers([]byte(out))
 	if len(actualNameservers) != len(hostNamservers) {
-		t.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers))
+		c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers))
 	}
 	for i := range actualNameservers {
 		if actualNameservers[i] != hostNamservers[i] {
-			t.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNamservers[i])
+			c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNamservers[i])
 		}
 	}
 
 	if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
-		t.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
+		c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
 	}
 
 	// test with file
 	tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1")
 	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	// put the old resolvconf back
 	defer func() {
 		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 	}()
 
 	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
 	if os.IsNotExist(err) {
-		t.Fatalf("/etc/resolv.conf does not exist")
+		c.Fatalf("/etc/resolv.conf does not exist")
 	}
 
 	hostNamservers = resolvconf.GetNameservers(resolvConf)
@@ -1670,35 +1414,56 @@
 	cmd = exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf")
 
 	if out, _, err = runCommandWithOutput(cmd); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	if actualNameservers = resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
-		t.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
+		c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
 	}
 
 	actualSearch = resolvconf.GetSearchDomains([]byte(out))
 	if len(actualSearch) != len(hostSearch) {
-		t.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
+		c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch))
 	}
 	for i := range actualSearch {
 		if actualSearch[i] != hostSearch[i] {
-			t.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i])
+			c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i])
 		}
 	}
-	defer deleteAllContainers()
-
-	logDone("run - dns options based on host resolv.conf")
 }
 
-// Test the file watch notifier on docker host's /etc/resolv.conf
-// A go-routine is responsible for auto-updating containers which are
-// stopped and have an unmodified copy of resolv.conf, as well as
-// marking running containers as requiring an update on next restart
-func TestRunResolvconfUpdater(t *testing.T) {
-	// Because overlay doesn't support inotify properly, we need to skip
-	// this test if the docker daemon has Storage Driver == overlay
-	testRequires(t, SameHostDaemon, NotOverlay)
+// Test to see if a non-root user can resolve a DNS name and reach out to it. Also
+// check if the container resolv.conf file has atleast 0644 perm.
+func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) {
+	testRequires(c, SameHostDaemon)
+	testRequires(c, Network)
+
+	cmd := exec.Command(dockerBinary, "run", "--name=testperm", "--user=default", "busybox", "ping", "-c", "1", "www.docker.io")
+	if out, err := runCommand(cmd); err != nil {
+		c.Fatal(err, out)
+	}
+
+	cID, err := getIDByName("testperm")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	fmode := (os.FileMode)(0644)
+	finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf"))
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if (finfo.Mode() & fmode) != fmode {
+		c.Fatalf("Expected container resolv.conf mode to be atleast %s, instead got %s", fmode.String(), finfo.Mode().String())
+	}
+}
+
+// Test if container resolv.conf gets updated the next time it restarts
+// if host /etc/resolv.conf has changed. This only applies if the container
+// uses the host's /etc/resolv.conf and does not have any dns options provided.
+func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78")
 	tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1")
@@ -1706,125 +1471,128 @@
 	//take a copy of resolv.conf for restoring after test completes
 	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// This test case is meant to test monitoring resolv.conf when it is
-	// a regular file not a bind mount. So we unmount resolv.conf and replace
+	// a regular file not a bind mounc. So we unmount resolv.conf and replace
 	// it with a file containing the original settings.
 	cmd := exec.Command("umount", "/etc/resolv.conf")
 	if _, err = runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	//cleanup
 	defer func() {
-		deleteAllContainers()
 		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
-			t.Fatal(err)
+			c.Fatal(err)
 		}
 	}()
 
-	//1. test that a non-running container gets an updated resolv.conf
+	//1. test that a restarting container gets an updated resolv.conf
 	cmd = exec.Command(dockerBinary, "run", "--name='first'", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	containerID1, err := getIDByName("first")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// replace resolv.conf with our temporary copy
 	bytesResolvConf := []byte(tmpResolvConf)
 	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	time.Sleep(time.Second / 2)
+	// start the container again to pickup changes
+	cmd = exec.Command(dockerBinary, "start", "first")
+	if out, err := runCommand(cmd); err != nil {
+		c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+	}
+
 	// check for update in container
 	containerResolv, err := readContainerFile(containerID1, "resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !bytes.Equal(containerResolv, bytesResolvConf) {
-		t.Fatalf("Stopped container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
+		c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
 	}
 
-	//2. test that a non-running container does not receive resolv.conf updates
+	/* 	//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
+	   	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+	   		c.Fatal(err)
+	   	} */
+	//2. test that a restarting container does not receive resolv.conf updates
 	//   if it modified the container copy of the starting point resolv.conf
 	cmd = exec.Command(dockerBinary, "run", "--name='second'", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf")
 	if _, err = runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	containerID2, err := getIDByName("second")
 	if err != nil {
-		t.Fatal(err)
-	}
-	containerResolvHashBefore, err := readContainerFile(containerID2, "resolv.conf.hash")
-	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	//make a change to resolv.conf (in this case replacing our tmp copy with orig copy)
 	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	time.Sleep(time.Second / 2)
-	containerResolvHashAfter, err := readContainerFile(containerID2, "resolv.conf.hash")
+	// start the container again
+	cmd = exec.Command(dockerBinary, "start", "second")
+	if out, err := runCommand(cmd); err != nil {
+		c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+	}
+
+	// check for update in container
+	containerResolv, err = readContainerFile(containerID2, "resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) {
-		t.Fatalf("Stopped container with modified resolv.conf should not have been updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter)
+	if bytes.Equal(containerResolv, resolvConfSystem) {
+		c.Fatalf("Restarting  a container after container updated resolv.conf should not pick up host changes; expected %q, got %q", string(containerResolv), string(resolvConfSystem))
 	}
 
 	//3. test that a running container's resolv.conf is not modified while running
 	cmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	runningContainerID := strings.TrimSpace(out)
 
-	containerResolvHashBefore, err = readContainerFile(runningContainerID, "resolv.conf.hash")
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	// replace resolv.conf
 	if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	// make sure the updater has time to run to validate we really aren't
-	// getting updated
-	time.Sleep(time.Second / 2)
-	containerResolvHashAfter, err = readContainerFile(runningContainerID, "resolv.conf.hash")
+	// check for update in container
+	containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) {
-		t.Fatalf("Running container's resolv.conf should not be updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter)
+	if bytes.Equal(containerResolv, bytesResolvConf) {
+		c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv))
 	}
 
 	//4. test that a running container's resolv.conf is updated upon restart
 	//   (the above container is still running..)
 	cmd = exec.Command(dockerBinary, "restart", runningContainerID)
 	if _, err = runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// check for update in container
 	containerResolv, err = readContainerFile(runningContainerID, "resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !bytes.Equal(containerResolv, bytesResolvConf) {
-		t.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv))
+		c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv))
 	}
 
 	//5. test that additions of a localhost resolver are cleaned from
@@ -1833,20 +1601,25 @@
 	// replace resolv.conf with a localhost-only nameserver copy
 	bytesResolvConf = []byte(tmpLocalhostResolvConf)
 	if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	time.Sleep(time.Second / 2)
+	// start the container again to pickup changes
+	cmd = exec.Command(dockerBinary, "start", "first")
+	if out, err := runCommand(cmd); err != nil {
+		c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+	}
+
 	// our first exited container ID should have been updated, but with default DNS
 	// after the cleanup of resolv.conf found only a localhost nameserver:
 	containerResolv, err = readContainerFile(containerID1, "resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4"
 	if !bytes.Equal(containerResolv, []byte(expected)) {
-		t.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
+		c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv))
 	}
 
 	//6. Test that replacing (as opposed to modifying) resolv.conf triggers an update
@@ -1854,194 +1627,163 @@
 
 	// Restore the original resolv.conf
 	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Run the container so it picks up the old settings
 	cmd = exec.Command(dockerBinary, "run", "--name='third'", "busybox", "true")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	containerID3, err := getIDByName("third")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Create a modified resolv.conf.aside and override resolv.conf with it
 	bytesResolvConf = []byte(tmpResolvConf)
 	if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	time.Sleep(time.Second / 2)
+	// start the container again to pickup changes
+	cmd = exec.Command(dockerBinary, "start", "third")
+	if out, err := runCommand(cmd); err != nil {
+		c.Fatalf("Errored out %s, \nerror: %v", string(out), err)
+	}
+
 	// check for update in container
 	containerResolv, err = readContainerFile(containerID3, "resolv.conf")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if !bytes.Equal(containerResolv, bytesResolvConf) {
-		t.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
+		c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
 	}
 
 	//cleanup, restore original resolv.conf happens in defer func()
-	logDone("run - resolv.conf updater")
 }
 
-func TestRunAddHost(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunAddHost(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	actual := strings.Trim(out, "\r\n")
 	if actual != "86.75.30.9\textra" {
-		t.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
+		c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
 	}
-
-	logDone("run - add-host option")
 }
 
 // Regression test for #6983
-func TestRunAttachStdErrOnlyTTYMode(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stderr", "busybox", "true")
 	exitCode, err := runCommand(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	} else if exitCode != 0 {
-		t.Fatalf("Container should have exited with error code 0")
+		c.Fatalf("Container should have exited with error code 0")
 	}
-
-	logDone("run - Attach stderr only with -t")
 }
 
 // Regression test for #6983
-func TestRunAttachStdOutOnlyTTYMode(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "busybox", "true")
 
 	exitCode, err := runCommand(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	} else if exitCode != 0 {
-		t.Fatalf("Container should have exited with error code 0")
+		c.Fatalf("Container should have exited with error code 0")
 	}
-
-	logDone("run - Attach stdout only with -t")
 }
 
 // Regression test for #6983
-func TestRunAttachStdOutAndErrTTYMode(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
 	exitCode, err := runCommand(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	} else if exitCode != 0 {
-		t.Fatalf("Container should have exited with error code 0")
+		c.Fatalf("Container should have exited with error code 0")
 	}
-
-	logDone("run - Attach stderr and stdout with -t")
 }
 
 // Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
 // but using --attach instead of -a to make sure we read the flag correctly
-func TestRunAttachWithDettach(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true")
 	_, stderr, _, err := runCommandWithStdoutStderr(cmd)
 	if err == nil {
-		t.Fatalf("Container should have exited with error code different than 0", err)
+		c.Fatal("Container should have exited with error code different than 0")
 	} else if !strings.Contains(stderr, "Conflicting options: -a and -d") {
-		t.Fatalf("Should have been returned an error with conflicting options -a and -d")
+		c.Fatal("Should have been returned an error with conflicting options -a and -d")
 	}
-
-	logDone("run - Attach stdout with -d")
 }
 
-func TestRunState(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunState(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	id := strings.TrimSpace(out)
 	state, err := inspectField(id, "State.Running")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if state != "true" {
-		t.Fatal("Container state is 'not running'")
+		c.Fatal("Container state is 'not running'")
 	}
 	pid1, err := inspectField(id, "State.Pid")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if pid1 == "0" {
-		t.Fatal("Container state Pid 0")
+		c.Fatal("Container state Pid 0")
 	}
 
 	cmd = exec.Command(dockerBinary, "stop", id)
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	state, err = inspectField(id, "State.Running")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if state != "false" {
-		t.Fatal("Container state is 'running'")
+		c.Fatal("Container state is 'running'")
 	}
 	pid2, err := inspectField(id, "State.Pid")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if pid2 == pid1 {
-		t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
+		c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
 	}
 
 	cmd = exec.Command(dockerBinary, "start", id)
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	state, err = inspectField(id, "State.Running")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if state != "true" {
-		t.Fatal("Container state is 'not running'")
+		c.Fatal("Container state is 'not running'")
 	}
 	pid3, err := inspectField(id, "State.Pid")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if pid3 == pid1 {
-		t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
+		c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1)
 	}
-	logDone("run - test container state.")
 }
 
 // Test for #1737
-func TestRunCopyVolumeUidGid(t *testing.T) {
+func (s *DockerSuite) TestRunCopyVolumeUidGid(c *check.C) {
 	name := "testrunvolumesuidgid"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 	_, err := buildImage(name,
 		`FROM busybox
 		RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
@@ -2049,184 +1791,166 @@
 		RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Test that the uid and gid is copied from the image to the volume
 	cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	out = strings.TrimSpace(out)
 	if out != "dockerio:dockerio" {
-		t.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out)
+		c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out)
 	}
-
-	logDone("run - copy uid/gid for volume")
 }
 
 // Test for #1582
-func TestRunCopyVolumeContent(t *testing.T) {
+func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) {
 	name := "testruncopyvolumecontent"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 	_, err := buildImage(name,
 		`FROM busybox
 		RUN mkdir -p /hello/local && echo hello > /hello/local/world`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Test that the content is copied from the image to the volume
 	cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "find", "/hello")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) {
-		t.Fatal("Container failed to transfer content to volume")
+		c.Fatal("Container failed to transfer content to volume")
 	}
-	logDone("run - copy volume content")
 }
 
-func TestRunCleanupCmdOnEntrypoint(t *testing.T) {
+func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) {
 	name := "testrunmdcleanuponentrypoint"
-	defer deleteImages(name)
-	defer deleteAllContainers()
 	if _, err := buildImage(name,
 		`FROM busybox
 		ENTRYPOINT ["echo"]
         CMD ["testingpoint"]`,
 		true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	runCmd := exec.Command(dockerBinary, "run", "--entrypoint", "whoami", name)
 	out, exit, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("Error: %v, out: %q", err, out)
+		c.Fatalf("Error: %v, out: %q", err, out)
 	}
 	if exit != 0 {
-		t.Fatalf("expected exit code 0 received %d, out: %q", exit, out)
+		c.Fatalf("expected exit code 0 received %d, out: %q", exit, out)
 	}
 	out = strings.TrimSpace(out)
 	if out != "root" {
-		t.Fatalf("Expected output root, got %q", out)
+		c.Fatalf("Expected output root, got %q", out)
 	}
-	logDone("run - cleanup cmd on --entrypoint")
 }
 
 // TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected
-func TestRunWorkdirExistsAndIsFile(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-w", "/bin/cat", "busybox")
 	out, exit, err := runCommandWithOutput(runCmd)
 	if !(err != nil && exit == 1 && strings.Contains(out, "Cannot mkdir: /bin/cat is not a directory")) {
-		t.Fatalf("Docker must complains about making dir, but we got out: %s, exit: %d, err: %s", out, exit, err)
+		c.Fatalf("Docker must complains about making dir, but we got out: %s, exit: %d, err: %s", out, exit, err)
 	}
-	logDone("run - error on existing file for workdir")
 }
 
-func TestRunExitOnStdinClose(t *testing.T) {
+func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) {
 	name := "testrunexitonstdinclose"
-	defer deleteAllContainers()
 	runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", "/bin/cat")
 
 	stdin, err := runCmd.StdinPipe()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	stdout, err := runCmd.StdoutPipe()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err := runCmd.Start(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if _, err := stdin.Write([]byte("hello\n")); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	r := bufio.NewReader(stdout)
 	line, err := r.ReadString('\n')
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	line = strings.TrimSpace(line)
 	if line != "hello" {
-		t.Fatalf("Output should be 'hello', got '%q'", line)
+		c.Fatalf("Output should be 'hello', got '%q'", line)
 	}
 	if err := stdin.Close(); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	finish := make(chan struct{})
+	finish := make(chan error)
 	go func() {
-		if err := runCmd.Wait(); err != nil {
-			t.Fatal(err)
-		}
+		finish <- runCmd.Wait()
 		close(finish)
 	}()
 	select {
-	case <-finish:
+	case err := <-finish:
+		c.Assert(err, check.IsNil)
 	case <-time.After(1 * time.Second):
-		t.Fatal("docker run failed to exit on stdin close")
+		c.Fatal("docker run failed to exit on stdin close")
 	}
 	state, err := inspectField(name, "State.Running")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
+
 	if state != "false" {
-		t.Fatal("Container must be stopped after stdin closing")
+		c.Fatal("Container must be stopped after stdin closing")
 	}
-	logDone("run - exit on stdin closing")
 }
 
 // Test for #2267
-func TestRunWriteHostsFileAndNotCommit(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) {
 	name := "writehosts"
 	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "test2267") {
-		t.Fatal("/etc/hosts should contain 'test2267'")
+		c.Fatal("/etc/hosts should contain 'test2267'")
 	}
 
 	cmd = exec.Command(dockerBinary, "diff", name)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
-	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) {
-		t.Fatal("diff should be empty")
+	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
+		c.Fatal("diff should be empty")
 	}
-
-	logDone("run - write to /etc/hosts and not commited")
 }
 
-func eqToBaseDiff(out string, t *testing.T) bool {
+func eqToBaseDiff(out string, c *check.C) bool {
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello")
 	out1, _, err := runCommandWithOutput(cmd)
-	cID := stripTrailingCharacters(out1)
+	cID := strings.TrimSpace(out1)
 	cmd = exec.Command(dockerBinary, "diff", cID)
-	base_diff, _, err := runCommandWithOutput(cmd)
+	baseDiff, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, base_diff)
+		c.Fatal(err, baseDiff)
 	}
-	base_arr := strings.Split(base_diff, "\n")
-	sort.Strings(base_arr)
-	out_arr := strings.Split(out, "\n")
-	sort.Strings(out_arr)
-	return sliceEq(base_arr, out_arr)
+	baseArr := strings.Split(baseDiff, "\n")
+	sort.Strings(baseArr)
+	outArr := strings.Split(out, "\n")
+	sort.Strings(outArr)
+	return sliceEq(baseArr, outArr)
 }
 
 func sliceEq(a, b []string) bool {
@@ -2244,859 +1968,769 @@
 }
 
 // Test for #2267
-func TestRunWriteHostnameFileAndNotCommit(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) {
 	name := "writehostname"
 	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "test2267") {
-		t.Fatal("/etc/hostname should contain 'test2267'")
+		c.Fatal("/etc/hostname should contain 'test2267'")
 	}
 
 	cmd = exec.Command(dockerBinary, "diff", name)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) {
-		t.Fatal("diff should be empty")
+	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
+		c.Fatal("diff should be empty")
 	}
-
-	logDone("run - write to /etc/hostname and not commited")
 }
 
 // Test for #2267
-func TestRunWriteResolvFileAndNotCommit(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) {
 	name := "writeresolv"
 	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "test2267") {
-		t.Fatal("/etc/resolv.conf should contain 'test2267'")
+		c.Fatal("/etc/resolv.conf should contain 'test2267'")
 	}
 
 	cmd = exec.Command(dockerBinary, "diff", name)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) {
-		t.Fatal("diff should be empty")
+	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) {
+		c.Fatal("diff should be empty")
 	}
-
-	logDone("run - write to /etc/resolv.conf and not commited")
 }
 
-func TestRunWithBadDevice(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWithBadDevice(c *check.C) {
 	name := "baddevice"
 	cmd := exec.Command(dockerBinary, "run", "--name", name, "--device", "/etc", "busybox", "true")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatal("Run should fail with bad device")
+		c.Fatal("Run should fail with bad device")
 	}
-	expected := `\"/etc\": not a device node`
+	expected := `"/etc": not a device node`
 	if !strings.Contains(out, expected) {
-		t.Fatalf("Output should contain %q, actual out: %q", expected, out)
+		c.Fatalf("Output should contain %q, actual out: %q", expected, out)
 	}
-	logDone("run - error with bad device")
 }
 
-func TestRunEntrypoint(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunEntrypoint(c *check.C) {
 	name := "entrypoint"
 	cmd := exec.Command(dockerBinary, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	expected := "foobar"
 	if out != expected {
-		t.Fatalf("Output should be %q, actual out: %q", expected, out)
+		c.Fatalf("Output should be %q, actual out: %q", expected, out)
 	}
-	logDone("run - entrypoint")
 }
 
-func TestRunBindMounts(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunBindMounts(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	tmpDir, err := ioutil.TempDir("", "docker-test-container")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	defer os.RemoveAll(tmpDir)
-	writeFile(path.Join(tmpDir, "touch-me"), "", t)
+	writeFile(path.Join(tmpDir, "touch-me"), "", c)
 
 	// Test reading from a read-only bind mount
 	cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox", "ls", "/tmp")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if !strings.Contains(out, "touch-me") {
-		t.Fatal("Container failed to read from bind mount")
+		c.Fatal("Container failed to read from bind mount")
 	}
 
 	// test writing to bind mount
 	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-	readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
+	readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist
 
 	// test mounting to an illegal destination directory
 	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".")
 	_, err = runCommand(cmd)
 	if err == nil {
-		t.Fatal("Container bind mounted illegal directory")
+		c.Fatal("Container bind mounted illegal directory")
 	}
 
 	// test mount a file
 	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla")
 	_, err = runCommand(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-	content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
+	content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist
 	expected := "yotta"
 	if content != expected {
-		t.Fatalf("Output should be %q, actual out: %q", expected, content)
+		c.Fatalf("Output should be %q, actual out: %q", expected, content)
 	}
-
-	logDone("run - bind mounts")
 }
 
 // Ensure that CIDFile gets deleted if it's empty
 // Perform this test by making `docker run` fail
-func TestRunCidFileCleanupIfEmpty(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) {
 	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpDir)
 	tmpCidFile := path.Join(tmpDir, "cid")
 	cmd := exec.Command(dockerBinary, "run", "--cidfile", tmpCidFile, "emptyfs")
 	out, _, err := runCommandWithOutput(cmd)
 	if err == nil {
-		t.Fatalf("Run without command must fail. out=%s", out)
+		c.Fatalf("Run without command must fail. out=%s", out)
 	} else if !strings.Contains(out, "No command specified") {
-		t.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err)
+		c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err)
 	}
 
 	if _, err := os.Stat(tmpCidFile); err == nil {
-		t.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
+		c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile)
 	}
-	logDone("run - cleanup empty cidfile on error")
 }
 
 // #2098 - Docker cidFiles only contain short version of the containerId
-//sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
+//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test"
 // TestRunCidFile tests that run --cidfile returns the longid
-func TestRunCidFileCheckIDLength(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) {
 	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	tmpCidFile := path.Join(tmpDir, "cid")
 	defer os.RemoveAll(tmpDir)
 	cmd := exec.Command(dockerBinary, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id := strings.TrimSpace(out)
 	buffer, err := ioutil.ReadFile(tmpCidFile)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	cid := string(buffer)
 	if len(cid) != 64 {
-		t.Fatalf("--cidfile should be a long id, not %q", id)
+		c.Fatalf("--cidfile should be a long id, not %q", id)
 	}
 	if cid != id {
-		t.Fatalf("cid must be equal to %s, got %s", id, cid)
+		c.Fatalf("cid must be equal to %s, got %s", id, cid)
 	}
-
-	logDone("run - cidfile contains long id")
 }
 
-func TestRunNetworkNotInitializedNoneMode(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "--net=none", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id := strings.TrimSpace(out)
 	res, err := inspectField(id, "NetworkSettings.IPAddress")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if res != "" {
-		t.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res)
+		c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res)
 	}
-
-	logDone("run - network must not be initialized in 'none' mode")
 }
 
-func TestRunSetMacAddress(t *testing.T) {
+func (s *DockerSuite) TestRunSetMacAddress(c *check.C) {
 	mac := "12:34:56:78:9a:bc"
 
-	defer deleteAllContainers()
 	cmd := exec.Command(dockerBinary, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'")
 	out, ec, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("exec failed:\nexit code=%v\noutput=%s", ec, out)
+		c.Fatalf("exec failed:\nexit code=%v\noutput=%s", ec, out)
 	}
 	actualMac := strings.TrimSpace(out)
 	if actualMac != mac {
-		t.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
+		c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
 	}
-
-	logDone("run - setting MAC address with --mac-address")
 }
 
-func TestRunInspectMacAddress(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) {
 	mac := "12:34:56:78:9a:bc"
 	cmd := exec.Command(dockerBinary, "run", "-d", "--mac-address="+mac, "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id := strings.TrimSpace(out)
 	inspectedMac, err := inspectField(id, "NetworkSettings.MacAddress")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if inspectedMac != mac {
-		t.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
+		c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
 	}
-
-	logDone("run - inspecting MAC address")
 }
 
 // test docker run use a invalid mac address
-func TestRunWithInvalidMacAddress(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--mac-address", "92:d0:c6:0a:29", "busybox")
 	out, _, err := runCommandWithOutput(runCmd)
 	//use a invalid mac address should with a error out
 	if err == nil || !strings.Contains(out, "is not a valid mac address") {
-		t.Fatalf("run with an invalid --mac-address should with error out")
+		c.Fatalf("run with an invalid --mac-address should with error out")
 	}
-
-	logDone("run - can't use an invalid mac address")
 }
 
-func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id := strings.TrimSpace(out)
 	ip, err := inspectField(id, "NetworkSettings.IPAddress")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
 		"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT")
 	out, _, err = runCommandWithOutput(iptCmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	if err := deleteContainer(id); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	cmd = exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-
-	logDone("run - port should be deallocated even on iptables error")
 }
 
-func TestRunPortInUse(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunPortInUse(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	port := "1234"
-	l, err := net.Listen("tcp", ":"+port)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer l.Close()
 	cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
-	if err == nil {
-		t.Fatalf("Binding on used port must fail")
-	}
-	if !strings.Contains(out, "address already in use") {
-		t.Fatalf("Out must be about \"address already in use\", got %s", out)
+	if err != nil {
+		c.Fatalf("Fail to run listening container")
 	}
 
-	logDone("run - error out if port already in use")
+	cmd = exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
+	out, _, err = runCommandWithOutput(cmd)
+	if err == nil {
+		c.Fatalf("Binding on used port must fail")
+	}
+	if !strings.Contains(out, "port is already allocated") {
+		c.Fatalf("Out must be about \"port is already allocated\", got %s", out)
+	}
 }
 
-// https://github.com/docker/docker/issues/8428
-func TestRunPortProxy(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-
-	defer deleteAllContainers()
-
-	port := "12345"
-	cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
-
+// https://github.com/docker/docker/issues/12148
+func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) {
+	// allocate a dynamic port to get the most recent
+	cmd := exec.Command(dockerBinary, "run", "-d", "-P", "-p", "80", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf("Failed to run and bind port %s, output: %s, error: %s", port, out, err)
+		c.Fatalf("Failed to run, output: %s, error: %s", out, err)
 	}
+	id := strings.TrimSpace(out)
 
-	// connect for 10 times here. This will trigger 10 EPIPES in the child
-	// process and kill it when it writes to a closed stdout/stderr
-	for i := 0; i < 10; i++ {
-		net.Dial("tcp", fmt.Sprintf("0.0.0.0:%s", port))
-	}
-
-	listPs := exec.Command("sh", "-c", "ps ax | grep docker")
-	out, _, err = runCommandWithOutput(listPs)
+	cmd = exec.Command(dockerBinary, "port", id, "80")
+	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Errorf("list docker process failed with output %s, error %s", out, err)
+		c.Fatalf("Failed to get port, output: %s, error: %s", out, err)
 	}
-	if strings.Contains(out, "docker <defunct>") {
-		t.Errorf("Unexpected defunct docker process")
-	}
-	if !strings.Contains(out, "docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 12345") {
-		t.Errorf("Failed to find docker-proxy process, got %s", out)
+	strPort := strings.Split(strings.TrimSpace(out), ":")[1]
+	port, err := strconv.ParseInt(strPort, 10, 64)
+	if err != nil {
+		c.Fatalf("invalid port, got: %s, error: %s", strPort, err)
 	}
 
-	logDone("run - proxy should work with unavailable port")
+	// allocate a static port and a dynamic port together, with static port
+	// takes the next recent port in dynamic port range.
+	cmd = exec.Command(dockerBinary, "run", "-d", "-P",
+		"-p", "80",
+		"-p", fmt.Sprintf("%d:8080", port+1),
+		"busybox", "top")
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatalf("Failed to run, output: %s, error: %s", out, err)
+	}
 }
 
 // Regression test for #7792
-func TestRunMountOrdering(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunMountOrdering(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpDir)
 
 	tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpDir2)
 
-	// Create a temporary tmpfs mount.
+	// Create a temporary tmpfs mounc.
 	fooDir := filepath.Join(tmpDir, "foo")
 	if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil {
-		t.Fatalf("failed to mkdir at %s - %s", fooDir, err)
+		c.Fatalf("failed to mkdir at %s - %s", fooDir, err)
 	}
 
 	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
+	cmd := exec.Command(dockerBinary, "run",
+		"-v", fmt.Sprintf("%s:/tmp", tmpDir),
+		"-v", fmt.Sprintf("%s:/tmp/foo", fooDir),
+		"-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2),
+		"-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir),
+		"busybox:latest", "sh", "-c",
+		"ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-
-	logDone("run - volumes are mounted in the correct order")
 }
 
 // Regression test for https://github.com/docker/docker/issues/8259
-func TestRunReuseBindVolumeThatIsSymlink(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(tmpDir)
 
 	linkPath := os.TempDir() + "/testlink2"
 	if err := os.Symlink(tmpDir, linkPath); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer os.RemoveAll(linkPath)
 
 	// Create first container
 	cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Create second container with same symlinked path
 	// This will fail if the referenced issue is hit with a "Volume exists" error
 	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
-
-	logDone("run - can remount old bindmount volume")
 }
 
 //GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container
-func TestRunCreateVolumeEtc(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	if !strings.Contains(out, "nameserver 127.0.0.1") {
-		t.Fatal("/etc volume mount hides /etc/resolv.conf")
+		c.Fatal("/etc volume mount hides /etc/resolv.conf")
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	if !strings.Contains(out, "test123") {
-		t.Fatal("/etc volume mount hides /etc/hostname")
+		c.Fatal("/etc volume mount hides /etc/hostname")
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 	out = strings.Replace(out, "\n", " ", -1)
 	if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") {
-		t.Fatal("/etc volume mount hides /etc/hosts")
+		c.Fatal("/etc volume mount hides /etc/hosts")
 	}
-
-	logDone("run - verify /etc volume doesn't hide special bind mounts")
 }
 
-func TestVolumesNoCopyData(t *testing.T) {
-	defer deleteImages("dataimage")
-	defer deleteAllContainers()
+func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) {
 	if _, err := buildImage("dataimage",
 		`FROM busybox
 		 RUN mkdir -p /foo
 		 RUN touch /foo/bar`,
 		true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "--name", "test", "-v", "/foo", "busybox")
 	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar")
 	if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") {
-		t.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out)
+		c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out)
 	}
 
 	tmpDir := randomUnixTmpDirPath("docker_test_bind_mount_copy_data")
 	cmd = exec.Command(dockerBinary, "run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar")
 	if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") {
-		t.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out)
+		c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out)
 	}
-
-	logDone("run - volumes do not copy data for volumes-from and bindmounts")
 }
 
-func TestRunVolumesNotRecreatedOnStart(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-
-	// Clear out any remnants from other tests
-	deleteAllContainers()
-	info, err := ioutil.ReadDir(volumesConfigPath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(info) > 0 {
-		for _, f := range info {
-			if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil {
-				t.Fatal(err)
-			}
-		}
-	}
-
-	defer deleteAllContainers()
-	cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox")
-	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
-	}
-
-	cmd = exec.Command(dockerBinary, "start", "lone_starr")
-	if _, err := runCommand(cmd); err != nil {
-		t.Fatal(err)
-	}
-
-	info, err = ioutil.ReadDir(volumesConfigPath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(info) != 1 {
-		t.Fatalf("Expected only 1 volume have %v", len(info))
-	}
-
-	logDone("run - volumes not recreated on start")
-}
-
-func TestRunNoOutputFromPullInStdout(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) {
 	// just run with unknown image
 	cmd := exec.Command(dockerBinary, "run", "asdfsg")
 	stdout := bytes.NewBuffer(nil)
 	cmd.Stdout = stdout
 	if err := cmd.Run(); err == nil {
-		t.Fatal("Run with unknown image should fail")
+		c.Fatal("Run with unknown image should fail")
 	}
 	if stdout.Len() != 0 {
-		t.Fatalf("Stdout contains output from pull: %s", stdout)
+		c.Fatalf("Stdout contains output from pull: %s", stdout)
 	}
-	logDone("run - no output from pull in stdout")
 }
 
-func TestRunVolumesCleanPaths(t *testing.T) {
+func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) {
 	if _, err := buildImage("run_volumes_clean_paths",
 		`FROM busybox
 		 VOLUME /foo/`,
 		true); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
-	defer deleteImages("run_volumes_clean_paths")
-	defer deleteAllContainers()
 
 	cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if out != "<no value>" {
-		t.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out)
+	c.Assert(err, check.IsNil)
+	if out != "" {
+		c.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out)
 	}
 
 	out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !strings.Contains(out, volumesStoragePath) {
-		t.Fatalf("Volume was not defined for /foo\n%q", out)
+	c.Assert(err, check.IsNil)
+	if !strings.Contains(out, volumesConfigPath) {
+		c.Fatalf("Volume was not defined for /foo\n%q", out)
 	}
 
 	out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if out != "<no value>" {
-		t.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out)
+	c.Assert(err, check.IsNil)
+	if out != "" {
+		c.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out)
 	}
 	out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar")
-	if err != nil {
-		t.Fatal(err)
+	c.Assert(err, check.IsNil)
+	if !strings.Contains(out, volumesConfigPath) {
+		c.Fatalf("Volume was not defined for /bar\n%q", out)
 	}
-	if !strings.Contains(out, volumesStoragePath) {
-		t.Fatalf("Volume was not defined for /bar\n%q", out)
-	}
-
-	logDone("run - volume paths are cleaned")
 }
 
 // Regression test for #3631
-func TestRunSlowStdoutConsumer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) {
+	cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv")
 
-	c := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv")
-
-	stdout, err := c.StdoutPipe()
+	stdout, err := cont.StdoutPipe()
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
-	if err := c.Start(); err != nil {
-		t.Fatal(err)
+	if err := cont.Start(); err != nil {
+		c.Fatal(err)
 	}
 	n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	expected := 2 * 1024 * 2000
 	if n != expected {
-		t.Fatalf("Expected %d, got %d", expected, n)
+		c.Fatalf("Expected %d, got %d", expected, n)
 	}
-
-	logDone("run - slow consumer")
 }
 
-func TestRunAllowPortRangeThroughExpose(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	id := strings.TrimSpace(out)
 	portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	var ports nat.PortMap
-	err = unmarshalJSON([]byte(portstr), &ports)
+	if err = unmarshalJSON([]byte(portstr), &ports); err != nil {
+		c.Fatal(err)
+	}
 	for port, binding := range ports {
 		portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
 		if portnum < 3000 || portnum > 3003 {
-			t.Fatalf("Port is out of range ", portnum, binding, out)
+			c.Fatalf("Port %d is out of range ", portnum)
 		}
 		if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
-			t.Fatal("Port is not mapped for the port "+port, out)
+			c.Fatalf("Port is not mapped for the port %d", port)
 		}
 	}
-	if err := deleteContainer(id); err != nil {
-		t.Fatal(err)
-	}
-	logDone("run - allow port range through --expose flag")
 }
 
 // test docker run expose a invalid port
-func TestRunExposePort(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunExposePort(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "--expose", "80000", "busybox")
 	out, _, err := runCommandWithOutput(runCmd)
 	//expose a invalid port should with a error out
 	if err == nil || !strings.Contains(out, "Invalid range format for --expose") {
-		t.Fatalf("run --expose a invalid port should with error out")
+		c.Fatalf("run --expose a invalid port should with error out")
 	}
-
-	logDone("run - can't expose a invalid port")
 }
 
-func TestRunUnknownCommand(t *testing.T) {
-	testRequires(t, NativeExecDriver)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunUnknownCommand(c *check.C) {
+	testRequires(c, NativeExecDriver)
 	runCmd := exec.Command(dockerBinary, "create", "busybox", "/bin/nada")
 	cID, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("Failed to create container: %v, output: %q", err, cID)
+		c.Fatalf("Failed to create container: %v, output: %q", err, cID)
 	}
 	cID = strings.TrimSpace(cID)
 
 	runCmd = exec.Command(dockerBinary, "start", cID)
 	_, _, _, _ = runCommandWithStdoutStderr(runCmd)
 
-	runCmd = exec.Command(dockerBinary, "inspect", "--format={{.State.ExitCode}}", cID)
-	rc, _, _, err2 := runCommandWithStdoutStderr(runCmd)
-	rc = strings.TrimSpace(rc)
-
-	if err2 != nil {
-		t.Fatalf("Error getting status of container: %v", err2)
-	}
-
+	rc, err := inspectField(cID, "State.ExitCode")
+	c.Assert(err, check.IsNil)
 	if rc == "0" {
-		t.Fatalf("ExitCode(%v) cannot be 0", rc)
+		c.Fatalf("ExitCode(%v) cannot be 0", rc)
 	}
-
-	logDone("run - Unknown Command")
 }
 
-func TestRunModeIpcHost(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunModeIpcHost(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	hostIpc, err := os.Readlink("/proc/1/ns/ipc")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc")
 	out2, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if hostIpc != out2 {
-		t.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out2)
+		c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out2)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/ipc")
 	out2, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if hostIpc == out2 {
-		t.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out2)
+		c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out2)
 	}
-
-	logDone("run - ipc host mode")
 }
 
-func TestRunModeIpcContainer(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	id := strings.TrimSpace(out)
 	state, err := inspectField(id, "State.Running")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if state != "true" {
-		t.Fatal("Container state is 'not running'")
+		c.Fatal("Container state is 'not running'")
 	}
 	pid1, err := inspectField(id, "State.Pid")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 
 	parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc")
 	out2, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if parentContainerIpc != out2 {
-		t.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out2)
+		c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out2)
 	}
-
-	logDone("run - ipc container mode")
 }
 
-func TestContainerNetworkMode(t *testing.T) {
-	defer deleteAllContainers()
-	testRequires(t, SameHostDaemon)
+func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "-d", "--ipc", "container:abcd1234", "busybox", "top")
+	out, _, err := runCommandWithOutput(cmd)
+	if !strings.Contains(out, "abcd1234") || err == nil {
+		c.Fatalf("run IPC from a non exists container should with correct error out")
+	}
+}
+
+func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) {
+	testRequires(c, SameHostDaemon)
+
+	cmd := exec.Command(dockerBinary, "create", "busybox")
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out)
+	}
+	id := strings.TrimSpace(out)
+
+	cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox")
+	out, _, err = runCommandWithOutput(cmd)
+	if err == nil {
+		c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err)
+	}
+}
+
+func (s *DockerSuite) TestContainerNetworkMode(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	id := strings.TrimSpace(out)
 	if err := waitRun(id); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	pid1, err := inspectField(id, "State.Pid")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 
 	parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net")
 	out2, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if parentContainerNet != out2 {
-		t.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out2)
+		c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out2)
 	}
-
-	logDone("run - container shared network namespace")
 }
 
-func TestRunModePidHost(t *testing.T) {
-	testRequires(t, NativeExecDriver, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestContainerNetworkModeToSelf(c *check.C) {
+	cmd := exec.Command(dockerBinary, "run", "--name=me", "--net=container:me", "busybox", "true")
+	out, _, err := runCommandWithOutput(cmd)
+	if err == nil || !strings.Contains(out, "cannot join own network") {
+		c.Fatalf("using container net mode to self should result in an error")
+	}
+}
+
+func (s *DockerSuite) TestRunModePidHost(c *check.C) {
+	testRequires(c, NativeExecDriver, SameHostDaemon)
 
 	hostPid, err := os.Readlink("/proc/1/ns/pid")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid")
 	out2, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if hostPid != out2 {
-		t.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out2)
+		c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out2)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/pid")
 	out2, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if hostPid == out2 {
-		t.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out2)
+		c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out2)
 	}
-
-	logDone("run - pid host mode")
 }
 
-func TestRunTLSverify(t *testing.T) {
+func (s *DockerSuite) TestRunModeUTSHost(c *check.C) {
+	testRequires(c, NativeExecDriver, SameHostDaemon)
+
+	hostUTS, err := os.Readlink("/proc/1/ns/uts")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	cmd := exec.Command(dockerBinary, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts")
+	out2, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out2)
+	}
+
+	out2 = strings.Trim(out2, "\n")
+	if hostUTS != out2 {
+		c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out2)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/uts")
+	out2, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out2)
+	}
+
+	out2 = strings.Trim(out2, "\n")
+	if hostUTS == out2 {
+		c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out2)
+	}
+}
+
+func (s *DockerSuite) TestRunTLSverify(c *check.C) {
 	cmd := exec.Command(dockerBinary, "ps")
 	out, ec, err := runCommandWithOutput(cmd)
 	if err != nil || ec != 0 {
-		t.Fatalf("Should have worked: %v:\n%v", err, out)
+		c.Fatalf("Should have worked: %v:\n%v", err, out)
 	}
 
 	// Regardless of whether we specify true or false we need to
@@ -3105,318 +2739,354 @@
 	cmd = exec.Command(dockerBinary, "--tlsverify=false", "ps")
 	out, ec, err = runCommandWithOutput(cmd)
 	if err == nil || ec == 0 || !strings.Contains(out, "trying to connect") {
-		t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err)
+		c.Fatalf("Should have failed: \net:%v\nout:%v\nerr:%v", ec, out, err)
 	}
 
 	cmd = exec.Command(dockerBinary, "--tlsverify=true", "ps")
 	out, ec, err = runCommandWithOutput(cmd)
 	if err == nil || ec == 0 || !strings.Contains(out, "cert") {
-		t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err)
+		c.Fatalf("Should have failed: \net:%v\nout:%v\nerr:%v", ec, out, err)
 	}
-
-	logDone("run - verify tls is set for --tlsverify")
 }
 
-func TestRunPortFromDockerRangeInUse(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) {
 	// first find allocator current position
 	cmd := exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	id := strings.TrimSpace(out)
 	cmd = exec.Command(dockerBinary, "port", id)
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	out = strings.TrimSpace(out)
 
 	if out == "" {
-		t.Fatal("docker port command output is empty")
+		c.Fatal("docker port command output is empty")
 	}
 	out = strings.Split(out, ":")[1]
 	lastPort, err := strconv.Atoi(out)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	port := lastPort + 1
 	l, err := net.Listen("tcp", ":"+strconv.Itoa(port))
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer l.Close()
 	cmd = exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatalf(out, err)
+		c.Fatalf(out, err)
 	}
 	id = strings.TrimSpace(out)
 	cmd = exec.Command(dockerBinary, "port", id)
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-
-	logDone("run - find another port if port from autorange already bound")
 }
 
-func TestRunTtyWithPipe(t *testing.T) {
-	defer deleteAllContainers()
-
-	done := make(chan struct{})
+func (s *DockerSuite) TestRunTtyWithPipe(c *check.C) {
+	errChan := make(chan error)
 	go func() {
-		defer close(done)
+		defer close(errChan)
 
 		cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true")
 		if _, err := cmd.StdinPipe(); err != nil {
-			t.Fatal(err)
+			errChan <- err
+			return
 		}
 
 		expected := "cannot enable tty mode"
 		if out, _, err := runCommandWithOutput(cmd); err == nil {
-			t.Fatal("run should have failed")
+			errChan <- fmt.Errorf("run should have failed")
+			return
 		} else if !strings.Contains(out, expected) {
-			t.Fatalf("run failed with error %q: expected %q", out, expected)
+			errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected)
+			return
 		}
 	}()
 
 	select {
-	case <-done:
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
 	case <-time.After(3 * time.Second):
-		t.Fatal("container is running but should have failed")
+		c.Fatal("container is running but should have failed")
 	}
-
-	logDone("run - forbid piped stdin with tty")
 }
 
-func TestRunNonLocalMacAddress(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) {
 	addr := "00:16:3E:08:00:50"
 
 	cmd := exec.Command(dockerBinary, "run", "--mac-address", addr, "busybox", "ifconfig")
 	if out, _, err := runCommandWithOutput(cmd); err != nil || !strings.Contains(out, addr) {
-		t.Fatalf("Output should have contained %q: %s, %v", addr, out, err)
+		c.Fatalf("Output should have contained %q: %s, %v", addr, out, err)
 	}
-
-	logDone("run - use non-local mac-address")
 }
 
-func TestRunNetHost(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunNetHost(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	hostNet, err := os.Readlink("/proc/1/ns/net")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net")
 	out2, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if hostNet != out2 {
-		t.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out2)
+		c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out2)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/net")
 	out2, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out2)
+		c.Fatal(err, out2)
 	}
 
 	out2 = strings.Trim(out2, "\n")
 	if hostNet == out2 {
-		t.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out2)
+		c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out2)
 	}
-
-	logDone("run - net host mode")
 }
 
-func TestRunNetContainerWhichHost(t *testing.T) {
-	testRequires(t, SameHostDaemon)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) {
+	testRequires(c, SameHostDaemon)
 
 	hostNet, err := os.Readlink("/proc/1/ns/net")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	cmd := exec.Command(dockerBinary, "run", "-d", "--net=host", "--name=test", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	cmd = exec.Command(dockerBinary, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net")
 	out, _, err = runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	out = strings.Trim(out, "\n")
 	if hostNet != out {
-		t.Fatalf("Container should have host network namespace")
+		c.Fatalf("Container should have host network namespace")
 	}
-
-	logDone("run - net container mode, where container in host mode")
 }
 
-func TestRunAllowPortRangeThroughPublish(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) {
 	cmd := exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top")
 	out, _, err := runCommandWithOutput(cmd)
 
 	id := strings.TrimSpace(out)
 	portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	var ports nat.PortMap
 	err = unmarshalJSON([]byte(portstr), &ports)
 	for port, binding := range ports {
 		portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0])
 		if portnum < 3000 || portnum > 3003 {
-			t.Fatalf("Port is out of range ", portnum, binding, out)
+			c.Fatalf("Port %d is out of range ", portnum)
 		}
 		if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 {
-			t.Fatal("Port is not mapped for the port "+port, out)
+			c.Fatal("Port is not mapped for the port "+port, out)
 		}
 	}
-	logDone("run - allow port range through --expose flag")
 }
 
-func TestRunOOMExitCode(t *testing.T) {
-	defer deleteAllContainers()
-
-	done := make(chan struct{})
+func (s *DockerSuite) TestRunOOMExitCode(c *check.C) {
+	errChan := make(chan error)
 	go func() {
-		defer close(done)
-
-		runCmd := exec.Command(dockerBinary, "run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x; done")
+		defer close(errChan)
+		runCmd := exec.Command(dockerBinary, "run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done")
 		out, exitCode, _ := runCommandWithOutput(runCmd)
 		if expected := 137; exitCode != expected {
-			t.Fatalf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out)
+			errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out)
 		}
 	}()
 
 	select {
-	case <-done:
-	case <-time.After(3 * time.Second):
-		t.Fatal("Timeout waiting for container to die on OOM")
+	case err := <-errChan:
+		c.Assert(err, check.IsNil)
+	case <-time.After(30 * time.Second):
+		c.Fatal("Timeout waiting for container to die on OOM")
 	}
-
-	logDone("run - exit code on oom")
 }
 
-func TestRunSetDefaultRestartPolicy(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) {
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "test", "busybox", "top")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
-	cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.RestartPolicy.Name}}", "test")
-	out, _, err := runCommandWithOutput(cmd)
-	if err != nil {
-		t.Fatalf("failed to inspect container: %v, output: %q", err, out)
-	}
-	out = strings.Trim(out, "\r\n")
+	out, err := inspectField("test", "HostConfig.RestartPolicy.Name")
+	c.Assert(err, check.IsNil)
 	if out != "no" {
-		t.Fatalf("Set default restart policy failed")
+		c.Fatalf("Set default restart policy failed")
 	}
-
-	logDone("run - set default restart policy success")
 }
 
-func TestRunRestartMaxRetries(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) {
 	out, err := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:3", "busybox", "false").CombinedOutput()
 	if err != nil {
-		t.Fatal(string(out), err)
+		c.Fatal(string(out), err)
 	}
 	id := strings.TrimSpace(string(out))
-	if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5); err != nil {
-		t.Fatal(err)
+	if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 10); err != nil {
+		c.Fatal(err)
 	}
 	count, err := inspectField(id, "RestartCount")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if count != "3" {
-		t.Fatalf("Container was restarted %s times, expected %d", count, 3)
+		c.Fatalf("Container was restarted %s times, expected %d", count, 3)
 	}
 	MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount")
-	if err != nil {
-		t.Fatal(err)
-	}
+	c.Assert(err, check.IsNil)
 	if MaximumRetryCount != "3" {
-		t.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
+		c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3")
 	}
-	logDone("run - test max-retries for --restart")
 }
 
-func TestRunContainerWithWritableRootfs(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) {
 	out, err := exec.Command(dockerBinary, "run", "--rm", "busybox", "touch", "/file").CombinedOutput()
 	if err != nil {
-		t.Fatal(string(out), err)
+		c.Fatal(string(out), err)
 	}
-	logDone("run - writable rootfs")
 }
 
-func TestRunContainerWithReadonlyRootfs(t *testing.T) {
-	testRequires(t, NativeExecDriver)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) {
+	testRequires(c, NativeExecDriver)
 
-	out, err := exec.Command(dockerBinary, "run", "--read-only", "--rm", "busybox", "touch", "/file").CombinedOutput()
+	for _, f := range []string{"/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname"} {
+		testReadOnlyFile(f, c)
+	}
+}
+
+func testReadOnlyFile(filename string, c *check.C) {
+	testRequires(c, NativeExecDriver)
+
+	out, err := exec.Command(dockerBinary, "run", "--read-only", "--rm", "busybox", "touch", filename).CombinedOutput()
 	if err == nil {
-		t.Fatal("expected container to error on run with read only error")
+		c.Fatal("expected container to error on run with read only error")
 	}
 	expected := "Read-only file system"
 	if !strings.Contains(string(out), expected) {
-		t.Fatalf("expected output from failure to contain %s but contains %s", expected, out)
+		c.Fatalf("expected output from failure to contain %s but contains %s", expected, out)
 	}
-	logDone("run - read only rootfs")
 }
 
-func TestRunVolumesFromRestartAfterRemoved(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) {
+	testRequires(c, NativeExecDriver)
 
+	_, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top"))
+	c.Assert(err, check.IsNil)
+
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts"))
+	c.Assert(err, check.IsNil)
+
+	if !strings.Contains(string(out), "testlinked") {
+		c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled")
+	}
+}
+
+func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDnsFlag(c *check.C) {
+	testRequires(c, NativeExecDriver)
+
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf"))
+	c.Assert(err, check.IsNil)
+
+	if !strings.Contains(string(out), "1.1.1.1") {
+		c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used")
+	}
+}
+
+func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) {
+	testRequires(c, NativeExecDriver)
+
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts"))
+	c.Assert(err, check.IsNil)
+
+	if !strings.Contains(string(out), "testreadonly") {
+		c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used")
+	}
+}
+
+func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) {
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "voltest", "-v", "/foo", "busybox"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "restarter", "--volumes-from", "voltest", "busybox", "top"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// Remove the main volume container and restart the consuming container
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "voltest"))
 	if err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	// This should not fail since the volumes-from were already applied
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "restart", "restarter"))
 	if err != nil {
-		t.Fatalf("expected container to restart successfully: %v\n%s", err, out)
+		c.Fatalf("expected container to restart successfully: %v\n%s", err, out)
 	}
-
-	logDone("run - can restart a volumes-from container after producer is removed")
 }
 
-func TestRunPidHostWithChildIsKillable(t *testing.T) {
-	defer deleteAllContainers()
+// run container with --rm should remove container if exit code != 0
+func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) {
+	name := "flowers"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "--rm", "busybox", "ls", "/notexists")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err == nil {
+		c.Fatal("Expected docker run to fail", out, err)
+	}
+
+	out, err = getAllContainers()
+	if err != nil {
+		c.Fatal(out, err)
+	}
+
+	if out != "" {
+		c.Fatal("Expected not to have containers", out)
+	}
+}
+
+func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) {
+	name := "sparkles"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "--rm", "busybox", "commandNotFound")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err == nil {
+		c.Fatal("Expected docker run to fail", out, err)
+	}
+
+	out, err = getAllContainers()
+	if err != nil {
+		c.Fatal(out, err)
+	}
+
+	if out != "" {
+		c.Fatal("Expected not to have containers", out)
+	}
+}
+
+func (s *DockerSuite) TestRunPidHostWithChildIsKillable(c *check.C) {
 	name := "ibuildthecloud"
 	if out, err := exec.Command(dockerBinary, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi").CombinedOutput(); err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 	time.Sleep(1 * time.Second)
 	errchan := make(chan error)
@@ -3428,80 +3098,91 @@
 	}()
 	select {
 	case err := <-errchan:
-		if err != nil {
-			t.Fatal(err)
-		}
+		c.Assert(err, check.IsNil)
 	case <-time.After(5 * time.Second):
-		t.Fatal("Kill container timed out")
+		c.Fatal("Kill container timed out")
 	}
-	logDone("run - can kill container with pid-host and some childs of pid 1")
 }
 
-func TestRunWithTooSmallMemoryLimit(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) {
 	// this memory limit is 1 byte less than the min, which is 4MB
 	// https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-m", "4194303", "busybox"))
 	if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") {
-		t.Fatalf("expected run to fail when using too low a memory limit: %q", out)
+		c.Fatalf("expected run to fail when using too low a memory limit: %q", out)
 	}
-
-	logDone("run - can't set too low memory limit")
 }
 
-func TestRunWriteToProcAsound(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) {
 	code, err := runCommand(exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version"))
 	if err == nil || code == 0 {
-		t.Fatal("standard container should not be able to write to /proc/asound")
+		c.Fatal("standard container should not be able to write to /proc/asound")
 	}
-	logDone("run - ro write to /proc/asound")
 }
 
-func TestRunReadProcTimer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunReadProcTimer(c *check.C) {
+	testRequires(c, NativeExecDriver)
 	out, code, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "busybox", "cat", "/proc/timer_stats"))
 	if err != nil || code != 0 {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if strings.Trim(out, "\n ") != "" {
-		t.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out)
+		c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out)
 	}
-	logDone("run - read /proc/timer_stats")
 }
 
-func TestRunReadProcLatency(t *testing.T) {
+func (s *DockerSuite) TestRunReadProcLatency(c *check.C) {
+	testRequires(c, NativeExecDriver)
 	// some kernels don't have this configured so skip the test if this file is not found
 	// on the host running the tests.
 	if _, err := os.Stat("/proc/latency_stats"); err != nil {
-		t.Skip()
+		c.Skip("kernel doesnt have latency_stats configured")
 		return
 	}
-	defer deleteAllContainers()
 	out, code, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "busybox", "cat", "/proc/latency_stats"))
 	if err != nil || code != 0 {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	if strings.Trim(out, "\n ") != "" {
-		t.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out)
+		c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out)
 	}
-	logDone("run - read /proc/latency_stats")
 }
 
-func TestMountIntoProc(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestMountIntoProc(c *check.C) {
+	testRequires(c, NativeExecDriver)
 	code, err := runCommand(exec.Command(dockerBinary, "run", "-v", "/proc//sys", "busybox", "true"))
 	if err == nil || code == 0 {
-		t.Fatal("container should not be able to mount into /proc")
+		c.Fatal("container should not be able to mount into /proc")
 	}
-	logDone("run - mount into proc")
 }
 
-func TestMountIntoSys(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestMountIntoSys(c *check.C) {
+	testRequires(c, NativeExecDriver)
 	_, err := runCommand(exec.Command(dockerBinary, "run", "-v", "/sys/fs/cgroup", "busybox", "true"))
 	if err != nil {
-		t.Fatal("container should be able to mount into /sys")
+		c.Fatal("container should be able to mount into /sys/fs/cgroup")
 	}
-	logDone("run - mount into sys")
+}
+
+func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) {
+	dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top")
+	dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top")
+	dockerCmd(c, "stop", "first")
+	dockerCmd(c, "stop", "second")
+}
+
+func (s *DockerSuite) TestRunUnshareProc(c *check.C) {
+	testRequires(c, Apparmor, NativeExecDriver)
+
+	name := "acidburn"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "jess/unshare", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount")
+	if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Permission denied") {
+		c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err)
+	}
+
+	name = "cereal"
+	runCmd = exec.Command(dockerBinary, "run", "--name", name, "jess/unshare", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc")
+	if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Permission denied") {
+		c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err)
+	}
 }
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
index 9327ac2..59b6231 100644
--- a/integration-cli/docker_cli_run_unix_test.go
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -3,6 +3,7 @@
 package main
 
 import (
+	"bufio"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -10,58 +11,52 @@
 	"path"
 	"path/filepath"
 	"strings"
-	"testing"
 	"time"
 
 	"github.com/docker/docker/pkg/mount"
+	"github.com/go-check/check"
 	"github.com/kr/pty"
 )
 
 // #6509
-func TestRunRedirectStdout(t *testing.T) {
-
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunRedirectStdout(c *check.C) {
 	checkRedirect := func(command string) {
 		_, tty, err := pty.Open()
 		if err != nil {
-			t.Fatalf("Could not open pty: %v", err)
+			c.Fatalf("Could not open pty: %v", err)
 		}
 		cmd := exec.Command("sh", "-c", command)
 		cmd.Stdin = tty
 		cmd.Stdout = tty
 		cmd.Stderr = tty
-		ch := make(chan struct{})
 		if err := cmd.Start(); err != nil {
-			t.Fatalf("start err: %v", err)
+			c.Fatalf("start err: %v", err)
 		}
+		ch := make(chan error)
 		go func() {
-			if err := cmd.Wait(); err != nil {
-				t.Fatalf("wait err=%v", err)
-			}
+			ch <- cmd.Wait()
 			close(ch)
 		}()
 
 		select {
 		case <-time.After(10 * time.Second):
-			t.Fatal("command timeout")
-		case <-ch:
+			c.Fatal("command timeout")
+		case err := <-ch:
+			if err != nil {
+				c.Fatalf("wait err=%v", err)
+			}
 		}
 	}
 
 	checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root")
 	checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root")
-
-	logDone("run - redirect stdout")
 }
 
 // Test recursive bind mount works by default
-func TestRunWithVolumesIsRecursive(t *testing.T) {
-	defer deleteAllContainers()
-
+func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) {
 	tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	defer os.RemoveAll(tmpDir)
@@ -69,68 +64,62 @@
 	// Create a temporary tmpfs mount.
 	tmpfsDir := filepath.Join(tmpDir, "tmpfs")
 	if err := os.MkdirAll(tmpfsDir, 0777); err != nil {
-		t.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err)
+		c.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err)
 	}
 	if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil {
-		t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err)
+		c.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err)
 	}
 
 	f, err := ioutil.TempFile(tmpfsDir, "touch-me")
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	defer f.Close()
 
 	runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs")
 	out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil && exitCode != 0 {
-		t.Fatal(out, stderr, err)
+		c.Fatal(out, stderr, err)
 	}
 	if !strings.Contains(out, filepath.Base(f.Name())) {
-		t.Fatal("Recursive bind mount test failed. Expected file not found")
+		c.Fatal("Recursive bind mount test failed. Expected file not found")
 	}
-
-	logDone("run - volumes are bind mounted recursively")
 }
 
-func TestRunWithUlimits(t *testing.T) {
-	testRequires(t, NativeExecDriver)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunWithUlimits(c *check.C) {
+	testRequires(c, NativeExecDriver)
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n"))
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	ul := strings.TrimSpace(out)
 	if ul != "42" {
-		t.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
+		c.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
 	}
-
-	logDone("run - ulimits are set")
 }
 
-func TestRunContainerWithCgroupParent(t *testing.T) {
-	testRequires(t, NativeExecDriver)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) {
+	testRequires(c, NativeExecDriver)
 
 	cgroupParent := "test"
 	data, err := ioutil.ReadFile("/proc/self/cgroup")
 	if err != nil {
-		t.Fatalf("failed to read '/proc/self/cgroup - %v", err)
+		c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
 	}
 	selfCgroupPaths := parseCgroupPaths(string(data))
 	selfCpuCgroup, found := selfCgroupPaths["memory"]
 	if !found {
-		t.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
+		c.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths)
 	}
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup"))
 	if err != nil {
-		t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
+		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
 	}
 	cgroupPaths := parseCgroupPaths(string(out))
 	if len(cgroupPaths) == 0 {
-		t.Fatalf("unexpected output - %q", string(out))
+		c.Fatalf("unexpected output - %q", string(out))
 	}
 	found = false
 	expectedCgroupPrefix := path.Join(selfCpuCgroup, cgroupParent)
@@ -141,24 +130,22 @@
 		}
 	}
 	if !found {
-		t.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", expectedCgroupPrefix, cgroupPaths)
+		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", expectedCgroupPrefix, cgroupPaths)
 	}
-	logDone("run - cgroup parent")
 }
 
-func TestRunContainerWithCgroupParentAbsPath(t *testing.T) {
-	testRequires(t, NativeExecDriver)
-	defer deleteAllContainers()
+func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) {
+	testRequires(c, NativeExecDriver)
 
 	cgroupParent := "/cgroup-parent/test"
 
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup"))
 	if err != nil {
-		t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
+		c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
 	}
 	cgroupPaths := parseCgroupPaths(string(out))
 	if len(cgroupPaths) == 0 {
-		t.Fatalf("unexpected output - %q", string(out))
+		c.Fatalf("unexpected output - %q", string(out))
 	}
 	found := false
 	for _, path := range cgroupPaths {
@@ -168,8 +155,98 @@
 		}
 	}
 	if !found {
-		t.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", cgroupParent, cgroupPaths)
+		c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", cgroupParent, cgroupPaths)
+	}
+}
+
+func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) {
+	testRequires(c, NativeExecDriver)
+	cmd := exec.Command(dockerBinary, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/")
+
+	out, _, err := runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out)
 	}
 
-	logDone("run - cgroup parent with absolute cgroup path")
+	if actual := strings.Trim(out, "\r\n"); !strings.Contains(out, "timer") {
+		c.Fatalf("expected output /dev/snd/timer, received %s", actual)
+	}
+
+	cmd = exec.Command(dockerBinary, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/")
+
+	out, _, err = runCommandWithOutput(cmd)
+	if err != nil {
+		c.Fatal(err, out)
+	}
+
+	if actual := strings.Trim(out, "\r\n"); !strings.Contains(out, "seq") {
+		c.Fatalf("expected output /dev/othersnd/seq, received %s", actual)
+	}
+}
+
+// TestRunDetach checks attaching and detaching with the escape sequence.
+func (s *DockerSuite) TestRunAttachDetach(c *check.C) {
+	name := "attach-detach"
+	cmd := exec.Command(dockerBinary, "run", "--name", name, "-it", "busybox", "cat")
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		c.Fatal(err)
+	}
+	cpty, tty, err := pty.Open()
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer cpty.Close()
+	cmd.Stdin = tty
+	if err := cmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+	if err := waitRun(name); err != nil {
+		c.Fatal(err)
+	}
+
+	if _, err := cpty.Write([]byte("hello\n")); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := bufio.NewReader(stdout).ReadString('\n')
+	if err != nil {
+		c.Fatal(err)
+	}
+	if strings.TrimSpace(out) != "hello" {
+		c.Fatalf("expected 'hello', got %q", out)
+	}
+
+	// escape sequence
+	if _, err := cpty.Write([]byte{16}); err != nil {
+		c.Fatal(err)
+	}
+	time.Sleep(100 * time.Millisecond)
+	if _, err := cpty.Write([]byte{17}); err != nil {
+		c.Fatal(err)
+	}
+
+	ch := make(chan struct{})
+	go func() {
+		cmd.Wait()
+		ch <- struct{}{}
+	}()
+
+	running, err := inspectField(name, "State.Running")
+	if err != nil {
+		c.Fatal(err)
+	}
+	if running != "true" {
+		c.Fatal("expected container to still be running")
+	}
+
+	go func() {
+		exec.Command(dockerBinary, "kill", name).Run()
+	}()
+
+	select {
+	case <-ch:
+	case <-time.After(10 * time.Millisecond):
+		c.Fatal("timed out waiting for container to exit")
+	}
 }
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
index 1dd71ee..c538890 100644
--- a/integration-cli/docker_cli_save_load_test.go
+++ b/integration-cli/docker_cli_save_load_test.go
@@ -9,37 +9,31 @@
 	"reflect"
 	"sort"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // save a repo using gz compression and try to load it using stdout
-func TestSaveXzAndLoadRepoStdout(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) {
+	name := "test-save-xz-and-load-repo-stdout"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to create a container: %v %v", out, err)
+		c.Fatalf("failed to create a container: %v %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-
 	repoName := "foobar-save-load-test-xz-gz"
 
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	out, _, err = runCommandWithOutput(inspectCmd)
-	if err != nil {
-		t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
-	}
-
-	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
 	out, _, err = runCommandWithOutput(commitCmd)
 	if err != nil {
-		t.Fatalf("failed to commit container: %v %v", out, err)
+		c.Fatalf("failed to commit container: %v %v", out, err)
 	}
 
-	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
 	before, _, err := runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("the repo should exist before saving it: %v %v", before, err)
+		c.Fatalf("the repo should exist before saving it: %v %v", before, err)
 	}
 
 	repoTarball, _, err := runCommandPipelineWithOutput(
@@ -47,7 +41,7 @@
 		exec.Command("xz", "-c"),
 		exec.Command("gzip", "-c"))
 	if err != nil {
-		t.Fatalf("failed to save repo: %v %v", out, err)
+		c.Fatalf("failed to save repo: %v %v", out, err)
 	}
 	deleteImages(repoName)
 
@@ -55,48 +49,40 @@
 	loadCmd.Stdin = strings.NewReader(repoTarball)
 	out, _, err = runCommandWithOutput(loadCmd)
 	if err == nil {
-		t.Fatalf("expected error, but succeeded with no error and output: %v", out)
+		c.Fatalf("expected error, but succeeded with no error and output: %v", out)
 	}
 
 	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
 	after, _, err := runCommandWithOutput(inspectCmd)
 	if err == nil {
-		t.Fatalf("the repo should not exist: %v", after)
+		c.Fatalf("the repo should not exist: %v", after)
 	}
 
 	deleteImages(repoName)
 
-	logDone("load - save a repo with xz compression & load it using stdout")
 }
 
 // save a repo using xz+gz compression and try to load it using stdout
-func TestSaveXzGzAndLoadRepoStdout(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) {
+	name := "test-save-xz-gz-and-load-repo-stdout"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to create a container: %v %v", out, err)
+		c.Fatalf("failed to create a container: %v %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-
 	repoName := "foobar-save-load-test-xz-gz"
 
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	out, _, err = runCommandWithOutput(inspectCmd)
-	if err != nil {
-		t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
-	}
-
-	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
 	out, _, err = runCommandWithOutput(commitCmd)
 	if err != nil {
-		t.Fatalf("failed to commit container: %v %v", out, err)
+		c.Fatalf("failed to commit container: %v %v", out, err)
 	}
 
-	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
 	before, _, err := runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("the repo should exist before saving it: %v %v", before, err)
+		c.Fatalf("the repo should exist before saving it: %v %v", before, err)
 	}
 
 	out, _, err = runCommandPipelineWithOutput(
@@ -104,7 +90,7 @@
 		exec.Command("xz", "-c"),
 		exec.Command("gzip", "-c"))
 	if err != nil {
-		t.Fatalf("failed to save repo: %v %v", out, err)
+		c.Fatalf("failed to save repo: %v %v", out, err)
 	}
 
 	deleteImages(repoName)
@@ -113,90 +99,82 @@
 	loadCmd.Stdin = strings.NewReader(out)
 	out, _, err = runCommandWithOutput(loadCmd)
 	if err == nil {
-		t.Fatalf("expected error, but succeeded with no error and output: %v", out)
+		c.Fatalf("expected error, but succeeded with no error and output: %v", out)
 	}
 
 	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
 	after, _, err := runCommandWithOutput(inspectCmd)
 	if err == nil {
-		t.Fatalf("the repo should not exist: %v", after)
+		c.Fatalf("the repo should not exist: %v", after)
 	}
-
-	deleteContainer(cleanedContainerID)
-	deleteImages(repoName)
-
-	logDone("load - save a repo with xz+gz compression & load it using stdout")
 }
 
-func TestSaveSingleTag(t *testing.T) {
+func (s *DockerSuite) TestSaveSingleTag(c *check.C) {
 	repoName := "foobar-save-single-tag-test"
 
 	tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName))
-	defer deleteImages(repoName)
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatalf("failed to tag repo: %s, %v", out, err)
+		c.Fatalf("failed to tag repo: %s, %v", out, err)
 	}
 
 	idCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName)
 	out, _, err := runCommandWithOutput(idCmd)
 	if err != nil {
-		t.Fatalf("failed to get repo ID: %s, %v", out, err)
+		c.Fatalf("failed to get repo ID: %s, %v", out, err)
 	}
-	cleanedImageID := stripTrailingCharacters(out)
+	cleanedImageID := strings.TrimSpace(out)
 
 	out, _, err = runCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)),
 		exec.Command("tar", "t"),
 		exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID)))
 	if err != nil {
-		t.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
+		c.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
 	}
 
-	logDone("save - save a specific image:tag")
 }
 
-func TestSaveImageId(t *testing.T) {
+func (s *DockerSuite) TestSaveImageId(c *check.C) {
 	repoName := "foobar-save-image-id-test"
 
 	tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName))
-	defer deleteImages(repoName)
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatalf("failed to tag repo: %s, %v", out, err)
+		c.Fatalf("failed to tag repo: %s, %v", out, err)
 	}
 
 	idLongCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName)
 	out, _, err := runCommandWithOutput(idLongCmd)
 	if err != nil {
-		t.Fatalf("failed to get repo ID: %s, %v", out, err)
+		c.Fatalf("failed to get repo ID: %s, %v", out, err)
 	}
 
-	cleanedLongImageID := stripTrailingCharacters(out)
+	cleanedLongImageID := strings.TrimSpace(out)
 
 	idShortCmd := exec.Command(dockerBinary, "images", "-q", repoName)
 	out, _, err = runCommandWithOutput(idShortCmd)
 	if err != nil {
-		t.Fatalf("failed to get repo short ID: %s, %v", out, err)
+		c.Fatalf("failed to get repo short ID: %s, %v", out, err)
 	}
 
-	cleanedShortImageID := stripTrailingCharacters(out)
+	cleanedShortImageID := strings.TrimSpace(out)
 
 	saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID)
 	tarCmd := exec.Command("tar", "t")
 	tarCmd.Stdin, err = saveCmd.StdoutPipe()
 	if err != nil {
-		t.Fatalf("cannot set stdout pipe for tar: %v", err)
+		c.Fatalf("cannot set stdout pipe for tar: %v", err)
 	}
 	grepCmd := exec.Command("grep", cleanedLongImageID)
 	grepCmd.Stdin, err = tarCmd.StdoutPipe()
 	if err != nil {
-		t.Fatalf("cannot set stdout pipe for grep: %v", err)
+		c.Fatalf("cannot set stdout pipe for grep: %v", err)
 	}
 
 	if err = tarCmd.Start(); err != nil {
-		t.Fatalf("tar failed with error: %v", err)
+		c.Fatalf("tar failed with error: %v", err)
 	}
 	if err = saveCmd.Start(); err != nil {
-		t.Fatalf("docker save failed with error: %v", err)
+		c.Fatalf("docker save failed with error: %v", err)
 	}
 	defer saveCmd.Wait()
 	defer tarCmd.Wait()
@@ -204,40 +182,31 @@
 	out, _, err = runCommandWithOutput(grepCmd)
 
 	if err != nil {
-		t.Fatalf("failed to save repo with image ID: %s, %v", out, err)
+		c.Fatalf("failed to save repo with image ID: %s, %v", out, err)
 	}
 
-	logDone("save - save a image by ID")
 }
 
 // save a repo and try to load it using flags
-func TestSaveAndLoadRepoFlags(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) {
+	name := "test-save-and-load-repo-flags"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to create a container: %s, %v", out, err)
+		c.Fatalf("failed to create a container: %s, %v", out, err)
 	}
-
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
-
 	repoName := "foobar-save-load-test"
 
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("output should've been a container id: %s, %v", out, err)
-	}
-
-	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
 	deleteImages(repoName)
 	if out, _, err = runCommandWithOutput(commitCmd); err != nil {
-		t.Fatalf("failed to commit container: %s, %v", out, err)
+		c.Fatalf("failed to commit container: %s, %v", out, err)
 	}
 
-	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
 	before, _, err := runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("the repo should exist before saving it: %s, %v", before, err)
+		c.Fatalf("the repo should exist before saving it: %s, %v", before, err)
 
 	}
 
@@ -245,39 +214,35 @@
 		exec.Command(dockerBinary, "save", repoName),
 		exec.Command(dockerBinary, "load"))
 	if err != nil {
-		t.Fatalf("failed to save and load repo: %s, %v", out, err)
+		c.Fatalf("failed to save and load repo: %s, %v", out, err)
 	}
 
 	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
 	after, _, err := runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("the repo should exist after loading it: %s, %v", after, err)
+		c.Fatalf("the repo should exist after loading it: %s, %v", after, err)
 	}
 
 	if before != after {
-		t.Fatalf("inspect is not the same after a save / load")
+		c.Fatalf("inspect is not the same after a save / load")
 	}
-
-	logDone("save - save a repo using -o && load a repo using -i")
 }
 
-func TestSaveMultipleNames(t *testing.T) {
+func (s *DockerSuite) TestSaveMultipleNames(c *check.C) {
 	repoName := "foobar-save-multi-name-test"
 
 	// Make one image
 	tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName))
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatalf("failed to tag repo: %s, %v", out, err)
+		c.Fatalf("failed to tag repo: %s, %v", out, err)
 	}
-	defer deleteImages(repoName + "-one")
 
 	// Make two images
 	tagCmd = exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName))
 	out, _, err := runCommandWithOutput(tagCmd)
 	if err != nil {
-		t.Fatalf("failed to tag repo: %s, %v", out, err)
+		c.Fatalf("failed to tag repo: %s, %v", out, err)
 	}
-	defer deleteImages(repoName + "-two")
 
 	out, _, err = runCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)),
@@ -285,13 +250,12 @@
 		exec.Command("grep", "-q", "-E", "(-one|-two)"),
 	)
 	if err != nil {
-		t.Fatalf("failed to save multiple repos: %s, %v", out, err)
+		c.Fatalf("failed to save multiple repos: %s, %v", out, err)
 	}
 
-	logDone("save - save by multiple names")
 }
 
-func TestSaveRepoWithMultipleImages(t *testing.T) {
+func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) {
 
 	makeImage := func(from string, tag string) string {
 		runCmd := exec.Command(dockerBinary, "run", "-d", from, "true")
@@ -300,16 +264,15 @@
 			err error
 		)
 		if out, _, err = runCommandWithOutput(runCmd); err != nil {
-			t.Fatalf("failed to create a container: %v %v", out, err)
+			c.Fatalf("failed to create a container: %v %v", out, err)
 		}
-		cleanedContainerID := stripTrailingCharacters(out)
-		defer deleteContainer(cleanedContainerID)
+		cleanedContainerID := strings.TrimSpace(out)
 
 		commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag)
 		if out, _, err = runCommandWithOutput(commitCmd); err != nil {
-			t.Fatalf("failed to commit container: %v %v", out, err)
+			c.Fatalf("failed to commit container: %v %v", out, err)
 		}
-		imageID := stripTrailingCharacters(out)
+		imageID := strings.TrimSpace(out)
 		return imageID
 	}
 
@@ -318,9 +281,7 @@
 	tagBar := repoName + ":bar"
 
 	idFoo := makeImage("busybox:latest", tagFoo)
-	defer deleteImages(idFoo)
 	idBar := makeImage("busybox:latest", tagBar)
-	defer deleteImages(idBar)
 
 	deleteImages(repoName)
 
@@ -331,61 +292,59 @@
 		exec.Command("grep", "VERSION"),
 		exec.Command("cut", "-d", "/", "-f1"))
 	if err != nil {
-		t.Fatalf("failed to save multiple images: %s, %v", out, err)
+		c.Fatalf("failed to save multiple images: %s, %v", out, err)
 	}
-	actual := strings.Split(stripTrailingCharacters(out), "\n")
+	actual := strings.Split(strings.TrimSpace(out), "\n")
 
 	// make the list of expected layers
 	out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "history", "-q", "--no-trunc", "busybox:latest"))
 	if err != nil {
-		t.Fatalf("failed to get history: %s, %v", out, err)
+		c.Fatalf("failed to get history: %s, %v", out, err)
 	}
 
-	expected := append(strings.Split(stripTrailingCharacters(out), "\n"), idFoo, idBar)
+	expected := append(strings.Split(strings.TrimSpace(out), "\n"), idFoo, idBar)
 
 	sort.Strings(actual)
 	sort.Strings(expected)
 	if !reflect.DeepEqual(expected, actual) {
-		t.Fatalf("achive does not contains the right layers: got %v, expected %v", actual, expected)
+		c.Fatalf("archive does not contains the right layers: got %v, expected %v", actual, expected)
 	}
 
-	logDone("save - save repository with multiple images")
 }
 
 // Issue #6722 #5892 ensure directories are included in changes
-func TestSaveDirectoryPermissions(t *testing.T) {
+func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) {
 	layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
 	layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
 
 	name := "save-directory-permissions"
 	tmpDir, err := ioutil.TempDir("", "save-layers-with-directories")
 	if err != nil {
-		t.Errorf("failed to create temporary directory: %s", err)
+		c.Errorf("failed to create temporary directory: %s", err)
 	}
 	extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir")
 	os.Mkdir(extractionDirectory, 0777)
 
 	defer os.RemoveAll(tmpDir)
-	defer deleteImages(name)
 	_, err = buildImage(name,
 		`FROM busybox
 	RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a
 	RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`,
 		true)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	if out, _, err := runCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "save", name),
 		exec.Command("tar", "-xf", "-", "-C", extractionDirectory),
 	); err != nil {
-		t.Errorf("failed to save and extract image: %s", out)
+		c.Errorf("failed to save and extract image: %s", out)
 	}
 
 	dirs, err := ioutil.ReadDir(extractionDirectory)
 	if err != nil {
-		t.Errorf("failed to get a listing of the layer directories: %s", err)
+		c.Errorf("failed to get a listing of the layer directories: %s", err)
 	}
 
 	found := false
@@ -396,7 +355,7 @@
 
 			f, err := os.Open(layerPath)
 			if err != nil {
-				t.Fatalf("failed to open %s: %s", layerPath, err)
+				c.Fatalf("failed to open %s: %s", layerPath, err)
 			}
 
 			entries, err := ListTar(f)
@@ -406,7 +365,7 @@
 				}
 			}
 			if err != nil {
-				t.Fatalf("encountered error while listing tar entries: %s", err)
+				c.Fatalf("encountered error while listing tar entries: %s", err)
 			}
 
 			if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) {
@@ -417,8 +376,7 @@
 	}
 
 	if !found {
-		t.Fatalf("failed to find the layer with the right content listing")
+		c.Fatalf("failed to find the layer with the right content listing")
 	}
 
-	logDone("save - ensure directories exist in exported layers")
 }
diff --git a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go
index c670487..5a6f580 100644
--- a/integration-cli/docker_cli_save_load_unix_test.go
+++ b/integration-cli/docker_cli_save_load_unix_test.go
@@ -4,99 +4,95 @@
 
 import (
 	"bytes"
-	"fmt"
+	"io/ioutil"
 	"os"
 	"os/exec"
-	"testing"
 
 	"github.com/docker/docker/vendor/src/github.com/kr/pty"
+	"github.com/go-check/check"
 )
 
 // save a repo and try to load it using stdout
-func TestSaveAndLoadRepoStdout(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) {
+	name := "test-save-and-load-repo-stdout"
+	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to create a container: %s, %v", out, err)
+		c.Fatalf("failed to create a container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-
 	repoName := "foobar-save-load-test"
 
-	inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
-	if out, _, err = runCommandWithOutput(inspectCmd); err != nil {
-		t.Fatalf("output should've been a container id: %s, %v", out, err)
-	}
-
-	commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+	commitCmd := exec.Command(dockerBinary, "commit", name, repoName)
 	if out, _, err = runCommandWithOutput(commitCmd); err != nil {
-		t.Fatalf("failed to commit container: %s, %v", out, err)
+		c.Fatalf("failed to commit container: %s, %v", out, err)
 	}
 
-	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+	inspectCmd := exec.Command(dockerBinary, "inspect", repoName)
 	before, _, err := runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("the repo should exist before saving it: %s, %v", before, err)
+		c.Fatalf("the repo should exist before saving it: %s, %v", before, err)
 	}
 
-	saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar`
-	saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName)
-	saveCmd := exec.Command("bash", "-c", saveCmdFinal)
-	if out, _, err = runCommandWithOutput(saveCmd); err != nil {
-		t.Fatalf("failed to save repo: %s, %v", out, err)
+	tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar")
+	c.Assert(err, check.IsNil)
+	defer os.Remove(tmpFile.Name())
+
+	saveCmd := exec.Command(dockerBinary, "save", repoName)
+	saveCmd.Stdout = tmpFile
+
+	if _, err = runCommand(saveCmd); err != nil {
+		c.Fatalf("failed to save repo: %v", err)
 	}
 
+	tmpFile, err = os.Open(tmpFile.Name())
+	c.Assert(err, check.IsNil)
+
 	deleteImages(repoName)
 
-	loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load`
-	loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+	loadCmd := exec.Command(dockerBinary, "load")
+	loadCmd.Stdin = tmpFile
+
 	if out, _, err = runCommandWithOutput(loadCmd); err != nil {
-		t.Fatalf("failed to load repo: %s, %v", out, err)
+		c.Fatalf("failed to load repo: %s, %v", out, err)
 	}
 
 	inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
 	after, _, err := runCommandWithOutput(inspectCmd)
 	if err != nil {
-		t.Fatalf("the repo should exist after loading it: %s %v", after, err)
+		c.Fatalf("the repo should exist after loading it: %s %v", after, err)
 	}
 
 	if before != after {
-		t.Fatalf("inspect is not the same after a save / load")
+		c.Fatalf("inspect is not the same after a save / load")
 	}
 
-	deleteContainer(cleanedContainerID)
 	deleteImages(repoName)
 
-	os.Remove("/tmp/foobar-save-load-test.tar")
-
-	logDone("save - save/load a repo using stdout")
-
 	pty, tty, err := pty.Open()
 	if err != nil {
-		t.Fatalf("Could not open pty: %v", err)
+		c.Fatalf("Could not open pty: %v", err)
 	}
 	cmd := exec.Command(dockerBinary, "save", repoName)
 	cmd.Stdin = tty
 	cmd.Stdout = tty
 	cmd.Stderr = tty
 	if err := cmd.Start(); err != nil {
-		t.Fatalf("start err: %v", err)
+		c.Fatalf("start err: %v", err)
 	}
 	if err := cmd.Wait(); err == nil {
-		t.Fatal("did not break writing to a TTY")
+		c.Fatal("did not break writing to a TTY")
 	}
 
 	buf := make([]byte, 1024)
 
 	n, err := pty.Read(buf)
 	if err != nil {
-		t.Fatal("could not read tty output")
+		c.Fatal("could not read tty output")
 	}
 
 	if !bytes.Contains(buf[:n], []byte("Cowardly refusing")) {
-		t.Fatal("help output is not being yielded", out)
+		c.Fatal("help output is not being yielded", out)
 	}
 
-	logDone("save - do not save to a tty")
 }
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
index fafb5df..da298a1 100644
--- a/integration-cli/docker_cli_search_test.go
+++ b/integration-cli/docker_cli_search_test.go
@@ -3,20 +3,103 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // search for repos named  "registry" on the central registry
-func TestSearchOnCentralRegistry(t *testing.T) {
+func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) {
+	testRequires(c, Network)
 	searchCmd := exec.Command(dockerBinary, "search", "busybox")
 	out, exitCode, err := runCommandWithOutput(searchCmd)
 	if err != nil || exitCode != 0 {
-		t.Fatalf("failed to search on the central registry: %s, %v", out, err)
+		c.Fatalf("failed to search on the central registry: %s, %v", out, err)
 	}
 
 	if !strings.Contains(out, "Busybox base image.") {
-		t.Fatal("couldn't find any repository named (or containing) 'Busybox base image.'")
+		c.Fatal("couldn't find any repository named (or containing) 'Busybox base image.'")
 	}
 
-	logDone("search - search for repositories named (or containing) 'Busybox base image.'")
+}
+
+func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) {
+	searchCmdStarsChars := exec.Command(dockerBinary, "search", "--stars=a", "busybox")
+	out, exitCode, err := runCommandWithOutput(searchCmdStarsChars)
+	if err == nil || exitCode == 0 {
+		c.Fatalf("Should not get right information: %s, %v", out, err)
+	}
+
+	if !strings.Contains(out, "invalid value") {
+		c.Fatal("couldn't find the invalid value warning")
+	}
+
+	searchCmdStarsNegativeNumber := exec.Command(dockerBinary, "search", "-s=-1", "busybox")
+	out, exitCode, err = runCommandWithOutput(searchCmdStarsNegativeNumber)
+	if err == nil || exitCode == 0 {
+		c.Fatalf("Should not get right information: %s, %v", out, err)
+	}
+
+	if !strings.Contains(out, "invalid value") {
+		c.Fatal("couldn't find the invalid value warning")
+	}
+
+}
+
+func (s *DockerSuite) TestSearchCmdOptions(c *check.C) {
+	testRequires(c, Network)
+	searchCmdhelp := exec.Command(dockerBinary, "search", "--help")
+	out, exitCode, err := runCommandWithOutput(searchCmdhelp)
+	if err != nil || exitCode != 0 {
+		c.Fatalf("failed to get search help information: %s, %v", out, err)
+	}
+
+	if !strings.Contains(out, "Usage: docker search [OPTIONS] TERM") {
+		c.Fatalf("failed to show docker search usage: %s, %v", out, err)
+	}
+
+	searchCmd := exec.Command(dockerBinary, "search", "busybox")
+	outSearchCmd, exitCode, err := runCommandWithOutput(searchCmd)
+	if err != nil || exitCode != 0 {
+		c.Fatalf("failed to search on the central registry: %s, %v", outSearchCmd, err)
+	}
+
+	searchCmdNotrunc := exec.Command(dockerBinary, "search", "--no-trunc=true", "busybox")
+	outSearchCmdNotrunc, _, err := runCommandWithOutput(searchCmdNotrunc)
+	if err != nil {
+		c.Fatalf("failed to search on the central registry: %s, %v", outSearchCmdNotrunc, err)
+	}
+
+	if len(outSearchCmd) > len(outSearchCmdNotrunc) {
+		c.Fatalf("The no-trunc option can't take effect.")
+	}
+
+	searchCmdautomated := exec.Command(dockerBinary, "search", "--automated=true", "busybox")
+	outSearchCmdautomated, exitCode, err := runCommandWithOutput(searchCmdautomated) //The busybox is a busybox base image, not an AUTOMATED image.
+	if err != nil || exitCode != 0 {
+		c.Fatalf("failed to search with automated=true on the central registry: %s, %v", outSearchCmdautomated, err)
+	}
+
+	outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n")
+	for i := range outSearchCmdautomatedSlice {
+		if strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox ") {
+			c.Fatalf("The busybox is not an AUTOMATED image: %s, %v", out, err)
+		}
+	}
+
+	searchCmdStars := exec.Command(dockerBinary, "search", "-s=2", "busybox")
+	outSearchCmdStars, exitCode, err := runCommandWithOutput(searchCmdStars)
+	if err != nil || exitCode != 0 {
+		c.Fatalf("failed to search with stars=2 on the central registry: %s, %v", outSearchCmdStars, err)
+	}
+
+	if strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]") {
+		c.Fatalf("The quantity of images with stars should be less than that of all images: %s, %v", outSearchCmdStars, err)
+	}
+
+	searchCmdOptions := exec.Command(dockerBinary, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox")
+	out, exitCode, err = runCommandWithOutput(searchCmdOptions)
+	if err != nil || exitCode != 0 {
+		c.Fatalf("failed to search with stars&automated&no-trunc options on the central registry: %s, %v", out, err)
+	}
+
 }
diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go
index 3ec04c9..0475826 100644
--- a/integration-cli/docker_cli_start_test.go
+++ b/integration-cli/docker_cli_start_test.go
@@ -4,253 +4,200 @@
 	"fmt"
 	"os/exec"
 	"strings"
-	"testing"
 	"time"
+
+	"github.com/go-check/check"
 )
 
 // Regression test for https://github.com/docker/docker/issues/7843
-func TestStartAttachReturnsOnError(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) {
 
-	dockerCmd(t, "run", "-d", "--name", "test", "busybox")
-	dockerCmd(t, "wait", "test")
+	dockerCmd(c, "run", "-d", "--name", "test", "busybox")
+	dockerCmd(c, "wait", "test")
 
 	// Expect this to fail because the above container is stopped, this is what we want
 	if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil {
-		t.Fatal("Expected error but got none")
+		c.Fatal("Expected error but got none")
 	}
 
-	ch := make(chan struct{})
+	ch := make(chan error)
 	go func() {
 		// Attempt to start attached to the container that won't start
 		// This should return an error immediately since the container can't be started
 		if _, err := runCommand(exec.Command(dockerBinary, "start", "-a", "test2")); err == nil {
-			t.Fatal("Expected error but got none")
+			ch <- fmt.Errorf("Expected error but got none")
 		}
 		close(ch)
 	}()
 
 	select {
-	case <-ch:
+	case err := <-ch:
+		c.Assert(err, check.IsNil)
 	case <-time.After(time.Second):
-		t.Fatalf("Attach did not exit properly")
+		c.Fatalf("Attach did not exit properly")
 	}
 
-	logDone("start - error on start with attach exits")
 }
 
 // gh#8555: Exit code should be passed through when using start -a
-func TestStartAttachCorrectExitCode(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) {
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
-	out = stripTrailingCharacters(out)
+	out = strings.TrimSpace(out)
 
 	// make sure the container has exited before trying the "start -a"
 	waitCmd := exec.Command(dockerBinary, "wait", out)
 	if _, _, err = runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("Failed to wait on container: %v", err)
+		c.Fatalf("Failed to wait on container: %v", err)
 	}
 
 	startCmd := exec.Command(dockerBinary, "start", "-a", out)
 	startOut, exitCode, err := runCommandWithOutput(startCmd)
 	if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) {
-		t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut)
+		c.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut)
 	}
 	if exitCode != 1 {
-		t.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode)
+		c.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode)
 	}
 
-	logDone("start - correct exit code returned with -a")
 }
 
-func TestStartSilentAttach(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestStartAttachSilent(c *check.C) {
 
 	name := "teststartattachcorrectexitcode"
 	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "echo", "test")
 	out, _, _, err := runCommandWithStdoutStderr(runCmd)
 	if err != nil {
-		t.Fatalf("failed to run container: %v, output: %q", err, out)
+		c.Fatalf("failed to run container: %v, output: %q", err, out)
 	}
 
 	// make sure the container has exited before trying the "start -a"
 	waitCmd := exec.Command(dockerBinary, "wait", name)
 	if _, _, err = runCommandWithOutput(waitCmd); err != nil {
-		t.Fatalf("wait command failed with error: %v", err)
+		c.Fatalf("wait command failed with error: %v", err)
 	}
 
 	startCmd := exec.Command(dockerBinary, "start", "-a", name)
 	startOut, _, err := runCommandWithOutput(startCmd)
 	if err != nil {
-		t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut)
+		c.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut)
 	}
 	if expected := "test\n"; startOut != expected {
-		t.Fatalf("start -a produced unexpected output: expected %q, got %q", expected, startOut)
+		c.Fatalf("start -a produced unexpected output: expected %q, got %q", expected, startOut)
 	}
 
-	logDone("start - don't echo container ID when attaching")
 }
 
-func TestStartRecordError(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestStartRecordError(c *check.C) {
 
 	// when container runs successfully, we should not have state.Error
-	dockerCmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
+	dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
 	stateErr, err := inspectField("test", "State.Error")
-	if err != nil {
-		t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
-	}
+	c.Assert(err, check.IsNil)
 	if stateErr != "" {
-		t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr)
+		c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr)
 	}
 
 	// Expect this to fail and records error because of ports conflict
 	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top"))
 	if err == nil {
-		t.Fatalf("Expected error but got none, output %q", out)
+		c.Fatalf("Expected error but got none, output %q", out)
 	}
 	stateErr, err = inspectField("test2", "State.Error")
-	if err != nil {
-		t.Fatalf("Failed to inspect %q state's error, got error %q", "test2", err)
-	}
+	c.Assert(err, check.IsNil)
 	expected := "port is already allocated"
 	if stateErr == "" || !strings.Contains(stateErr, expected) {
-		t.Fatalf("State.Error(%q) does not include %q", stateErr, expected)
+		c.Fatalf("State.Error(%q) does not include %q", stateErr, expected)
 	}
 
 	// Expect the conflict to be resolved when we stop the initial container
-	dockerCmd(t, "stop", "test")
-	dockerCmd(t, "start", "test2")
+	dockerCmd(c, "stop", "test")
+	dockerCmd(c, "start", "test2")
 	stateErr, err = inspectField("test2", "State.Error")
-	if err != nil {
-		t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err)
-	}
+	c.Assert(err, check.IsNil)
 	if stateErr != "" {
-		t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr)
+		c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr)
 	}
 
-	logDone("start - set state error when start is unsuccessful")
 }
 
-// gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s
-func TestStartVolumesFromFailsCleanly(t *testing.T) {
-	defer deleteAllContainers()
-
-	// Create the first data volume
-	dockerCmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox")
-
-	// Expect this to fail because the data test after contaienr doesn't exist yet
-	if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
-		t.Fatal("Expected error but got none")
-	}
-
-	// Create the second data volume
-	dockerCmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox")
-
-	// Now, all the volumes should be there
-	dockerCmd(t, "start", "consumer")
-
-	// Check that we have the volumes we want
-	out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer")
-	n_volumes := strings.Trim(out, " \r\n'")
-	if n_volumes != "2" {
-		t.Fatalf("Missing volumes: expected 2, got %s", n_volumes)
-	}
-
-	logDone("start - missing containers in --volumes-from did not affect subsequent runs")
-}
-
-func TestStartPausedContainer(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
 	defer unpauseAllContainers()
 
 	runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "pause", "testing")
 	if out, _, err := runCommandWithOutput(runCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 
 	runCmd = exec.Command(dockerBinary, "start", "testing")
 	if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Cannot start a paused container, try unpause instead.") {
-		t.Fatalf("an error should have been shown that you cannot start paused container: %s\n%v", out, err)
+		c.Fatalf("an error should have been shown that you cannot start paused container: %s\n%v", out, err)
 	}
 
-	logDone("start - error should show if trying to start paused container")
 }
 
-func TestStartMultipleContainers(t *testing.T) {
-	defer deleteAllContainers()
+func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
 	// run a container named 'parent' and create two container link to `parent`
 	cmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	for _, container := range []string{"child_first", "child_second"} {
 		cmd = exec.Command(dockerBinary, "create", "--name", container, "--link", "parent:parent", "busybox", "top")
 		if out, _, err := runCommandWithOutput(cmd); err != nil {
-			t.Fatal(out, err)
+			c.Fatal(out, err)
 		}
 	}
 
 	// stop 'parent' container
 	cmd = exec.Command(dockerBinary, "stop", "parent")
 	if out, _, err := runCommandWithOutput(cmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", "parent")
-	out, _, err := runCommandWithOutput(cmd)
-	if err != nil {
-		t.Fatal(out, err)
-	}
-	out = strings.Trim(out, "\r\n")
+	out, err := inspectField("parent", "State.Running")
+	c.Assert(err, check.IsNil)
 	if out != "false" {
-		t.Fatal("Container should be stopped")
+		c.Fatal("Container should be stopped")
 	}
 
-	// start all the three containers, container `child_first` start first which should be faild
+	// start all the three containers, container `child_first` start first which should be failed
 	// container 'parent' start second and then start container 'child_second'
 	cmd = exec.Command(dockerBinary, "start", "child_first", "parent", "child_second")
 	out, _, err = runCommandWithOutput(cmd)
 	if !strings.Contains(out, "Cannot start container child_first") || err == nil {
-		t.Fatal("Expected error but got none")
+		c.Fatal("Expected error but got none")
 	}
 
 	for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} {
-		cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container)
-		out, _, err = runCommandWithOutput(cmd)
-		if err != nil {
-			t.Fatal(out, err)
-		}
-		out = strings.Trim(out, "\r\n")
+		out, err := inspectField(container, "State.Running")
+		c.Assert(err, check.IsNil)
 		if out != expected {
-			t.Fatal("Container running state wrong")
+			c.Fatal("Container running state wrong")
 		}
 
 	}
 
-	logDone("start - start multiple containers continue on one failed")
 }
 
-func TestStartAttachMultipleContainers(t *testing.T) {
+func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) {
 
 	var cmd *exec.Cmd
 
-	defer deleteAllContainers()
 	// run  multiple containers to test
 	for _, container := range []string{"test1", "test2", "test3"} {
 		cmd = exec.Command(dockerBinary, "run", "-d", "--name", container, "busybox", "top")
 		if out, _, err := runCommandWithOutput(cmd); err != nil {
-			t.Fatal(out, err)
+			c.Fatal(out, err)
 		}
 	}
 
@@ -258,7 +205,7 @@
 	for _, container := range []string{"test1", "test2", "test3"} {
 		cmd = exec.Command(dockerBinary, "stop", container)
 		if out, _, err := runCommandWithOutput(cmd); err != nil {
-			t.Fatal(out, err)
+			c.Fatal(out, err)
 		}
 	}
 
@@ -267,22 +214,19 @@
 		cmd = exec.Command(dockerBinary, "start", option, "test1", "test2", "test3")
 		out, _, err := runCommandWithOutput(cmd)
 		if !strings.Contains(out, "You cannot start and attach multiple containers at once.") || err == nil {
-			t.Fatal("Expected error but got none")
+			c.Fatal("Expected error but got none")
 		}
 	}
 
 	// confirm the state of all the containers be stopped
 	for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} {
-		cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container)
-		out, _, err := runCommandWithOutput(cmd)
+		out, err := inspectField(container, "State.Running")
 		if err != nil {
-			t.Fatal(out, err)
+			c.Fatal(out, err)
 		}
-		out = strings.Trim(out, "\r\n")
 		if out != expected {
-			t.Fatal("Container running state wrong")
+			c.Fatal("Container running state wrong")
 		}
 	}
 
-	logDone("start - error on start and attach multiple containers at once")
 }
diff --git a/integration-cli/docker_cli_start_volume_driver_unix_test.go b/integration-cli/docker_cli_start_volume_driver_unix_test.go
new file mode 100644
index 0000000..1cc9080
--- /dev/null
+++ b/integration-cli/docker_cli_start_volume_driver_unix_test.go
@@ -0,0 +1,249 @@
+// +build experimental
+// +build !windows
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-check/check"
+)
+
+func init() {
+	check.Suite(&DockerExternalVolumeSuite{
+		ds: &DockerSuite{},
+	})
+}
+
+type eventCounter struct {
+	activations int
+	creations   int
+	removals    int
+	mounts      int
+	unmounts    int
+	paths       int
+}
+
+type DockerExternalVolumeSuite struct {
+	server *httptest.Server
+	ds     *DockerSuite
+	d      *Daemon
+	ec     *eventCounter
+}
+
+func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) {
+	s.d = NewDaemon(c)
+	s.ds.SetUpTest(c)
+	s.ec = &eventCounter{}
+
+}
+
+func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) {
+	s.d.Stop()
+	s.ds.TearDownTest(c)
+}
+
+func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) {
+	mux := http.NewServeMux()
+	s.server = httptest.NewServer(mux)
+
+	type pluginRequest struct {
+		name string
+	}
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.activations++
+
+		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.creations++
+
+		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.removals++
+
+		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{}`)
+	})
+
+	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.paths++
+
+		var pr pluginRequest
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		p := hostVolumePath(pr.name)
+
+		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
+	})
+
+	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.mounts++
+
+		var pr pluginRequest
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		p := hostVolumePath(pr.name)
+		if err := os.MkdirAll(p, 0755); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, fmt.Sprintf("{\"Mountpoint\": \"%s\"}", p))
+	})
+
+	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
+		s.ec.unmounts++
+
+		var pr pluginRequest
+		if err := json.NewDecoder(r.Body).Decode(&pr); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		p := hostVolumePath(pr.name)
+		if err := os.RemoveAll(p); err != nil {
+			http.Error(w, err.Error(), 500)
+		}
+
+		w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{}`)
+	})
+
+	if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+		c.Fatal(err)
+	}
+
+	if err := ioutil.WriteFile("/usr/share/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644); err != nil {
+		c.Fatal(err)
+	}
+}
+
+func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) {
+	s.server.Close()
+
+	if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+		c.Fatal(err)
+	}
+}
+
+func (s *DockerExternalVolumeSuite) TestStartExternalNamedVolumeDriver(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if !strings.Contains(out, s.server.URL) {
+		c.Fatalf("External volume mount failed. Output: %s\n", out)
+	}
+
+	p := hostVolumePath("external-volume-test")
+	_, err = os.Lstat(p)
+	if err == nil {
+		c.Fatalf("Expected error checking volume path in host: %s\n", p)
+	}
+
+	if !os.IsNotExist(err) {
+		c.Fatalf("Expected volume path in host to not exist: %s, %v\n", p, err)
+	}
+
+	c.Assert(s.ec.activations, check.Equals, 1)
+	c.Assert(s.ec.creations, check.Equals, 1)
+	c.Assert(s.ec.removals, check.Equals, 1)
+	c.Assert(s.ec.mounts, check.Equals, 1)
+	c.Assert(s.ec.unmounts, check.Equals, 1)
+}
+
+func (s *DockerExternalVolumeSuite) TestStartExternalVolumeUnnamedDriver(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
+	}
+
+	out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test")
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if !strings.Contains(out, s.server.URL) {
+		c.Fatalf("External volume mount failed. Output: %s\n", out)
+	}
+
+	c.Assert(s.ec.activations, check.Equals, 1)
+	c.Assert(s.ec.creations, check.Equals, 1)
+	c.Assert(s.ec.removals, check.Equals, 1)
+	c.Assert(s.ec.mounts, check.Equals, 1)
+	c.Assert(s.ec.unmounts, check.Equals, 1)
+}
+
+func (s DockerExternalVolumeSuite) TestStartExternalVolumeDriverVolumesFrom(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
+	}
+
+	if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
+		c.Fatal(err)
+	}
+
+	if _, err := s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp"); err != nil {
+		c.Fatal(err)
+	}
+
+	if _, err := s.d.Cmd("rm", "-f", "vol-test1"); err != nil {
+		c.Fatal(err)
+	}
+
+	c.Assert(s.ec.activations, check.Equals, 1)
+	c.Assert(s.ec.creations, check.Equals, 2)
+	c.Assert(s.ec.removals, check.Equals, 1)
+	c.Assert(s.ec.mounts, check.Equals, 2)
+	c.Assert(s.ec.unmounts, check.Equals, 2)
+}
+
+func (s DockerExternalVolumeSuite) TestStartExternalVolumeDriverDeleteContainer(c *check.C) {
+	if err := s.d.StartWithBusybox(); err != nil {
+		c.Fatal(err)
+	}
+
+	if _, err := s.d.Cmd("run", "-d", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest"); err != nil {
+		c.Fatal(err)
+	}
+
+	if _, err := s.d.Cmd("rm", "-fv", "vol-test1"); err != nil {
+		c.Fatal(err)
+	}
+
+	c.Assert(s.ec.activations, check.Equals, 1)
+	c.Assert(s.ec.creations, check.Equals, 1)
+	c.Assert(s.ec.removals, check.Equals, 1)
+	c.Assert(s.ec.mounts, check.Equals, 1)
+	c.Assert(s.ec.unmounts, check.Equals, 1)
+}
+
+func hostVolumePath(name string) string {
+	return fmt.Sprintf("/var/lib/docker/volumes/%s", name)
+}
diff --git a/integration-cli/docker_cli_stats_test.go b/integration-cli/docker_cli_stats_test.go
new file mode 100644
index 0000000..7664de5
--- /dev/null
+++ b/integration-cli/docker_cli_stats_test.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestCliStatsNoStream(c *check.C) {
+	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "top"))
+	if err != nil {
+		c.Fatalf("Error on container creation: %v, output: %s", err, out)
+	}
+	id := strings.TrimSpace(out)
+	if err := waitRun(id); err != nil {
+		c.Fatalf("error waiting for container to start: %v", err)
+	}
+
+	statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id)
+	chErr := make(chan error)
+	go func() {
+		chErr <- statsCmd.Run()
+	}()
+
+	select {
+	case err := <-chErr:
+		if err != nil {
+			c.Fatalf("Error running stats: %v", err)
+		}
+	case <-time.After(2 * time.Second):
+		statsCmd.Process.Kill()
+		c.Fatalf("stats did not return immediately when not streaming")
+	}
+}
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
index 081e239..7db21a6 100644
--- a/integration-cli/docker_cli_tag_test.go
+++ b/integration-cli/docker_cli_tag_test.go
@@ -1,49 +1,37 @@
 package main
 
 import (
-	"fmt"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/go-check/check"
 )
 
 // tagging a named image in a new unprefixed repo should work
-func TestTagUnprefixedRepoByName(t *testing.T) {
+func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) {
 	if err := pullImageIfNotExist("busybox:latest"); err != nil {
-		t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+		c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
 	}
 
 	tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz")
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-
-	deleteImages("testfoobarbaz")
-
-	logDone("tag - busybox -> testfoobarbaz")
 }
 
 // tagging an image by ID in a new unprefixed repo should work
-func TestTagUnprefixedRepoByID(t *testing.T) {
-	getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox")
-	out, _, err := runCommandWithOutput(getIDCmd)
-	if err != nil {
-		t.Fatalf("failed to get the image ID of busybox: %s, %v", out, err)
+func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) {
+	imageID, err := inspectField("busybox", "Id")
+	c.Assert(err, check.IsNil)
+	tagCmd := exec.Command(dockerBinary, "tag", imageID, "testfoobarbaz")
+	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
+		c.Fatal(out, err)
 	}
-
-	cleanedImageID := stripTrailingCharacters(out)
-	tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz")
-	if out, _, err = runCommandWithOutput(tagCmd); err != nil {
-		t.Fatal(out, err)
-	}
-
-	deleteImages("testfoobarbaz")
-
-	logDone("tag - busybox's image ID -> testfoobarbaz")
 }
 
 // ensure we don't allow the use of invalid repository names; these tag operations should fail
-func TestTagInvalidUnprefixedRepo(t *testing.T) {
+func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) {
 
 	invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd"}
 
@@ -51,32 +39,30 @@
 		tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo)
 		_, _, err := runCommandWithOutput(tagCmd)
 		if err == nil {
-			t.Fatalf("tag busybox %v should have failed", repo)
+			c.Fatalf("tag busybox %v should have failed", repo)
 		}
 	}
-	logDone("tag - busybox invalid repo names --> must not work")
 }
 
 // ensure we don't allow the use of invalid tags; these tag operations should fail
-func TestTagInvalidPrefixedRepo(t *testing.T) {
-	long_tag := makeRandomString(121)
+func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) {
+	longTag := stringutils.GenerateRandomAlphaOnlyString(121)
 
-	invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", long_tag}
+	invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
 
 	for _, repotag := range invalidTags {
 		tagCmd := exec.Command(dockerBinary, "tag", "busybox", repotag)
 		_, _, err := runCommandWithOutput(tagCmd)
 		if err == nil {
-			t.Fatalf("tag busybox %v should have failed", repotag)
+			c.Fatalf("tag busybox %v should have failed", repotag)
 		}
 	}
-	logDone("tag - busybox with invalid repo:tagnames --> must not work")
 }
 
 // ensure we allow the use of valid tags
-func TestTagValidPrefixedRepo(t *testing.T) {
+func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) {
 	if err := pullImageIfNotExist("busybox:latest"); err != nil {
-		t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+		c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
 	}
 
 	validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t"}
@@ -85,57 +71,73 @@
 		tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", repo)
 		_, _, err := runCommandWithOutput(tagCmd)
 		if err != nil {
-			t.Errorf("tag busybox %v should have worked: %s", repo, err)
+			c.Errorf("tag busybox %v should have worked: %s", repo, err)
 			continue
 		}
 		deleteImages(repo)
-		logMessage := fmt.Sprintf("tag - busybox %v", repo)
-		logDone(logMessage)
 	}
 }
 
 // tag an image with an existed tag name without -f option should fail
-func TestTagExistedNameWithoutForce(t *testing.T) {
+func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) {
 	if err := pullImageIfNotExist("busybox:latest"); err != nil {
-		t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+		c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
 	}
 
 	tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test")
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test")
 	out, _, err := runCommandWithOutput(tagCmd)
 	if err == nil || !strings.Contains(out, "Conflict: Tag test is already set to image") {
-		t.Fatal("tag busybox busybox:test should have failed,because busybox:test is existed")
+		c.Fatal("tag busybox busybox:test should have failed,because busybox:test is existed")
 	}
-	deleteImages("busybox:test")
-
-	logDone("tag - busybox with an existed tag name without -f option --> must not work")
 }
 
 // tag an image with an existed tag name with -f option should work
-func TestTagExistedNameWithForce(t *testing.T) {
+func (s *DockerSuite) TestTagExistedNameWithForce(c *check.C) {
 	if err := pullImageIfNotExist("busybox:latest"); err != nil {
-		t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+		c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
 	}
 
 	tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test")
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
 	tagCmd = exec.Command(dockerBinary, "tag", "-f", "busybox:latest", "busybox:test")
 	if out, _, err := runCommandWithOutput(tagCmd); err != nil {
-		t.Fatal(out, err)
+		c.Fatal(out, err)
 	}
-	deleteImages("busybox:test")
+}
 
-	logDone("tag - busybox with an existed tag name with -f option work")
+func (s *DockerSuite) TestTagWithSuffixHyphen(c *check.C) {
+	if err := pullImageIfNotExist("busybox:latest"); err != nil {
+		c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
+	}
+	// test repository name begin with '-'
+	tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "-busybox:test")
+	out, _, err := runCommandWithOutput(tagCmd)
+	if err == nil || !strings.Contains(out, "Invalid repository name (-busybox). Cannot begin or end with a hyphen") {
+		c.Fatal("tag a name begin with '-' should failed")
+	}
+	// test namespace name begin with '-'
+	tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "-test/busybox:test")
+	out, _, err = runCommandWithOutput(tagCmd)
+	if err == nil || !strings.Contains(out, "Invalid namespace name (-test). Cannot begin or end with a hyphen") {
+		c.Fatal("tag a name begin with '-' should failed")
+	}
+	// test index name begin wiht '-'
+	tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "-index:5000/busybox:test")
+	out, _, err = runCommandWithOutput(tagCmd)
+	if err == nil || !strings.Contains(out, "Invalid index name (-index:5000). Cannot begin or end with a hyphen") {
+		c.Fatal("tag a name begin with '-' should failed")
+	}
 }
 
 // ensure tagging using official names works
 // ensure all tags result in the same name
-func TestTagOfficialNames(t *testing.T) {
+func (s *DockerSuite) TestTagOfficialNames(c *check.C) {
 	names := []string{
 		"docker.io/busybox",
 		"index.docker.io/busybox",
@@ -148,7 +150,7 @@
 		tagCmd := exec.Command(dockerBinary, "tag", "-f", "busybox:latest", name+":latest")
 		out, exitCode, err := runCommandWithOutput(tagCmd)
 		if err != nil || exitCode != 0 {
-			t.Errorf("tag busybox %v should have worked: %s, %s", name, err, out)
+			c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out)
 			continue
 		}
 
@@ -156,13 +158,10 @@
 		imagesCmd := exec.Command(dockerBinary, "images")
 		out, _, err = runCommandWithOutput(imagesCmd)
 		if err != nil {
-			t.Errorf("listing images failed with errors: %v, %s", err, out)
+			c.Errorf("listing images failed with errors: %v, %s", err, out)
 		} else if strings.Contains(out, name) {
-			t.Errorf("images should not have listed '%s'", name)
+			c.Errorf("images should not have listed '%s'", name)
 			deleteImages(name + ":latest")
-		} else {
-			logMessage := fmt.Sprintf("tag official name - busybox %v", name)
-			logDone(logMessage)
 		}
 	}
 
@@ -170,11 +169,9 @@
 		tagCmd := exec.Command(dockerBinary, "tag", "-f", name+":latest", "fooo/bar:latest")
 		_, exitCode, err := runCommandWithOutput(tagCmd)
 		if err != nil || exitCode != 0 {
-			t.Errorf("tag %v fooo/bar should have worked: %s", name, err)
+			c.Errorf("tag %v fooo/bar should have worked: %s", name, err)
 			continue
 		}
 		deleteImages("fooo/bar:latest")
-		logMessage := fmt.Sprintf("tag official name - %v fooo/bar", name)
-		logDone(logMessage)
 	}
 }
diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go
index de0d3d2..f28e20b 100644
--- a/integration-cli/docker_cli_top_test.go
+++ b/integration-cli/docker_cli_top_test.go
@@ -3,106 +3,99 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
-func TestTopMultipleArgs(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20")
+func (s *DockerSuite) TestTopMultipleArgs(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to start the container: %s, %v", out, err)
+		c.Fatalf("failed to start the container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
-	defer deleteContainer(cleanedContainerID)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	topCmd := exec.Command(dockerBinary, "top", cleanedContainerID, "-o", "pid")
 	out, _, err = runCommandWithOutput(topCmd)
 	if err != nil {
-		t.Fatalf("failed to run top: %s, %v", out, err)
+		c.Fatalf("failed to run top: %s, %v", out, err)
 	}
 
 	if !strings.Contains(out, "PID") {
-		t.Fatalf("did not see PID after top -o pid: %s", out)
+		c.Fatalf("did not see PID after top -o pid: %s", out)
 	}
 
-	logDone("top - multiple arguments")
 }
 
-func TestTopNonPrivileged(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20")
+func (s *DockerSuite) TestTopNonPrivileged(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to start the container: %s, %v", out, err)
+		c.Fatalf("failed to start the container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
 	out1, _, err := runCommandWithOutput(topCmd)
 	if err != nil {
-		t.Fatalf("failed to run top: %s, %v", out1, err)
+		c.Fatalf("failed to run top: %s, %v", out1, err)
 	}
 
 	topCmd = exec.Command(dockerBinary, "top", cleanedContainerID)
 	out2, _, err := runCommandWithOutput(topCmd)
 	if err != nil {
-		t.Fatalf("failed to run top: %s, %v", out2, err)
+		c.Fatalf("failed to run top: %s, %v", out2, err)
 	}
 
 	killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(killCmd); err != nil {
-		t.Fatalf("failed to kill container: %s, %v", out, err)
+		c.Fatalf("failed to kill container: %s, %v", out, err)
 	}
 
-	deleteContainer(cleanedContainerID)
-
-	if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") {
-		t.Fatal("top should've listed `sleep 20` in the process list, but failed twice")
-	} else if !strings.Contains(out1, "sleep 20") {
-		t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time")
-	} else if !strings.Contains(out2, "sleep 20") {
-		t.Fatal("top should've listed `sleep 20` in the process list, but failed the second itime")
+	if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") {
+		c.Fatal("top should've listed `top` in the process list, but failed twice")
+	} else if !strings.Contains(out1, "top") {
+		c.Fatal("top should've listed `top` in the process list, but failed the first time")
+	} else if !strings.Contains(out2, "top") {
+		c.Fatal("top should've listed `top` in the process list, but failed the second itime")
 	}
 
-	logDone("top - sleep process should be listed in non privileged mode")
 }
 
-func TestTopPrivileged(t *testing.T) {
-	runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "sleep", "20")
+func (s *DockerSuite) TestTopPrivileged(c *check.C) {
+	runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "top")
 	out, _, err := runCommandWithOutput(runCmd)
 	if err != nil {
-		t.Fatalf("failed to start the container: %s, %v", out, err)
+		c.Fatalf("failed to start the container: %s, %v", out, err)
 	}
 
-	cleanedContainerID := stripTrailingCharacters(out)
+	cleanedContainerID := strings.TrimSpace(out)
 
 	topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
 	out1, _, err := runCommandWithOutput(topCmd)
 	if err != nil {
-		t.Fatalf("failed to run top: %s, %v", out1, err)
+		c.Fatalf("failed to run top: %s, %v", out1, err)
 	}
 
 	topCmd = exec.Command(dockerBinary, "top", cleanedContainerID)
 	out2, _, err := runCommandWithOutput(topCmd)
 	if err != nil {
-		t.Fatalf("failed to run top: %s, %v", out2, err)
+		c.Fatalf("failed to run top: %s, %v", out2, err)
 	}
 
 	killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
 	if out, _, err = runCommandWithOutput(killCmd); err != nil {
-		t.Fatalf("failed to kill container: %s, %v", out, err)
+		c.Fatalf("failed to kill container: %s, %v", out, err)
 	}
 
-	deleteContainer(cleanedContainerID)
-
-	if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") {
-		t.Fatal("top should've listed `sleep 20` in the process list, but failed twice")
-	} else if !strings.Contains(out1, "sleep 20") {
-		t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time")
-	} else if !strings.Contains(out2, "sleep 20") {
-		t.Fatal("top should've listed `sleep 20` in the process list, but failed the second itime")
+	if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") {
+		c.Fatal("top should've listed `top` in the process list, but failed twice")
+	} else if !strings.Contains(out1, "top") {
+		c.Fatal("top should've listed `top` in the process list, but failed the first time")
+	} else if !strings.Contains(out2, "top") {
+		c.Fatal("top should've listed `top` in the process list, but failed the second itime")
 	}
 
-	logDone("top - sleep process should be listed in privileged mode")
 }
diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go
index ceaeba8..3616da9 100644
--- a/integration-cli/docker_cli_version_test.go
+++ b/integration-cli/docker_cli_version_test.go
@@ -3,15 +3,16 @@
 import (
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 // ensure docker version works
-func TestVersionEnsureSucceeds(t *testing.T) {
+func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) {
 	versionCmd := exec.Command(dockerBinary, "version")
 	out, _, err := runCommandWithOutput(versionCmd)
 	if err != nil {
-		t.Fatalf("failed to execute docker version: %s, %v", out, err)
+		c.Fatalf("failed to execute docker version: %s, %v", out, err)
 	}
 
 	stringsToCheck := []string{
@@ -29,9 +30,8 @@
 
 	for _, linePrefix := range stringsToCheck {
 		if !strings.Contains(out, linePrefix) {
-			t.Errorf("couldn't find string %v in output", linePrefix)
+			c.Errorf("couldn't find string %v in output", linePrefix)
 		}
 	}
 
-	logDone("version - verify that it works and that the output is properly formatted")
 }
diff --git a/integration-cli/docker_cli_wait_test.go b/integration-cli/docker_cli_wait_test.go
new file mode 100644
index 0000000..c6d469e
--- /dev/null
+++ b/integration-cli/docker_cli_wait_test.go
@@ -0,0 +1,142 @@
+package main
+
+import (
+	"bytes"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/go-check/check"
+)
+
+// non-blocking wait with 0 exit code
+func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) {
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "true")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	containerID := strings.TrimSpace(out)
+
+	status := "true"
+	for i := 0; status != "false"; i++ {
+		status, err = inspectField(containerID, "State.Running")
+		c.Assert(err, check.IsNil)
+
+		time.Sleep(time.Second)
+		if i >= 60 {
+			c.Fatal("Container should have stopped by now")
+		}
+	}
+
+	runCmd = exec.Command(dockerBinary, "wait", containerID)
+	out, _, err = runCommandWithOutput(runCmd)
+
+	if err != nil || strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out, err)
+	}
+
+}
+
+// blocking wait with 0 exit code
+func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do sleep 0.01; done")
+	containerID := strings.TrimSpace(out)
+
+	if err := waitRun(containerID); err != nil {
+		c.Fatal(err)
+	}
+
+	chWait := make(chan string)
+	go func() {
+		out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID))
+		chWait <- out
+	}()
+
+	time.Sleep(100 * time.Millisecond)
+	dockerCmd(c, "stop", containerID)
+
+	select {
+	case status := <-chWait:
+		if strings.TrimSpace(status) != "0" {
+			c.Fatalf("expected exit 0, got %s", status)
+		}
+	case <-time.After(2 * time.Second):
+		c.Fatal("timeout waiting for `docker wait` to exit")
+	}
+
+}
+
+// non-blocking wait with random exit code
+func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) {
+
+	runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "exit 99")
+	out, _, err := runCommandWithOutput(runCmd)
+	if err != nil {
+		c.Fatal(out, err)
+	}
+	containerID := strings.TrimSpace(out)
+
+	status := "true"
+	for i := 0; status != "false"; i++ {
+		status, err = inspectField(containerID, "State.Running")
+		c.Assert(err, check.IsNil)
+
+		time.Sleep(time.Second)
+		if i >= 60 {
+			c.Fatal("Container should have stopped by now")
+		}
+	}
+
+	runCmd = exec.Command(dockerBinary, "wait", containerID)
+	out, _, err = runCommandWithOutput(runCmd)
+
+	if err != nil || strings.TrimSpace(out) != "99" {
+		c.Fatal("failed to set up container", out, err)
+	}
+
+}
+
+// blocking wait with random exit code
+func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) {
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do sleep 0.01; done")
+	containerID := strings.TrimSpace(out)
+	if err := waitRun(containerID); err != nil {
+		c.Fatal(err)
+	}
+	if err := waitRun(containerID); err != nil {
+		c.Fatal(err)
+	}
+
+	chWait := make(chan error)
+	waitCmd := exec.Command(dockerBinary, "wait", containerID)
+	waitCmdOut := bytes.NewBuffer(nil)
+	waitCmd.Stdout = waitCmdOut
+	if err := waitCmd.Start(); err != nil {
+		c.Fatal(err)
+	}
+
+	go func() {
+		chWait <- waitCmd.Wait()
+	}()
+
+	dockerCmd(c, "stop", containerID)
+
+	select {
+	case err := <-chWait:
+		if err != nil {
+			c.Fatal(err)
+		}
+		status, err := waitCmdOut.ReadString('\n')
+		if err != nil {
+			c.Fatal(err)
+		}
+		if strings.TrimSpace(status) != "99" {
+			c.Fatalf("expected exit 99, got %s", status)
+		}
+	case <-time.After(2 * time.Second):
+		waitCmd.Process.Kill()
+		c.Fatal("timeout waiting for `docker wait` to exit")
+	}
+}
diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go
index 9cb28b2..ed394d2 100644
--- a/integration-cli/docker_test_vars.go
+++ b/integration-cli/docker_test_vars.go
@@ -18,7 +18,6 @@
 
 	dockerBasePath       = "/var/lib/docker"
 	volumesConfigPath    = dockerBasePath + "/volumes"
-	volumesStoragePath   = dockerBasePath + "/vfs/dir"
 	containerStoragePath = dockerBasePath + "/containers"
 
 	runtimePath    = "/var/run/docker"
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
index 10cc6c9..e9c0df6 100644
--- a/integration-cli/docker_utils.go
+++ b/integration-cli/docker_utils.go
@@ -18,15 +18,17 @@
 	"path/filepath"
 	"strconv"
 	"strings"
-	"testing"
 	"time"
 
-	"github.com/docker/docker/api"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/go-check/check"
 )
 
 // Daemon represents a Docker daemon for the testing framework.
 type Daemon struct {
-	t              *testing.T
+	c              *check.C
 	logFile        *os.File
 	folder         string
 	stdin          io.WriteCloser
@@ -35,32 +37,50 @@
 	storageDriver  string
 	execDriver     string
 	wait           chan error
+	userlandProxy  bool
+}
+
+func enableUserlandProxy() bool {
+	if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
+		if val, err := strconv.ParseBool(env); err != nil {
+			return val
+		}
+	}
+	return true
 }
 
 // NewDaemon returns a Daemon instance to be used for testing.
 // This will create a directory such as daemon123456789 in the folder specified by $DEST.
 // The daemon will not automatically start.
-func NewDaemon(t *testing.T) *Daemon {
+func NewDaemon(c *check.C) *Daemon {
 	dest := os.Getenv("DEST")
 	if dest == "" {
-		t.Fatal("Please set the DEST environment variable")
+		c.Fatal("Please set the DEST environment variable")
 	}
 
-	dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().UnixNano()%100000000))
+	dir := filepath.Join(dest, fmt.Sprintf("d%d", time.Now().UnixNano()%100000000))
 	daemonFolder, err := filepath.Abs(dir)
 	if err != nil {
-		t.Fatalf("Could not make %q an absolute path: %v", dir, err)
+		c.Fatalf("Could not make %q an absolute path: %v", dir, err)
 	}
 
 	if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0600); err != nil {
-		t.Fatalf("Could not create %s/graph directory", daemonFolder)
+		c.Fatalf("Could not create %s/graph directory", daemonFolder)
+	}
+
+	userlandProxy := true
+	if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
+		if val, err := strconv.ParseBool(env); err != nil {
+			userlandProxy = val
+		}
 	}
 
 	return &Daemon{
-		t:             t,
+		c:             c,
 		folder:        daemonFolder,
 		storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
 		execDriver:    os.Getenv("DOCKER_EXECDRIVER"),
+		userlandProxy: userlandProxy,
 	}
 }
 
@@ -69,7 +89,7 @@
 func (d *Daemon) Start(arg ...string) error {
 	dockerBinary, err := exec.LookPath(dockerBinary)
 	if err != nil {
-		d.t.Fatalf("could not find docker binary in $PATH: %v", err)
+		d.c.Fatalf("could not find docker binary in $PATH: %v", err)
 	}
 
 	args := []string{
@@ -77,6 +97,7 @@
 		"--daemon",
 		"--graph", fmt.Sprintf("%s/graph", d.folder),
 		"--pidfile", fmt.Sprintf("%s/docker.pid", d.folder),
+		fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
 	}
 
 	// If we don't explicitly set the log-level or debug flag(-D) then
@@ -103,7 +124,7 @@
 
 	d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
 	if err != nil {
-		d.t.Fatalf("Could not create %s/docker.log: %v", d.folder, err)
+		d.c.Fatalf("Could not create %s/docker.log: %v", d.folder, err)
 	}
 
 	d.cmd.Stdout = d.logFile
@@ -117,7 +138,7 @@
 
 	go func() {
 		wait <- d.cmd.Wait()
-		d.t.Log("exiting daemon")
+		d.c.Log("exiting daemon")
 		close(wait)
 	}()
 
@@ -127,7 +148,7 @@
 	// make sure daemon is ready to receive requests
 	startTime := time.Now().Unix()
 	for {
-		d.t.Log("waiting for daemon to start")
+		d.c.Log("waiting for daemon to start")
 		if time.Now().Unix()-startTime > 5 {
 			// After 5 seconds, give up
 			return errors.New("Daemon exited and never started")
@@ -146,7 +167,7 @@
 
 			req, err := http.NewRequest("GET", "/_ping", nil)
 			if err != nil {
-				d.t.Fatalf("could not create new request: %v", err)
+				d.c.Fatalf("could not create new request: %v", err)
 			}
 
 			resp, err := client.Do(req)
@@ -154,10 +175,10 @@
 				continue
 			}
 			if resp.StatusCode != http.StatusOK {
-				d.t.Logf("received status != 200 OK: %s", resp.Status)
+				d.c.Logf("received status != 200 OK: %s", resp.Status)
 			}
 
-			d.t.Log("daemon started")
+			d.c.Log("daemon started")
 			return nil
 		}
 	}
@@ -184,7 +205,7 @@
 		return fmt.Errorf("could not load busybox image: %v", err)
 	}
 	if err := os.Remove(bb); err != nil {
-		d.t.Logf("Could not remove %s: %v", bb, err)
+		d.c.Logf("Could not remove %s: %v", bb, err)
 	}
 	return nil
 }
@@ -216,7 +237,7 @@
 			return err
 		case <-time.After(15 * time.Second):
 			// time for stopping jobs and run onShutdown hooks
-			d.t.Log("timeout")
+			d.c.Log("timeout")
 			break out1
 		}
 	}
@@ -229,10 +250,10 @@
 		case <-tick:
 			i++
 			if i > 4 {
-				d.t.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
+				d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
 				break out2
 			}
-			d.t.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
+			d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
 			if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
 				return fmt.Errorf("could not send signal: %v", err)
 			}
@@ -240,7 +261,7 @@
 	}
 
 	if err := d.cmd.Process.Kill(); err != nil {
-		d.t.Logf("Could not kill daemon: %v", err)
+		d.c.Logf("Could not kill daemon: %v", err)
 		return err
 	}
 
@@ -267,12 +288,20 @@
 	return string(b), err
 }
 
+func (d *Daemon) CmdWithArgs(daemonArgs []string, name string, arg ...string) (string, error) {
+	args := append(daemonArgs, name)
+	args = append(args, arg...)
+	c := exec.Command(dockerBinary, args...)
+	b, err := c.CombinedOutput()
+	return string(b), err
+}
+
 func (d *Daemon) LogfileName() string {
 	return d.logFile.Name()
 }
 
 func daemonHost() string {
-	daemonUrlStr := "unix://" + api.DEFAULTUNIXSOCKET
+	daemonUrlStr := "unix://" + opts.DefaultUnixSocket
 	if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" {
 		daemonUrlStr = daemonHostVar
 	}
@@ -297,58 +326,62 @@
 	}
 }
 
-func sockRequest(method, endpoint string, data interface{}) ([]byte, error) {
+func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) {
 	jsonData := bytes.NewBuffer(nil)
 	if err := json.NewEncoder(jsonData).Encode(data); err != nil {
-		return nil, err
+		return -1, nil, err
 	}
 
-	return sockRequestRaw(method, endpoint, jsonData, "application/json")
+	res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json")
+	if err != nil {
+		b, _ := ioutil.ReadAll(body)
+		return -1, b, err
+	}
+	var b []byte
+	b, err = readBody(body)
+	return res.StatusCode, b, err
 }
 
-func sockRequestRaw(method, endpoint string, data io.Reader, ct string) ([]byte, error) {
+func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
 	c, err := sockConn(time.Duration(10 * time.Second))
 	if err != nil {
-		return nil, fmt.Errorf("could not dial docker daemon: %v", err)
+		return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err)
 	}
 
 	client := httputil.NewClientConn(c, nil)
-	defer client.Close()
 
 	req, err := http.NewRequest(method, endpoint, data)
 	if err != nil {
-		return nil, fmt.Errorf("could not create new request: %v", err)
+		client.Close()
+		return nil, nil, fmt.Errorf("could not create new request: %v", err)
 	}
 
-	if ct == "" {
-		ct = "application/json"
+	if ct != "" {
+		req.Header.Set("Content-Type", ct)
 	}
-	req.Header.Set("Content-Type", ct)
 
 	resp, err := client.Do(req)
 	if err != nil {
-		return nil, fmt.Errorf("could not perform request: %v", err)
+		client.Close()
+		return nil, nil, fmt.Errorf("could not perform request: %v", err)
 	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		body, _ := ioutil.ReadAll(resp.Body)
-		return body, fmt.Errorf("received status != 200 OK: %s", resp.Status)
-	}
+	body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
+		defer client.Close()
+		return resp.Body.Close()
+	})
 
-	return ioutil.ReadAll(resp.Body)
+	return resp, body, nil
+}
+
+func readBody(b io.ReadCloser) ([]byte, error) {
+	defer b.Close()
+	return ioutil.ReadAll(b)
 }
 
 func deleteContainer(container string) error {
-	container = strings.Replace(container, "\n", " ", -1)
-	container = strings.Trim(container, " ")
-	killArgs := fmt.Sprintf("kill %v", container)
-	killSplitArgs := strings.Split(killArgs, " ")
-	killCmd := exec.Command(dockerBinary, killSplitArgs...)
-	runCommand(killCmd)
-	rmArgs := fmt.Sprintf("rm -v %v", container)
-	rmSplitArgs := strings.Split(rmArgs, " ")
-	rmCmd := exec.Command(dockerBinary, rmSplitArgs...)
-	exitCode, err := runCommand(rmCmd)
+	container = strings.TrimSpace(strings.Replace(container, "\n", " ", -1))
+	rmArgs := strings.Split(fmt.Sprintf("rm -fv %v", container), " ")
+	exitCode, err := runCommand(exec.Command(dockerBinary, rmArgs...))
 	// set error manually if not set
 	if exitCode != 0 && err == nil {
 		err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero")
@@ -380,6 +413,58 @@
 	return nil
 }
 
+var protectedImages = map[string]struct{}{}
+
+func init() {
+	out, err := exec.Command(dockerBinary, "images").CombinedOutput()
+	if err != nil {
+		panic(err)
+	}
+	lines := strings.Split(string(out), "\n")[1:]
+	for _, l := range lines {
+		if l == "" {
+			continue
+		}
+		fields := strings.Fields(l)
+		imgTag := fields[0] + ":" + fields[1]
+		// just for case if we have dangling images in tested daemon
+		if imgTag != "<none>:<none>" {
+			protectedImages[imgTag] = struct{}{}
+		}
+	}
+}
+
+func deleteAllImages() error {
+	out, err := exec.Command(dockerBinary, "images").CombinedOutput()
+	if err != nil {
+		return err
+	}
+	lines := strings.Split(string(out), "\n")[1:]
+	var imgs []string
+	for _, l := range lines {
+		if l == "" {
+			continue
+		}
+		fields := strings.Fields(l)
+		imgTag := fields[0] + ":" + fields[1]
+		if _, ok := protectedImages[imgTag]; !ok {
+			if fields[0] == "<none>" {
+				imgs = append(imgs, fields[2])
+				continue
+			}
+			imgs = append(imgs, imgTag)
+		}
+	}
+	if len(imgs) == 0 {
+		return nil
+	}
+	args := append([]string{"rmi", "-f"}, imgs...)
+	if err := exec.Command(dockerBinary, args...).Run(); err != nil {
+		return err
+	}
+	return nil
+}
+
 func getPausedContainers() (string, error) {
 	getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a")
 	out, exitCode, err := runCommandWithOutput(getPausedContainersCmd)
@@ -393,11 +478,13 @@
 func getSliceOfPausedContainers() ([]string, error) {
 	out, err := getPausedContainers()
 	if err == nil {
+		if len(out) == 0 {
+			return nil, err
+		}
 		slice := strings.Split(strings.TrimSpace(out), "\n")
 		return slice, err
-	} else {
-		return []string{out}, err
 	}
+	return []string{out}, err
 }
 
 func unpauseContainer(container string) error {
@@ -431,8 +518,7 @@
 }
 
 func deleteImages(images ...string) error {
-	args := make([]string, 1, 2)
-	args[0] = "rmi"
+	args := []string{"rmi", "-f"}
 	args = append(args, images...)
 	rmiCmd := exec.Command(dockerBinary, args...)
 	exitCode, err := runCommand(rmiCmd)
@@ -440,7 +526,6 @@
 	if exitCode != 0 && err == nil {
 		err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero")
 	}
-
 	return err
 }
 
@@ -465,12 +550,12 @@
 	return
 }
 
-func dockerCmd(t *testing.T, args ...string) (string, int, error) {
+func dockerCmd(c *check.C, args ...string) (string, int) {
 	out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
 	if err != nil {
-		t.Fatalf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err)
+		c.Fatalf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err)
 	}
-	return out, status, err
+	return out, status
 }
 
 // execute a docker command with a timeout
@@ -483,7 +568,7 @@
 }
 
 // execute a docker command in a directory
-func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, error) {
+func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) {
 	dockerCommand := exec.Command(dockerBinary, args...)
 	dockerCommand.Dir = path
 	out, status, err := runCommandWithOutput(dockerCommand)
@@ -504,16 +589,21 @@
 	return out, status, err
 }
 
-func findContainerIP(t *testing.T, id string) string {
-	cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id)
+func findContainerIP(c *check.C, id string, vargs ...string) string {
+	args := append(vargs, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id)
+	cmd := exec.Command(dockerBinary, args...)
 	out, _, err := runCommandWithOutput(cmd)
 	if err != nil {
-		t.Fatal(err, out)
+		c.Fatal(err, out)
 	}
 
 	return strings.Trim(out, " \r\n'")
 }
 
+func (d *Daemon) findContainerIP(id string) string {
+	return findContainerIP(d.c, id, "--host", d.sock())
+}
+
 func getContainerCount() (int, error) {
 	const containers = "Containers:"
 
@@ -526,7 +616,7 @@
 	lines := strings.Split(out, "\n")
 	for _, line := range lines {
 		if strings.Contains(line, containers) {
-			output := stripTrailingCharacters(line)
+			output := strings.TrimSpace(line)
 			output = strings.TrimLeft(output, containers)
 			output = strings.Trim(output, " ")
 			containerCount, err := strconv.Atoi(output)
@@ -597,7 +687,6 @@
 func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) {
 	ctx, err := fakeContextWithFiles(files)
 	if err != nil {
-		ctx.Close()
 		return nil, err
 	}
 	if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil {
@@ -696,8 +785,8 @@
 
 func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) {
 	var (
-		image     = fmt.Sprintf("fileserver-img-%s", strings.ToLower(makeRandomString(10)))
-		container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(makeRandomString(10)))
+		image     = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
+		container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
 	)
 
 	// Build the image
@@ -766,14 +855,14 @@
 // getContainerState returns the exit code of the container
 // and true if it's running
 // the exit code should be ignored if it's running
-func getContainerState(t *testing.T, id string) (int, bool, error) {
+func getContainerState(c *check.C, id string) (int, bool, error) {
 	var (
 		exitStatus int
 		running    bool
 	)
-	out, exitCode, err := dockerCmd(t, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id)
-	if err != nil || exitCode != 0 {
-		return 0, false, fmt.Errorf("%q doesn't exist: %s", id, err)
+	out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id)
+	if exitCode != 0 {
+		return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out)
 	}
 
 	out = strings.Trim(out, "\n")
@@ -972,28 +1061,28 @@
 // Write `content` to the file at path `dst`, creating it if necessary,
 // as well as any missing directories.
 // The file is truncated if it already exists.
-// Call t.Fatal() at the first error.
-func writeFile(dst, content string, t *testing.T) {
+// Call c.Fatal() at the first error.
+func writeFile(dst, content string, c *check.C) {
 	// Create subdirectories if necessary
 	if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	// Write content (truncate if it exists)
 	if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 }
 
 // Return the contents of file at path `src`.
-// Call t.Fatal() at the first error (including if the file doesn't exist)
-func readFile(src string, t *testing.T) (content string) {
+// Call c.Fatal() at the first error (including if the file doesn't exist)
+func readFile(src string, c *check.C) (content string) {
 	data, err := ioutil.ReadFile(src)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	return string(data)
@@ -1038,36 +1127,35 @@
 }
 
 // daemonTime provides the current time on the daemon host
-func daemonTime(t *testing.T) time.Time {
+func daemonTime(c *check.C) time.Time {
 	if isLocalDaemon {
 		return time.Now()
 	}
 
-	body, err := sockRequest("GET", "/info", nil)
-	if err != nil {
-		t.Fatal("daemonTime: failed to get /info: %v", err)
-	}
+	status, body, err := sockRequest("GET", "/info", nil)
+	c.Assert(status, check.Equals, http.StatusOK)
+	c.Assert(err, check.IsNil)
 
 	type infoJSON struct {
 		SystemTime string
 	}
 	var info infoJSON
 	if err = json.Unmarshal(body, &info); err != nil {
-		t.Fatalf("unable to unmarshal /info response: %v", err)
+		c.Fatalf("unable to unmarshal /info response: %v", err)
 	}
 
 	dt, err := time.Parse(time.RFC3339Nano, info.SystemTime)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 	return dt
 }
 
-func setupRegistry(t *testing.T) func() {
-	testRequires(t, RegistryHosting)
-	reg, err := newTestRegistryV2(t)
+func setupRegistry(c *check.C) *testRegistryV2 {
+	testRequires(c, RegistryHosting)
+	reg, err := newTestRegistryV2(c)
 	if err != nil {
-		t.Fatal(err)
+		c.Fatal(err)
 	}
 
 	// Wait for registry to be ready to serve requests.
@@ -1079,10 +1167,9 @@
 	}
 
 	if err != nil {
-		t.Fatal("Timeout waiting for test registry to become available")
+		c.Fatal("Timeout waiting for test registry to become available")
 	}
-
-	return func() { reg.Close() }
+	return reg
 }
 
 // appendBaseEnv appends the minimum set of environment variables to exec the
diff --git a/integration/fixtures/https/ca.pem b/integration-cli/fixtures/https/ca.pem
similarity index 100%
rename from integration/fixtures/https/ca.pem
rename to integration-cli/fixtures/https/ca.pem
diff --git a/integration/fixtures/https/client-cert.pem b/integration-cli/fixtures/https/client-cert.pem
similarity index 100%
rename from integration/fixtures/https/client-cert.pem
rename to integration-cli/fixtures/https/client-cert.pem
diff --git a/integration/fixtures/https/client-key.pem b/integration-cli/fixtures/https/client-key.pem
similarity index 100%
rename from integration/fixtures/https/client-key.pem
rename to integration-cli/fixtures/https/client-key.pem
diff --git a/integration/fixtures/https/client-rogue-cert.pem b/integration-cli/fixtures/https/client-rogue-cert.pem
similarity index 100%
rename from integration/fixtures/https/client-rogue-cert.pem
rename to integration-cli/fixtures/https/client-rogue-cert.pem
diff --git a/integration/fixtures/https/client-rogue-key.pem b/integration-cli/fixtures/https/client-rogue-key.pem
similarity index 100%
rename from integration/fixtures/https/client-rogue-key.pem
rename to integration-cli/fixtures/https/client-rogue-key.pem
diff --git a/integration/fixtures/https/server-cert.pem b/integration-cli/fixtures/https/server-cert.pem
similarity index 100%
rename from integration/fixtures/https/server-cert.pem
rename to integration-cli/fixtures/https/server-cert.pem
diff --git a/integration/fixtures/https/server-key.pem b/integration-cli/fixtures/https/server-key.pem
similarity index 100%
rename from integration/fixtures/https/server-key.pem
rename to integration-cli/fixtures/https/server-key.pem
diff --git a/integration/fixtures/https/server-rogue-cert.pem b/integration-cli/fixtures/https/server-rogue-cert.pem
similarity index 100%
rename from integration/fixtures/https/server-rogue-cert.pem
rename to integration-cli/fixtures/https/server-rogue-cert.pem
diff --git a/integration/fixtures/https/server-rogue-key.pem b/integration-cli/fixtures/https/server-rogue-key.pem
similarity index 100%
rename from integration/fixtures/https/server-rogue-key.pem
rename to integration-cli/fixtures/https/server-rogue-key.pem
diff --git a/integration-cli/registry.go b/integration-cli/registry.go
index 8290e71..2801eac 100644
--- a/integration-cli/registry.go
+++ b/integration-cli/registry.go
@@ -7,7 +7,8 @@
 	"os"
 	"os/exec"
 	"path/filepath"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 const v2binary = "registry-v2"
@@ -17,7 +18,7 @@
 	dir string
 }
 
-func newTestRegistryV2(t *testing.T) (*testRegistryV2, error) {
+func newTestRegistryV2(c *check.C) (*testRegistryV2, error) {
 	template := `version: 0.1
 loglevel: debug
 storage:
@@ -43,7 +44,7 @@
 	if err := cmd.Start(); err != nil {
 		os.RemoveAll(tmp)
 		if os.IsNotExist(err) {
-			t.Skip()
+			c.Skip(err.Error())
 		}
 		return nil, err
 	}
diff --git a/integration-cli/requirements.go b/integration-cli/requirements.go
index 346d0cd..fc4f5ee 100644
--- a/integration-cli/requirements.go
+++ b/integration-cli/requirements.go
@@ -3,10 +3,13 @@
 import (
 	"encoding/json"
 	"fmt"
+	"io/ioutil"
 	"log"
+	"net/http"
 	"os/exec"
 	"strings"
-	"testing"
+
+	"github.com/go-check/check"
 )
 
 type TestCondition func() bool
@@ -32,6 +35,23 @@
 		func() bool { return supportsExec },
 		"Test requires 'docker exec' capabilities on the tested daemon.",
 	}
+	Network = TestRequirement{
+		func() bool {
+			resp, err := http.Get("http://hub.docker.com")
+			if resp != nil {
+				resp.Body.Close()
+			}
+			return err == nil
+		},
+		"Test requires network availability, environment variable set to none to run in a non-network enabled mode.",
+	}
+	Apparmor = TestRequirement{
+		func() bool {
+			buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+			return err == nil && len(buf) > 1 && buf[0] == 'Y'
+		},
+		"Test requires apparmor is enabled.",
+	}
 	RegistryHosting = TestRequirement{
 		func() bool {
 			// for now registry binary is built only if we're running inside
@@ -46,8 +66,8 @@
 		func() bool {
 			if daemonExecDriver == "" {
 				// get daemon info
-				body, err := sockRequest("GET", "/info", nil)
-				if err != nil {
+				status, body, err := sockRequest("GET", "/info", nil)
+				if err != nil || status != http.StatusOK {
 					log.Fatalf("sockRequest failed for /info: %v", err)
 				}
 
@@ -66,7 +86,6 @@
 		},
 		"Test requires the native (libcontainer) exec driver.",
 	}
-
 	NotOverlay = TestRequirement{
 		func() bool {
 			cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts")
@@ -81,10 +100,10 @@
 
 // testRequires checks if the environment satisfies the requirements
 // for the test to run or skips the tests.
-func testRequires(t *testing.T, requirements ...TestRequirement) {
+func testRequires(c *check.C, requirements ...TestRequirement) {
 	for _, r := range requirements {
 		if !r.Condition() {
-			t.Skip(r.SkipMessage)
+			c.Skip(r.SkipMessage)
 		}
 	}
 }
diff --git a/integration-cli/utils.go b/integration-cli/utils.go
index 85e6f1c..0ec4e75 100644
--- a/integration-cli/utils.go
+++ b/integration-cli/utils.go
@@ -1,13 +1,12 @@
 package main
 
 import (
+	"archive/tar"
 	"bytes"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
-	"math/rand"
-	"net/http"
 	"net/http/httptest"
 	"os"
 	"os/exec"
@@ -17,13 +16,13 @@
 	"syscall"
 	"time"
 
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/pkg/stringutils"
 )
 
 func getExitCode(err error) (int, error) {
 	exitCode := 0
 	if exiterr, ok := err.(*exec.ExitError); ok {
-		if procExit := exiterr.Sys().(syscall.WaitStatus); ok {
+		if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
 			return procExit.ExitStatus(), nil
 		}
 	}
@@ -44,12 +43,14 @@
 
 func IsKilled(err error) bool {
 	if exitErr, ok := err.(*exec.ExitError); ok {
-		sys := exitErr.ProcessState.Sys()
-		status, ok := sys.(syscall.WaitStatus)
+		status, ok := exitErr.Sys().(syscall.WaitStatus)
 		if !ok {
 			return false
 		}
-		return status.Signaled() && status.Signal() == os.Kill
+		// status.ExitStatus() is required on Windows because it does not
+		// implement Signal() nor Signaled(). Just check it had a bad exit
+		// status could mean it was killed (and in tests we do kill)
+		return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0
 	}
 	return false
 }
@@ -141,6 +142,7 @@
 		if i > 0 {
 			prevCmd := cmds[i-1]
 			cmd.Stdin, err = prevCmd.StdoutPipe()
+
 			if err != nil {
 				return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err)
 			}
@@ -165,17 +167,8 @@
 	return runCommandWithOutput(cmds[len(cmds)-1])
 }
 
-func logDone(message string) {
-	fmt.Printf("[PASSED]: %s\n", message)
-}
-
-func stripTrailingCharacters(target string) string {
-	return strings.TrimSpace(target)
-}
-
 func unmarshalJSON(data []byte, result interface{}) error {
-	err := json.Unmarshal(data, result)
-	if err != nil {
+	if err := json.Unmarshal(data, result); err != nil {
 		return err
 	}
 
@@ -215,7 +208,16 @@
 		cmd := exec.Command(dockerBinary, "inspect", "-f", expr, name)
 		out, _, err := runCommandWithOutput(cmd)
 		if err != nil {
-			return fmt.Errorf("error executing docker inspect: %v", err)
+			if !strings.Contains(out, "No such") {
+				return fmt.Errorf("error executing docker inspect: %v\n%s", err, out)
+			}
+			select {
+			case <-after:
+				return err
+			default:
+				time.Sleep(10 * time.Millisecond)
+				continue
+			}
 		}
 
 		out = strings.TrimSpace(out)
@@ -272,50 +274,10 @@
 	*httptest.Server
 }
 
-func fileServer(files map[string]string) (*FileServer, error) {
-	var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
-		if filePath, found := files[r.URL.Path]; found {
-			http.ServeFile(w, r, filePath)
-		} else {
-			http.Error(w, http.StatusText(404), 404)
-		}
-	}
-
-	for _, file := range files {
-		if _, err := os.Stat(file); err != nil {
-			return nil, err
-		}
-	}
-	server := httptest.NewServer(handler)
-	return &FileServer{
-		Server: server,
-	}, nil
-}
-
-func copyWithCP(source, target string) error {
-	copyCmd := exec.Command("cp", "-rp", source, target)
-	out, exitCode, err := runCommandWithOutput(copyCmd)
-	if err != nil || exitCode != 0 {
-		return fmt.Errorf("failed to copy: error: %q ,output: %q", err, out)
-	}
-	return nil
-}
-
-func makeRandomString(n int) string {
-	// make a really long string
-	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-	b := make([]byte, n)
-	r := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
-	for i := range b {
-		b[i] = letters[r.Intn(len(letters))]
-	}
-	return string(b)
-}
-
 // randomUnixTmpDirPath provides a temporary unix path with rand string appended.
 // does not create or checks if it exists.
 func randomUnixTmpDirPath(s string) string {
-	return path.Join("/tmp", fmt.Sprintf("%s.%s", s, makeRandomString(10)))
+	return path.Join("/tmp", fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
 }
 
 // Reads chunkSize bytes from reader after every interval.
@@ -354,3 +316,26 @@
 	}
 	return cgroupPaths
 }
+
+type channelBuffer struct {
+	c chan []byte
+}
+
+func (c *channelBuffer) Write(b []byte) (int, error) {
+	c.c <- b
+	return len(b), nil
+}
+
+func (c *channelBuffer) Close() error {
+	close(c.c)
+	return nil
+}
+
+func (c *channelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
+	select {
+	case b := <-c.c:
+		return copy(p[0:], b), nil
+	case <-time.After(n):
+		return -1, fmt.Errorf("timeout reading from channel")
+	}
+}
diff --git a/integration/README.md b/integration/README.md
deleted file mode 100644
index 41f43a4..0000000
--- a/integration/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-## Legacy integration tests
-
-`./integration` contains Docker's legacy integration tests.
-It is DEPRECATED and will eventually be removed.
-
-### If you are a *CONTRIBUTOR* and want to add a test:
-
-* Consider mocking out side effects and contributing a *unit test* in the subsystem
-you're modifying. For example, the remote API has unit tests in `./api/server/server_unit_tests.go`.
-The events subsystem has unit tests in `./events/events_test.go`. And so on.
-
-* For end-to-end integration tests, please contribute to `./integration-cli`.
-
-
-### If you are a *MAINTAINER*
-
-Please don't allow patches adding new tests to `./integration`.
-
-### If you are *LOOKING FOR A WAY TO HELP*
-
-Please consider porting tests away from `./integration` and into either unit tests or CLI tests.
-
-Any help will be greatly appreciated!
diff --git a/integration/api_test.go b/integration/api_test.go
deleted file mode 100644
index e978c31..0000000
--- a/integration/api_test.go
+++ /dev/null
@@ -1,991 +0,0 @@
-package docker
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/http/httptest"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/api"
-	"github.com/docker/docker/api/server"
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-)
-
-func TestSaveImageAndThenLoad(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	// save image
-	r := httptest.NewRecorder()
-	req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusOK {
-		t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
-	}
-	tarball := r.Body
-
-	// delete the image
-	r = httptest.NewRecorder()
-	req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusOK {
-		t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
-	}
-
-	// make sure there is no image
-	r = httptest.NewRecorder()
-	req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusNotFound {
-		t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code)
-	}
-
-	// load the image
-	r = httptest.NewRecorder()
-	req, err = http.NewRequest("POST", "/images/load", tarball)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusOK {
-		t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
-	}
-
-	// finally make sure the image is there
-	r = httptest.NewRecorder()
-	req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusOK {
-		t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
-	}
-}
-
-func TestGetContainersTop(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/sh", "-c", "cat"},
-			OpenStdin: true,
-		},
-		t,
-	)
-	defer func() {
-		// Make sure the process dies before destroying daemon
-		containerKill(eng, containerID, t)
-		containerWait(eng, containerID, t)
-	}()
-
-	startContainer(eng, containerID, t)
-
-	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
-		for {
-			if containerRunning(eng, containerID, t) {
-				break
-			}
-			time.Sleep(10 * time.Millisecond)
-		}
-	})
-
-	if !containerRunning(eng, containerID, t) {
-		t.Fatalf("Container should be running")
-	}
-
-	// Make sure sh spawn up cat
-	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
-		in, out := containerAttach(eng, containerID, t)
-		if err := assertPipe("hello\n", "hello", out, in, 150); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	r := httptest.NewRecorder()
-	req, err := http.NewRequest("GET", "/containers/"+containerID+"/top?ps_args=aux", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	var procs engine.Env
-	if err := procs.Decode(r.Body); err != nil {
-		t.Fatal(err)
-	}
-
-	if len(procs.GetList("Titles")) != 11 {
-		t.Fatalf("Expected 11 titles, found %d.", len(procs.GetList("Titles")))
-	}
-	if procs.GetList("Titles")[0] != "USER" || procs.GetList("Titles")[10] != "COMMAND" {
-		t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.GetList("Titles")[0], procs.GetList("Titles")[10])
-	}
-	processes := [][]string{}
-	if err := procs.GetJson("Processes", &processes); err != nil {
-		t.Fatal(err)
-	}
-	if len(processes) != 2 {
-		t.Fatalf("Expected 2 processes, found %d.", len(processes))
-	}
-	if processes[0][10] != "/bin/sh -c cat" {
-		t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[0][10])
-	}
-	if processes[1][10] != "/bin/sh -c cat" {
-		t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[1][10])
-	}
-}
-
-func TestPostCommit(t *testing.T) {
-	eng := NewTestEngine(t)
-	b := &builder.BuilderJob{Engine: eng}
-	b.Install()
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	// Create a container and remove a file
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image: unitTestImageID,
-			Cmd:   []string{"touch", "/test"},
-		},
-		t,
-	)
-
-	containerRun(eng, containerID, t)
-
-	req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{}))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusCreated {
-		t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
-	}
-
-	var env engine.Env
-	if err := env.Decode(r.Body); err != nil {
-		t.Fatal(err)
-	}
-	if err := eng.Job("image_inspect", env.Get("Id")).Run(); err != nil {
-		t.Fatalf("The image has not been committed")
-	}
-}
-
-func TestPostContainersCreate(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	configJSON, err := json.Marshal(&runconfig.Config{
-		Image:  unitTestImageID,
-		Memory: 33554432,
-		Cmd:    []string{"touch", "/test"},
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req.Header.Set("Content-Type", "application/json")
-
-	r := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusCreated {
-		t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
-	}
-
-	var apiRun engine.Env
-	if err := apiRun.Decode(r.Body); err != nil {
-		t.Fatal(err)
-	}
-	containerID := apiRun.Get("Id")
-
-	containerAssertExists(eng, containerID, t)
-	containerRun(eng, containerID, t)
-
-	if !containerFileExists(eng, containerID, "test", t) {
-		t.Fatal("Test file was not created")
-	}
-}
-
-func TestPostJsonVerify(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	configJSON, err := json.Marshal(&runconfig.Config{
-		Image:  unitTestImageID,
-		Memory: 33554432,
-		Cmd:    []string{"touch", "/test"},
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r := httptest.NewRecorder()
-
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-
-	// Don't add Content-Type header
-	// req.Header.Set("Content-Type", "application/json")
-
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") {
-		t.Fatal("Create should have failed due to no Content-Type header - got:", r)
-	}
-
-	// Now add header but with wrong type and retest
-	req.Header.Set("Content-Type", "application/xml")
-
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") {
-		t.Fatal("Create should have failed due to wrong Content-Type header - got:", r)
-	}
-}
-
-// Issue 7941 - test to make sure a "null" in JSON is just ignored.
-// W/o this fix a null in JSON would be parsed into a string var as "null"
-func TestPostCreateNull(t *testing.T) {
-	eng := NewTestEngine(t)
-	daemon := mkDaemonFromEngine(eng, t)
-	defer daemon.Nuke()
-
-	configStr := fmt.Sprintf(`{
-		"Hostname":"",
-		"Domainname":"",
-		"Memory":0,
-		"MemorySwap":0,
-		"CpuShares":0,
-		"Cpuset":null,
-		"AttachStdin":true,
-		"AttachStdout":true,
-		"AttachStderr":true,
-		"PortSpecs":null,
-		"ExposedPorts":{},
-		"Tty":true,
-		"OpenStdin":true,
-		"StdinOnce":true,
-		"Env":[],
-		"Cmd":"ls",
-		"Image":"%s",
-		"Volumes":{},
-		"WorkingDir":"",
-		"Entrypoint":null,
-		"NetworkDisabled":false,
-		"OnBuild":null}`, unitTestImageID)
-
-	req, err := http.NewRequest("POST", "/containers/create", strings.NewReader(configStr))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req.Header.Set("Content-Type", "application/json")
-
-	r := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusCreated {
-		t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
-	}
-
-	var apiRun engine.Env
-	if err := apiRun.Decode(r.Body); err != nil {
-		t.Fatal(err)
-	}
-	containerID := apiRun.Get("Id")
-
-	containerAssertExists(eng, containerID, t)
-
-	c, _ := daemon.Get(containerID)
-	if c.Config.Cpuset != "" {
-		t.Fatalf("Cpuset should have been empty - instead its:" + c.Config.Cpuset)
-	}
-}
-
-func TestPostContainersKill(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/cat"},
-			OpenStdin: true,
-		},
-		t,
-	)
-
-	startContainer(eng, containerID, t)
-
-	// Give some time to the process to start
-	containerWaitTimeout(eng, containerID, t)
-
-	if !containerRunning(eng, containerID, t) {
-		t.Errorf("Container should be running")
-	}
-
-	r := httptest.NewRecorder()
-	req, err := http.NewRequest("POST", "/containers/"+containerID+"/kill", bytes.NewReader([]byte{}))
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusNoContent {
-		t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
-	}
-	if containerRunning(eng, containerID, t) {
-		t.Fatalf("The container hasn't been killed")
-	}
-}
-
-func TestPostContainersRestart(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/top"},
-			OpenStdin: true,
-		},
-		t,
-	)
-
-	startContainer(eng, containerID, t)
-
-	// Give some time to the process to start
-	containerWaitTimeout(eng, containerID, t)
-
-	if !containerRunning(eng, containerID, t) {
-		t.Errorf("Container should be running")
-	}
-
-	req, err := http.NewRequest("POST", "/containers/"+containerID+"/restart?t=1", bytes.NewReader([]byte{}))
-	if err != nil {
-		t.Fatal(err)
-	}
-	r := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusNoContent {
-		t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
-	}
-
-	// Give some time to the process to restart
-	containerWaitTimeout(eng, containerID, t)
-
-	if !containerRunning(eng, containerID, t) {
-		t.Fatalf("Container should be running")
-	}
-
-	containerKill(eng, containerID, t)
-}
-
-func TestPostContainersStart(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(
-		eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/cat"},
-			OpenStdin: true,
-		},
-		t,
-	)
-
-	hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{})
-
-	req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req.Header.Set("Content-Type", "application/json")
-
-	r := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusNoContent {
-		t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
-	}
-
-	containerAssertExists(eng, containerID, t)
-
-	req, err = http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req.Header.Set("Content-Type", "application/json")
-
-	r = httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-
-	// Starting an already started container should return a 304
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusNotModified {
-		t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code)
-	}
-	containerAssertExists(eng, containerID, t)
-	containerKill(eng, containerID, t)
-}
-
-func TestPostContainersStop(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/top"},
-			OpenStdin: true,
-		},
-		t,
-	)
-
-	startContainer(eng, containerID, t)
-
-	// Give some time to the process to start
-	containerWaitTimeout(eng, containerID, t)
-
-	if !containerRunning(eng, containerID, t) {
-		t.Errorf("Container should be running")
-	}
-
-	// Note: as it is a POST request, it requires a body.
-	req, err := http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{}))
-	if err != nil {
-		t.Fatal(err)
-	}
-	r := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusNoContent {
-		t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code)
-	}
-	if containerRunning(eng, containerID, t) {
-		t.Fatalf("The container hasn't been stopped")
-	}
-
-	req, err = http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{}))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r = httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-
-	// Stopping an already stopper container should return a 304
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusNotModified {
-		t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code)
-	}
-}
-
-func TestPostContainersWait(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/sleep", "1"},
-			OpenStdin: true,
-		},
-		t,
-	)
-	startContainer(eng, containerID, t)
-
-	setTimeout(t, "Wait timed out", 3*time.Second, func() {
-		r := httptest.NewRecorder()
-		req, err := http.NewRequest("POST", "/containers/"+containerID+"/wait", bytes.NewReader([]byte{}))
-		if err != nil {
-			t.Fatal(err)
-		}
-		server.ServeRequest(eng, api.APIVERSION, r, req)
-		assertHttpNotError(r, t)
-		var apiWait engine.Env
-		if err := apiWait.Decode(r.Body); err != nil {
-			t.Fatal(err)
-		}
-		if apiWait.GetInt("StatusCode") != 0 {
-			t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.GetInt("StatusCode"))
-		}
-	})
-
-	if containerRunning(eng, containerID, t) {
-		t.Fatalf("The container should be stopped after wait")
-	}
-}
-
-func TestPostContainersAttach(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/cat"},
-			OpenStdin: true,
-		},
-		t,
-	)
-	// Start the process
-	startContainer(eng, containerID, t)
-
-	stdin, stdinPipe := io.Pipe()
-	stdout, stdoutPipe := io.Pipe()
-
-	// Try to avoid the timeout in destroy. Best effort, don't check error
-	defer func() {
-		closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
-		containerKill(eng, containerID, t)
-	}()
-
-	// Attach to it
-	c1 := make(chan struct{})
-	go func() {
-		defer close(c1)
-
-		r := &hijackTester{
-			ResponseRecorder: httptest.NewRecorder(),
-			in:               stdin,
-			out:              stdoutPipe,
-		}
-
-		req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{}))
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		server.ServeRequest(eng, api.APIVERSION, r, req)
-		assertHttpNotError(r.ResponseRecorder, t)
-	}()
-
-	// Acknowledge hijack
-	setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
-		stdout.Read([]byte{})
-		stdout.Read(make([]byte, 4096))
-	})
-
-	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", string([]byte{1, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	// Close pipes (client disconnects)
-	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
-		t.Fatal(err)
-	}
-
-	// Wait for attach to finish, the client disconnected, therefore, Attach finished his job
-	setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() {
-		<-c1
-	})
-
-	// We closed stdin, expect /bin/cat to still be running
-	// Wait a little bit to make sure container.monitor() did his thing
-	containerWaitTimeout(eng, containerID, t)
-
-	// Try to avoid the timeout in destroy. Best effort, don't check error
-	cStdin, _ := containerAttach(eng, containerID, t)
-	cStdin.Close()
-	containerWait(eng, containerID, t)
-}
-
-func TestPostContainersAttachStderr(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image:     unitTestImageID,
-			Cmd:       []string{"/bin/sh", "-c", "/bin/cat >&2"},
-			OpenStdin: true,
-		},
-		t,
-	)
-	// Start the process
-	startContainer(eng, containerID, t)
-
-	stdin, stdinPipe := io.Pipe()
-	stdout, stdoutPipe := io.Pipe()
-
-	// Try to avoid the timeout in destroy. Best effort, don't check error
-	defer func() {
-		closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
-		containerKill(eng, containerID, t)
-	}()
-
-	// Attach to it
-	c1 := make(chan struct{})
-	go func() {
-		defer close(c1)
-
-		r := &hijackTester{
-			ResponseRecorder: httptest.NewRecorder(),
-			in:               stdin,
-			out:              stdoutPipe,
-		}
-
-		req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{}))
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		server.ServeRequest(eng, api.APIVERSION, r, req)
-		assertHttpNotError(r.ResponseRecorder, t)
-	}()
-
-	// Acknowledge hijack
-	setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() {
-		stdout.Read([]byte{})
-		stdout.Read(make([]byte, 4096))
-	})
-
-	setTimeout(t, "read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", string([]byte{2, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	// Close pipes (client disconnects)
-	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
-		t.Fatal(err)
-	}
-
-	// Wait for attach to finish, the client disconnected, therefore, Attach finished his job
-	setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() {
-		<-c1
-	})
-
-	// We closed stdin, expect /bin/cat to still be running
-	// Wait a little bit to make sure container.monitor() did his thing
-	containerWaitTimeout(eng, containerID, t)
-
-	// Try to avoid the timeout in destroy. Best effort, don't check error
-	cStdin, _ := containerAttach(eng, containerID, t)
-	cStdin.Close()
-	containerWait(eng, containerID, t)
-}
-
-func TestOptionsRoute(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	r := httptest.NewRecorder()
-	req, err := http.NewRequest("OPTIONS", "/", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusOK {
-		t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code)
-	}
-}
-
-func TestGetEnabledCors(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	r := httptest.NewRecorder()
-
-	req, err := http.NewRequest("GET", "/version", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-	if r.Code != http.StatusOK {
-		t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code)
-	}
-
-	allowOrigin := r.Header().Get("Access-Control-Allow-Origin")
-	allowHeaders := r.Header().Get("Access-Control-Allow-Headers")
-	allowMethods := r.Header().Get("Access-Control-Allow-Methods")
-
-	if allowOrigin != "*" {
-		t.Errorf("Expected header Access-Control-Allow-Origin to be \"*\", %s found.", allowOrigin)
-	}
-	if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth" {
-		t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth\", %s found.", allowHeaders)
-	}
-	if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" {
-		t.Errorf("Expected hearder Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods)
-	}
-}
-
-func TestDeleteImages(t *testing.T) {
-	eng := NewTestEngine(t)
-	//we expect errors, so we disable stderr
-	eng.Stderr = ioutil.Discard
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	initialImages := getImages(eng, t, true, "")
-
-	if err := eng.Job("tag", unitTestImageName, "test", "test").Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	images := getImages(eng, t, true, "")
-
-	if len(images.Data[0].GetList("RepoTags")) != len(initialImages.Data[0].GetList("RepoTags"))+1 {
-		t.Errorf("Expected %d images, %d found", len(initialImages.Data[0].GetList("RepoTags"))+1, len(images.Data[0].GetList("RepoTags")))
-	}
-
-	req, err := http.NewRequest("DELETE", "/images/"+unitTestImageID, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusConflict {
-		t.Fatalf("Expected http status 409-conflict, got %v", r.Code)
-	}
-
-	req2, err := http.NewRequest("DELETE", "/images/test:test", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r2 := httptest.NewRecorder()
-	server.ServeRequest(eng, api.APIVERSION, r2, req2)
-	assertHttpNotError(r2, t)
-	if r2.Code != http.StatusOK {
-		t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
-	}
-
-	outs := engine.NewTable("Created", 0)
-	if _, err := outs.ReadListFrom(r2.Body.Bytes()); err != nil {
-		t.Fatal(err)
-	}
-	if len(outs.Data) != 1 {
-		t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs.Data))
-	}
-	images = getImages(eng, t, false, "")
-
-	if images.Len() != initialImages.Len() {
-		t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len())
-	}
-}
-
-func TestPostContainersCopy(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	// Create a container and remove a file
-	containerID := createTestContainer(eng,
-		&runconfig.Config{
-			Image: unitTestImageID,
-			Cmd:   []string{"touch", "/test.txt"},
-		},
-		t,
-	)
-	containerRun(eng, containerID, t)
-
-	r := httptest.NewRecorder()
-
-	var copyData engine.Env
-	copyData.Set("Resource", "/test.txt")
-	copyData.Set("HostPath", ".")
-
-	jsonData := bytes.NewBuffer(nil)
-	if err := copyData.Encode(jsonData); err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData)
-	if err != nil {
-		t.Fatal(err)
-	}
-	req.Header.Add("Content-Type", "application/json")
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-
-	if r.Code != http.StatusOK {
-		t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code)
-	}
-
-	found := false
-	for tarReader := tar.NewReader(r.Body); ; {
-		h, err := tarReader.Next()
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			t.Fatal(err)
-		}
-		if h.Name == "test.txt" {
-			found = true
-			break
-		}
-	}
-	if !found {
-		t.Fatalf("The created test file has not been found in the copied output")
-	}
-}
-
-func TestPostContainersCopyWhenContainerNotFound(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	r := httptest.NewRecorder()
-
-	var copyData engine.Env
-	copyData.Set("Resource", "/test.txt")
-	copyData.Set("HostPath", ".")
-
-	jsonData := bytes.NewBuffer(nil)
-	if err := copyData.Encode(jsonData); err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData)
-	if err != nil {
-		t.Fatal(err)
-	}
-	req.Header.Add("Content-Type", "application/json")
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	if r.Code != http.StatusNotFound {
-		t.Fatalf("404 expected for id_not_found Container, received %v", r.Code)
-	}
-}
-
-// Regression test for https://github.com/docker/docker/issues/6231
-func TestConstainersStartChunkedEncodingHostConfig(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	r := httptest.NewRecorder()
-
-	var testData engine.Env
-	testData.Set("Image", "docker-test-image")
-	testData.SetAuto("Volumes", map[string]struct{}{"/foo": {}})
-	testData.Set("Cmd", "true")
-	jsonData := bytes.NewBuffer(nil)
-	if err := testData.Encode(jsonData); err != nil {
-		t.Fatal(err)
-	}
-
-	req, err := http.NewRequest("POST", "/containers/create?name=chunk_test", jsonData)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req.Header.Add("Content-Type", "application/json")
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-
-	var testData2 engine.Env
-	testData2.SetAuto("Binds", []string{"/tmp:/foo"})
-	jsonData = bytes.NewBuffer(nil)
-	if err := testData2.Encode(jsonData); err != nil {
-		t.Fatal(err)
-	}
-
-	req, err = http.NewRequest("POST", "/containers/chunk_test/start", jsonData)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	req.Header.Add("Content-Type", "application/json")
-	// This is a cheat to make the http request do chunked encoding
-	// Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite
-	// http://golang.org/src/pkg/net/http/request.go?s=11980:12172
-	req.ContentLength = -1
-	server.ServeRequest(eng, api.APIVERSION, r, req)
-	assertHttpNotError(r, t)
-
-	type config struct {
-		HostConfig struct {
-			Binds []string
-		}
-	}
-
-	req, err = http.NewRequest("GET", "/containers/chunk_test/json", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	r2 := httptest.NewRecorder()
-	req.Header.Add("Content-Type", "application/json")
-	server.ServeRequest(eng, api.APIVERSION, r2, req)
-	assertHttpNotError(r, t)
-
-	c := config{}
-
-	json.Unmarshal(r2.Body.Bytes(), &c)
-
-	if len(c.HostConfig.Binds) == 0 {
-		t.Fatal("Chunked Encoding not handled")
-	}
-
-	if c.HostConfig.Binds[0] != "/tmp:/foo" {
-		t.Fatal("Chunked encoding not properly handled, execpted binds to be /tmp:/foo, got:", c.HostConfig.Binds[0])
-	}
-}
-
-// Mocked types for tests
-type NopConn struct {
-	io.ReadCloser
-	io.Writer
-}
-
-func (c *NopConn) LocalAddr() net.Addr                { return nil }
-func (c *NopConn) RemoteAddr() net.Addr               { return nil }
-func (c *NopConn) SetDeadline(t time.Time) error      { return nil }
-func (c *NopConn) SetReadDeadline(t time.Time) error  { return nil }
-func (c *NopConn) SetWriteDeadline(t time.Time) error { return nil }
-
-type hijackTester struct {
-	*httptest.ResponseRecorder
-	in  io.ReadCloser
-	out io.Writer
-}
-
-func (t *hijackTester) Hijack() (net.Conn, *bufio.ReadWriter, error) {
-	bufrw := bufio.NewReadWriter(bufio.NewReader(t.in), bufio.NewWriter(t.out))
-	conn := &NopConn{
-		ReadCloser: t.in,
-		Writer:     t.out,
-	}
-	return conn, bufrw, nil
-}
diff --git a/integration/commands_test.go b/integration/commands_test.go
deleted file mode 100644
index 6c6ad0e..0000000
--- a/integration/commands_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-package docker
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"strings"
-	"testing"
-	"time"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/pkg/common"
-	"github.com/docker/docker/pkg/term"
-	"github.com/kr/pty"
-)
-
-func closeWrap(args ...io.Closer) error {
-	e := false
-	ret := fmt.Errorf("Error closing elements")
-	for _, c := range args {
-		if err := c.Close(); err != nil {
-			e = true
-			ret = fmt.Errorf("%s\n%s", ret, err)
-		}
-	}
-	if e {
-		return ret
-	}
-	return nil
-}
-
-func setRaw(t *testing.T, c *daemon.Container) *term.State {
-	pty, err := c.GetPtyMaster()
-	if err != nil {
-		t.Fatal(err)
-	}
-	state, err := term.MakeRaw(pty.Fd())
-	if err != nil {
-		t.Fatal(err)
-	}
-	return state
-}
-
-func unsetRaw(t *testing.T, c *daemon.Container, state *term.State) {
-	pty, err := c.GetPtyMaster()
-	if err != nil {
-		t.Fatal(err)
-	}
-	term.RestoreTerminal(pty.Fd(), state)
-}
-
-func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container {
-	var container *daemon.Container
-
-	setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
-		for {
-			l := globalDaemon.List()
-			if len(l) == 1 && l[0].IsRunning() {
-				container = l[0]
-				break
-			}
-			time.Sleep(10 * time.Millisecond)
-		}
-	})
-
-	if container == nil {
-		t.Fatal("An error occured while waiting for the container to start")
-	}
-
-	return container
-}
-
-func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
-	c := make(chan bool)
-
-	// Make sure we are not too long
-	go func() {
-		time.Sleep(d)
-		c <- true
-	}()
-	go func() {
-		f()
-		c <- false
-	}()
-	if <-c && msg != "" {
-		t.Fatal(msg)
-	}
-}
-
-func expectPipe(expected string, r io.Reader) error {
-	o, err := bufio.NewReader(r).ReadString('\n')
-	if err != nil {
-		return err
-	}
-	if strings.Trim(o, " \r\n") != expected {
-		return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o)
-	}
-	return nil
-}
-
-func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error {
-	for i := 0; i < count; i++ {
-		if _, err := w.Write([]byte(input)); err != nil {
-			return err
-		}
-		if err := expectPipe(output, r); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// TestRunDetach checks attaching and detaching with the escape sequence.
-func TestRunDetach(t *testing.T) {
-	stdout, stdoutPipe := io.Pipe()
-	cpty, tty, err := pty.Open()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	ch := make(chan struct{})
-	go func() {
-		defer close(ch)
-		cli.CmdRun("-i", "-t", unitTestImageID, "cat")
-	}()
-
-	container := waitContainerStart(t, 10*time.Second)
-
-	state := setRaw(t, container)
-	defer unsetRaw(t, container, state)
-
-	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		cpty.Write([]byte{16})
-		time.Sleep(100 * time.Millisecond)
-		cpty.Write([]byte{17})
-	})
-
-	// wait for CmdRun to return
-	setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() {
-		<-ch
-	})
-	closeWrap(cpty, stdout, stdoutPipe)
-
-	time.Sleep(500 * time.Millisecond)
-	if !container.IsRunning() {
-		t.Fatal("The detached container should be still running")
-	}
-
-	setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() {
-		container.Kill()
-	})
-}
-
-// TestAttachDetach checks that attach in tty mode can be detached using the long container ID
-func TestAttachDetach(t *testing.T) {
-	stdout, stdoutPipe := io.Pipe()
-	cpty, tty, err := pty.Open()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	ch := make(chan struct{})
-	go func() {
-		defer close(ch)
-		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	container := waitContainerStart(t, 10*time.Second)
-
-	setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
-		buf := make([]byte, 1024)
-		n, err := stdout.Read(buf)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if strings.Trim(string(buf[:n]), " \r\n") != container.ID {
-			t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n])
-		}
-	})
-	setTimeout(t, "Starting container timed out", 10*time.Second, func() {
-		<-ch
-	})
-
-	state := setRaw(t, container)
-	defer unsetRaw(t, container, state)
-
-	stdout, stdoutPipe = io.Pipe()
-	cpty, tty, err = pty.Open()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil)
-
-	ch = make(chan struct{})
-	go func() {
-		defer close(ch)
-		if err := cli.CmdAttach(container.ID); err != nil {
-			if err != io.ErrClosedPipe {
-				t.Fatal(err)
-			}
-		}
-	}()
-
-	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil {
-			if err != io.ErrClosedPipe {
-				t.Fatal(err)
-			}
-		}
-	})
-
-	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		cpty.Write([]byte{16})
-		time.Sleep(100 * time.Millisecond)
-		cpty.Write([]byte{17})
-	})
-
-	// wait for CmdRun to return
-	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
-		<-ch
-	})
-
-	closeWrap(cpty, stdout, stdoutPipe)
-
-	time.Sleep(500 * time.Millisecond)
-	if !container.IsRunning() {
-		t.Fatal("The detached container should be still running")
-	}
-
-	setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() {
-		container.Kill()
-	})
-}
-
-// TestAttachDetachTruncatedID checks that attach in tty mode can be detached
-func TestAttachDetachTruncatedID(t *testing.T) {
-	stdout, stdoutPipe := io.Pipe()
-	cpty, tty, err := pty.Open()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	// Discard the CmdRun output
-	go stdout.Read(make([]byte, 1024))
-	setTimeout(t, "Starting container timed out", 2*time.Second, func() {
-		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	container := waitContainerStart(t, 10*time.Second)
-
-	state := setRaw(t, container)
-	defer unsetRaw(t, container, state)
-
-	stdout, stdoutPipe = io.Pipe()
-	cpty, tty, err = pty.Open()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil)
-
-	ch := make(chan struct{})
-	go func() {
-		defer close(ch)
-		if err := cli.CmdAttach(common.TruncateID(container.ID)); err != nil {
-			if err != io.ErrClosedPipe {
-				t.Fatal(err)
-			}
-		}
-	}()
-
-	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil {
-			if err != io.ErrClosedPipe {
-				t.Fatal(err)
-			}
-		}
-	})
-
-	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
-		cpty.Write([]byte{16})
-		time.Sleep(100 * time.Millisecond)
-		cpty.Write([]byte{17})
-	})
-
-	// wait for CmdRun to return
-	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
-		<-ch
-	})
-	closeWrap(cpty, stdout, stdoutPipe)
-
-	time.Sleep(500 * time.Millisecond)
-	if !container.IsRunning() {
-		t.Fatal("The detached container should be still running")
-	}
-
-	setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() {
-		container.Kill()
-	})
-}
-
-// Expected behaviour, the process stays alive when the client disconnects
-func TestAttachDisconnect(t *testing.T) {
-	stdout, stdoutPipe := io.Pipe()
-	cpty, tty, err := pty.Open()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	go func() {
-		// Start a process in daemon mode
-		if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
-			log.Debugf("Error CmdRun: %s", err)
-		}
-	}()
-
-	setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() {
-		if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
-		for {
-			l := globalDaemon.List()
-			if len(l) == 1 && l[0].IsRunning() {
-				break
-			}
-			time.Sleep(10 * time.Millisecond)
-		}
-	})
-
-	container := globalDaemon.List()[0]
-
-	// Attach to it
-	c1 := make(chan struct{})
-	go func() {
-		// We're simulating a disconnect so the return value doesn't matter. What matters is the
-		// fact that CmdAttach returns.
-		cli.CmdAttach(container.ID)
-		close(c1)
-	}()
-
-	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
-		if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil {
-			t.Fatal(err)
-		}
-	})
-	// Close pipes (client disconnects)
-	if err := closeWrap(cpty, stdout, stdoutPipe); err != nil {
-		t.Fatal(err)
-	}
-
-	// Wait for attach to finish, the client disconnected, therefore, Attach finished his job
-	setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
-		<-c1
-	})
-
-	// We closed stdin, expect /bin/cat to still be running
-	// Wait a little bit to make sure container.monitor() did his thing
-	_, err = container.WaitStop(500 * time.Millisecond)
-	if err == nil || !container.IsRunning() {
-		t.Fatalf("/bin/cat is not running after closing stdin")
-	}
-
-	// Try to avoid the timeout in destroy. Best effort, don't check error
-	cStdin := container.StdinPipe()
-	cStdin.Close()
-	container.WaitStop(-1 * time.Second)
-}
-
-// Expected behaviour: container gets deleted automatically after exit
-func TestRunAutoRemove(t *testing.T) {
-	t.Skip("Fixme. Skipping test for now, race condition")
-	stdout, stdoutPipe := io.Pipe()
-
-	cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil)
-	defer cleanup(globalEngine, t)
-
-	c := make(chan struct{})
-	go func() {
-		defer close(c)
-		if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	var temporaryContainerID string
-	setTimeout(t, "Reading command output time out", 2*time.Second, func() {
-		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
-		if err != nil {
-			t.Fatal(err)
-		}
-		temporaryContainerID = cmdOutput
-		if err := closeWrap(stdout, stdoutPipe); err != nil {
-			t.Fatal(err)
-		}
-	})
-
-	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
-		<-c
-	})
-
-	time.Sleep(500 * time.Millisecond)
-
-	if len(globalDaemon.List()) > 0 {
-		t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID)
-	}
-}
diff --git a/integration/container_test.go b/integration/container_test.go
deleted file mode 100644
index b6cbfd0..0000000
--- a/integration/container_test.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package docker
-
-import (
-	"io"
-	"io/ioutil"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/runconfig"
-)
-
-func TestRestartStdin(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image: GetTestImage(daemon).ID,
-		Cmd:   []string{"cat"},
-
-		OpenStdin: true,
-	},
-		&runconfig.HostConfig{},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer daemon.Rm(container)
-
-	stdin := container.StdinPipe()
-	stdout := container.StdoutPipe()
-	if err := container.Start(); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := io.WriteString(stdin, "hello world"); err != nil {
-		t.Fatal(err)
-	}
-	if err := stdin.Close(); err != nil {
-		t.Fatal(err)
-	}
-	container.WaitStop(-1 * time.Second)
-	output, err := ioutil.ReadAll(stdout)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := stdout.Close(); err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "hello world" {
-		t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
-	}
-
-	// Restart and try again
-	stdin = container.StdinPipe()
-	stdout = container.StdoutPipe()
-	if err := container.Start(); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := io.WriteString(stdin, "hello world #2"); err != nil {
-		t.Fatal(err)
-	}
-	if err := stdin.Close(); err != nil {
-		t.Fatal(err)
-	}
-	container.WaitStop(-1 * time.Second)
-	output, err = ioutil.ReadAll(stdout)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := stdout.Close(); err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "hello world #2" {
-		t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output))
-	}
-}
-
-func TestStdin(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image: GetTestImage(daemon).ID,
-		Cmd:   []string{"cat"},
-
-		OpenStdin: true,
-	},
-		&runconfig.HostConfig{},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer daemon.Rm(container)
-
-	stdin := container.StdinPipe()
-	stdout := container.StdoutPipe()
-	if err := container.Start(); err != nil {
-		t.Fatal(err)
-	}
-	defer stdin.Close()
-	defer stdout.Close()
-	if _, err := io.WriteString(stdin, "hello world"); err != nil {
-		t.Fatal(err)
-	}
-	if err := stdin.Close(); err != nil {
-		t.Fatal(err)
-	}
-	container.WaitStop(-1 * time.Second)
-	output, err := ioutil.ReadAll(stdout)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "hello world" {
-		t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
-	}
-}
-
-func TestTty(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image: GetTestImage(daemon).ID,
-		Cmd:   []string{"cat"},
-
-		OpenStdin: true,
-	},
-		&runconfig.HostConfig{},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer daemon.Rm(container)
-
-	stdin := container.StdinPipe()
-	stdout := container.StdoutPipe()
-	if err := container.Start(); err != nil {
-		t.Fatal(err)
-	}
-	defer stdin.Close()
-	defer stdout.Close()
-	if _, err := io.WriteString(stdin, "hello world"); err != nil {
-		t.Fatal(err)
-	}
-	if err := stdin.Close(); err != nil {
-		t.Fatal(err)
-	}
-	container.WaitStop(-1 * time.Second)
-	output, err := ioutil.ReadAll(stdout)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(output) != "hello world" {
-		t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output))
-	}
-}
-
-func BenchmarkRunSequential(b *testing.B) {
-	daemon := mkDaemon(b)
-	defer nuke(daemon)
-	for i := 0; i < b.N; i++ {
-		container, _, err := daemon.Create(&runconfig.Config{
-			Image: GetTestImage(daemon).ID,
-			Cmd:   []string{"echo", "-n", "foo"},
-		},
-			&runconfig.HostConfig{},
-			"",
-		)
-		if err != nil {
-			b.Fatal(err)
-		}
-		defer daemon.Rm(container)
-		output, err := container.Output()
-		if err != nil {
-			b.Fatal(err)
-		}
-		if string(output) != "foo" {
-			b.Fatalf("Unexpected output: %s", output)
-		}
-		if err := daemon.Rm(container); err != nil {
-			b.Fatal(err)
-		}
-	}
-}
-
-func BenchmarkRunParallel(b *testing.B) {
-	daemon := mkDaemon(b)
-	defer nuke(daemon)
-
-	var tasks []chan error
-
-	for i := 0; i < b.N; i++ {
-		complete := make(chan error)
-		tasks = append(tasks, complete)
-		go func(i int, complete chan error) {
-			container, _, err := daemon.Create(&runconfig.Config{
-				Image: GetTestImage(daemon).ID,
-				Cmd:   []string{"echo", "-n", "foo"},
-			},
-				&runconfig.HostConfig{},
-				"",
-			)
-			if err != nil {
-				complete <- err
-				return
-			}
-			defer daemon.Rm(container)
-			if err := container.Start(); err != nil {
-				complete <- err
-				return
-			}
-			if _, err := container.WaitStop(15 * time.Second); err != nil {
-				complete <- err
-				return
-			}
-			// if string(output) != "foo" {
-			// 	complete <- fmt.Errorf("Unexecpted output: %v", string(output))
-			// }
-			if err := daemon.Rm(container); err != nil {
-				complete <- err
-				return
-			}
-			complete <- nil
-		}(i, complete)
-	}
-	var errors []error
-	for _, task := range tasks {
-		err := <-task
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if len(errors) > 0 {
-		b.Fatal(errors)
-	}
-}
diff --git a/integration/graph_test.go b/integration/graph_test.go
deleted file mode 100644
index 8518fae..0000000
--- a/integration/graph_test.go
+++ /dev/null
@@ -1,319 +0,0 @@
-package docker
-
-import (
-	"errors"
-	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/graph"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/common"
-	"github.com/docker/docker/utils"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"testing"
-	"time"
-)
-
-func TestMount(t *testing.T) {
-	graph, driver := tempGraph(t)
-	defer os.RemoveAll(graph.Root)
-	defer driver.Cleanup()
-
-	archive, err := fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	image, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-	rootfs := path.Join(tmp, "rootfs")
-	if err := os.MkdirAll(rootfs, 0700); err != nil {
-		t.Fatal(err)
-	}
-	rw := path.Join(tmp, "rw")
-	if err := os.MkdirAll(rw, 0700); err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := driver.Get(image.ID, ""); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestInit(t *testing.T) {
-	graph, _ := tempGraph(t)
-	defer nukeGraph(graph)
-	// Root should exist
-	if _, err := os.Stat(graph.Root); err != nil {
-		t.Fatal(err)
-	}
-	// Map() should be empty
-	if l, err := graph.Map(); err != nil {
-		t.Fatal(err)
-	} else if len(l) != 0 {
-		t.Fatalf("len(Map()) should return %d, not %d", 0, len(l))
-	}
-}
-
-// Test that Register can be interrupted cleanly without side effects
-func TestInterruptedRegister(t *testing.T) {
-	graph, _ := tempGraph(t)
-	defer nukeGraph(graph)
-	badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
-	image := &image.Image{
-		ID:      common.GenerateRandomID(),
-		Comment: "testing",
-		Created: time.Now(),
-	}
-	w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling)
-	graph.Register(image, badArchive)
-	if _, err := graph.Get(image.ID); err == nil {
-		t.Fatal("Image should not exist after Register is interrupted")
-	}
-	// Registering the same image again should succeed if the first register was interrupted
-	goodArchive, err := fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := graph.Register(image, goodArchive); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
-//       create multiple, check the amount of images and paths, etc..)
-func TestGraphCreate(t *testing.T) {
-	graph, _ := tempGraph(t)
-	defer nukeGraph(graph)
-	archive, err := fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	img, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := utils.ValidateID(img.ID); err != nil {
-		t.Fatal(err)
-	}
-	if img.Comment != "Testing" {
-		t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment)
-	}
-	if img.DockerVersion != dockerversion.VERSION {
-		t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion)
-	}
-	images, err := graph.Map()
-	if err != nil {
-		t.Fatal(err)
-	} else if l := len(images); l != 1 {
-		t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
-	}
-	if images[img.ID] == nil {
-		t.Fatalf("Could not find image with id %s", img.ID)
-	}
-}
-
-func TestRegister(t *testing.T) {
-	graph, _ := tempGraph(t)
-	defer nukeGraph(graph)
-	archive, err := fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	image := &image.Image{
-		ID:      common.GenerateRandomID(),
-		Comment: "testing",
-		Created: time.Now(),
-	}
-	err = graph.Register(image, archive)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if images, err := graph.Map(); err != nil {
-		t.Fatal(err)
-	} else if l := len(images); l != 1 {
-		t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
-	}
-	if resultImg, err := graph.Get(image.ID); err != nil {
-		t.Fatal(err)
-	} else {
-		if resultImg.ID != image.ID {
-			t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID)
-		}
-		if resultImg.Comment != image.Comment {
-			t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment)
-		}
-	}
-}
-
-// Test that an image can be deleted by its shorthand prefix
-func TestDeletePrefix(t *testing.T) {
-	graph, _ := tempGraph(t)
-	defer nukeGraph(graph)
-	img := createTestImage(graph, t)
-	if err := graph.Delete(common.TruncateID(img.ID)); err != nil {
-		t.Fatal(err)
-	}
-	assertNImages(graph, t, 0)
-}
-
-func createTestImage(graph *graph.Graph, t *testing.T) *image.Image {
-	archive, err := fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	img, err := graph.Create(archive, "", "", "Test image", "", nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return img
-}
-
-func TestDelete(t *testing.T) {
-	graph, _ := tempGraph(t)
-	defer nukeGraph(graph)
-	archive, err := fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	assertNImages(graph, t, 0)
-	img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	assertNImages(graph, t, 1)
-	if err := graph.Delete(img.ID); err != nil {
-		t.Fatal(err)
-	}
-	assertNImages(graph, t, 0)
-
-	archive, err = fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Test 2 create (same name) / 1 delete
-	img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	archive, err = fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil {
-		t.Fatal(err)
-	}
-	assertNImages(graph, t, 2)
-	if err := graph.Delete(img1.ID); err != nil {
-		t.Fatal(err)
-	}
-	assertNImages(graph, t, 1)
-
-	// Test delete wrong name
-	if err := graph.Delete("Not_foo"); err == nil {
-		t.Fatalf("Deleting wrong ID should return an error")
-	}
-	assertNImages(graph, t, 1)
-
-	archive, err = fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Test delete twice (pull -> rm -> pull -> rm)
-	if err := graph.Register(img1, archive); err != nil {
-		t.Fatal(err)
-	}
-	if err := graph.Delete(img1.ID); err != nil {
-		t.Fatal(err)
-	}
-	assertNImages(graph, t, 1)
-}
-
-func TestByParent(t *testing.T) {
-	archive1, _ := fakeTar()
-	archive2, _ := fakeTar()
-	archive3, _ := fakeTar()
-
-	graph, _ := tempGraph(t)
-	defer nukeGraph(graph)
-	parentImage := &image.Image{
-		ID:      common.GenerateRandomID(),
-		Comment: "parent",
-		Created: time.Now(),
-		Parent:  "",
-	}
-	childImage1 := &image.Image{
-		ID:      common.GenerateRandomID(),
-		Comment: "child1",
-		Created: time.Now(),
-		Parent:  parentImage.ID,
-	}
-	childImage2 := &image.Image{
-		ID:      common.GenerateRandomID(),
-		Comment: "child2",
-		Created: time.Now(),
-		Parent:  parentImage.ID,
-	}
-	_ = graph.Register(parentImage, archive1)
-	_ = graph.Register(childImage1, archive2)
-	_ = graph.Register(childImage2, archive3)
-
-	byParent, err := graph.ByParent()
-	if err != nil {
-		t.Fatal(err)
-	}
-	numChildren := len(byParent[parentImage.ID])
-	if numChildren != 2 {
-		t.Fatalf("Expected 2 children, found %d", numChildren)
-	}
-}
-
-/*
- * HELPER FUNCTIONS
- */
-
-func assertNImages(graph *graph.Graph, t *testing.T, n int) {
-	if images, err := graph.Map(); err != nil {
-		t.Fatal(err)
-	} else if actualN := len(images); actualN != n {
-		t.Fatalf("Expected %d images, found %d", n, actualN)
-	}
-}
-
-func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) {
-	tmp, err := ioutil.TempDir("", "docker-graph-")
-	if err != nil {
-		t.Fatal(err)
-	}
-	driver, err := graphdriver.New(tmp, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	graph, err := graph.NewGraph(tmp, driver)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return graph, driver
-}
-
-func nukeGraph(graph *graph.Graph) {
-	graph.Driver().Cleanup()
-	os.RemoveAll(graph.Root)
-}
-
-func testArchive(t *testing.T) archive.Archive {
-	archive, err := fakeTar()
-	if err != nil {
-		t.Fatal(err)
-	}
-	return archive
-}
diff --git a/integration/https_test.go b/integration/https_test.go
deleted file mode 100644
index 17d6934..0000000
--- a/integration/https_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package docker
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"io/ioutil"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/api/client"
-)
-
-const (
-	errBadCertificate = "remote error: bad certificate"
-	errCaUnknown      = "x509: certificate signed by unknown authority"
-)
-
-func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config {
-	certPool := x509.NewCertPool()
-	file, err := ioutil.ReadFile("fixtures/https/ca.pem")
-	if err != nil {
-		t.Fatal(err)
-	}
-	certPool.AppendCertsFromPEM(file)
-
-	cert, err := tls.LoadX509KeyPair("fixtures/https/"+certFile, "fixtures/https/"+keyFile)
-	if err != nil {
-		t.Fatalf("Couldn't load X509 key pair: %s", err)
-	}
-	tlsConfig := &tls.Config{
-		RootCAs:      certPool,
-		Certificates: []tls.Certificate{cert},
-	}
-	return tlsConfig
-}
-
-// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint
-func TestHttpsInfo(t *testing.T) {
-	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto,
-		testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t))
-
-	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
-		if err := cli.CmdInfo(); err != nil {
-			t.Fatal(err)
-		}
-	})
-}
-
-// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint
-// by using a rogue client certificate and checks that it fails with the expected error.
-func TestHttpsInfoRogueCert(t *testing.T) {
-	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto,
-		testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t))
-
-	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
-		err := cli.CmdInfo()
-		if err == nil {
-			t.Fatal("Expected error but got nil")
-		}
-		if !strings.Contains(err.Error(), errBadCertificate) {
-			t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err)
-		}
-	})
-}
-
-// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint
-// which provides a rogue server certificate and checks that it fails with the expected error
-func TestHttpsInfoRogueServerCert(t *testing.T) {
-	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto,
-		testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t))
-
-	setTimeout(t, "Reading command output time out", 10*time.Second, func() {
-		err := cli.CmdInfo()
-		if err == nil {
-			t.Fatal("Expected error but got nil")
-		}
-
-		if !strings.Contains(err.Error(), errCaUnknown) {
-			t.Fatalf("Expected error: %s, got instead: %s", errCaUnknown, err)
-		}
-
-	})
-}
diff --git a/integration/runtime_test.go b/integration/runtime_test.go
deleted file mode 100644
index 153c385..0000000
--- a/integration/runtime_test.go
+++ /dev/null
@@ -1,915 +0,0 @@
-package docker
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	std_log "log"
-	"net"
-	"net/url"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"strings"
-	"syscall"
-	"testing"
-	"time"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/nat"
-	"github.com/docker/docker/pkg/common"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/reexec"
-	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
-)
-
-const (
-	unitTestImageName        = "docker-test-image"
-	unitTestImageID          = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
-	unitTestImageIDShort     = "83599e29c455"
-	unitTestNetworkBridge    = "testdockbr0"
-	unitTestStoreBase        = "/var/lib/docker/unit-tests"
-	unitTestDockerTmpdir     = "/var/lib/docker/tmp"
-	testDaemonAddr           = "127.0.0.1:4270"
-	testDaemonProto          = "tcp"
-	testDaemonHttpsProto     = "tcp"
-	testDaemonHttpsAddr      = "localhost:4271"
-	testDaemonRogueHttpsAddr = "localhost:4272"
-)
-
-var (
-	// FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted.
-	globalDaemon           *daemon.Daemon
-	globalEngine           *engine.Engine
-	globalHttpsEngine      *engine.Engine
-	globalRogueHttpsEngine *engine.Engine
-	startFds               int
-	startGoroutines        int
-)
-
-// FIXME: nuke() is deprecated by Daemon.Nuke()
-func nuke(daemon *daemon.Daemon) error {
-	return daemon.Nuke()
-}
-
-// FIXME: cleanup and nuke are redundant.
-func cleanup(eng *engine.Engine, t *testing.T) error {
-	daemon := mkDaemonFromEngine(eng, t)
-	for _, container := range daemon.List() {
-		container.Kill()
-		daemon.Rm(container)
-	}
-	job := eng.Job("images")
-	images, err := job.Stdout.AddTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	for _, image := range images.Data {
-		if image.Get("Id") != unitTestImageID {
-			eng.Job("image_delete", image.Get("Id")).Run()
-		}
-	}
-	return nil
-}
-
-func init() {
-	// Always use the same driver (vfs) for all integration tests.
-	// To test other drivers, we need a dedicated driver validation suite.
-	os.Setenv("DOCKER_DRIVER", "vfs")
-	os.Setenv("TEST", "1")
-	os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir)
-
-	// Hack to run sys init during unit testing
-	if reexec.Init() {
-		return
-	}
-
-	if uid := syscall.Geteuid(); uid != 0 {
-		log.Fatalf("docker tests need to be run as root")
-	}
-
-	// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
-	if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
-		src, err := os.Open(dockerinit)
-		if err != nil {
-			log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err)
-		}
-		defer src.Close()
-		dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
-		if err != nil {
-			log.Fatalf("Unable to create dockerinit in test directory: %s", err)
-		}
-		defer dst.Close()
-		if _, err := io.Copy(dst, src); err != nil {
-			log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err)
-		}
-		dst.Close()
-		src.Close()
-	}
-
-	// Setup the base daemon, which will be duplicated for each test.
-	// (no tests are run directly in the base)
-	setupBaseImage()
-
-	// Create the "global daemon" with a long-running daemons for integration tests
-	spawnGlobalDaemon()
-	spawnLegitHttpsDaemon()
-	spawnRogueHttpsDaemon()
-	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
-}
-
-func setupBaseImage() {
-	eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
-	job := eng.Job("image_inspect", unitTestImageName)
-	img, _ := job.Stdout.AddEnv()
-	// If the unit test is not found, try to download it.
-	if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID {
-		// Retrieve the Image
-		job = eng.Job("pull", unitTestImageName)
-		job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout))
-		if err := job.Run(); err != nil {
-			log.Fatalf("Unable to pull the test image: %s", err)
-		}
-	}
-}
-
-func spawnGlobalDaemon() {
-	if globalDaemon != nil {
-		log.Debugf("Global daemon already exists. Skipping.")
-		return
-	}
-	t := std_log.New(os.Stderr, "", 0)
-	eng := NewTestEngine(t)
-	globalEngine = eng
-	globalDaemon = mkDaemonFromEngine(eng, t)
-
-	// Spawn a Daemon
-	go func() {
-		log.Debugf("Spawning global daemon for integration tests")
-		listenURL := &url.URL{
-			Scheme: testDaemonProto,
-			Host:   testDaemonAddr,
-		}
-		job := eng.Job("serveapi", listenURL.String())
-		job.SetenvBool("Logging", true)
-		if err := job.Run(); err != nil {
-			log.Fatalf("Unable to spawn the test daemon: %s", err)
-		}
-	}()
-
-	// Give some time to ListenAndServer to actually start
-	// FIXME: use inmem transports instead of tcp
-	time.Sleep(time.Second)
-
-	if err := eng.Job("acceptconnections").Run(); err != nil {
-		log.Fatalf("Unable to accept connections for test api: %s", err)
-	}
-}
-
-func spawnLegitHttpsDaemon() {
-	if globalHttpsEngine != nil {
-		return
-	}
-	globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem",
-		"fixtures/https/server-cert.pem", "fixtures/https/server-key.pem")
-}
-
-func spawnRogueHttpsDaemon() {
-	if globalRogueHttpsEngine != nil {
-		return
-	}
-	globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem",
-		"fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem")
-}
-
-func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
-	t := std_log.New(os.Stderr, "", 0)
-	root, err := newTestDirectory(unitTestStoreBase)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false,
-	// and we want to set it to true.
-
-	eng := newTestEngine(t, true, root)
-
-	// Spawn a Daemon
-	go func() {
-		log.Debugf("Spawning https daemon for integration tests")
-		listenURL := &url.URL{
-			Scheme: testDaemonHttpsProto,
-			Host:   addr,
-		}
-		job := eng.Job("serveapi", listenURL.String())
-		job.SetenvBool("Logging", true)
-		job.SetenvBool("Tls", true)
-		job.SetenvBool("TlsVerify", true)
-		job.Setenv("TlsCa", cacert)
-		job.Setenv("TlsCert", cert)
-		job.Setenv("TlsKey", key)
-		if err := job.Run(); err != nil {
-			log.Fatalf("Unable to spawn the test daemon: %s", err)
-		}
-	}()
-
-	// Give some time to ListenAndServer to actually start
-	time.Sleep(time.Second)
-
-	if err := eng.Job("acceptconnections").Run(); err != nil {
-		log.Fatalf("Unable to accept connections for test api: %s", err)
-	}
-	return eng
-}
-
-// FIXME: test that ImagePull(json=true) send correct json output
-
-func GetTestImage(daemon *daemon.Daemon) *image.Image {
-	imgs, err := daemon.Graph().Map()
-	if err != nil {
-		log.Fatalf("Unable to get the test image: %s", err)
-	}
-	for _, image := range imgs {
-		if image.ID == unitTestImageID {
-			return image
-		}
-	}
-	log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs)
-	return nil
-}
-
-func TestDaemonCreate(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-
-	// Make sure we start we 0 containers
-	if len(daemon.List()) != 0 {
-		t.Errorf("Expected 0 containers, %v found", len(daemon.List()))
-	}
-
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image: GetTestImage(daemon).ID,
-		Cmd:   []string{"ls", "-al"},
-	},
-		&runconfig.HostConfig{},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	defer func() {
-		if err := daemon.Rm(container); err != nil {
-			t.Error(err)
-		}
-	}()
-
-	// Make sure we can find the newly created container with List()
-	if len(daemon.List()) != 1 {
-		t.Errorf("Expected 1 container, %v found", len(daemon.List()))
-	}
-
-	// Make sure the container List() returns is the right one
-	if daemon.List()[0].ID != container.ID {
-		t.Errorf("Unexpected container %v returned by List", daemon.List()[0])
-	}
-
-	// Make sure we can get the container with Get()
-	if _, err := daemon.Get(container.ID); err != nil {
-		t.Errorf("Unable to get newly created container")
-	}
-
-	// Make sure it is the right container
-	if c, _ := daemon.Get(container.ID); c != container {
-		t.Errorf("Get() returned the wrong container")
-	}
-
-	// Make sure Exists returns it as existing
-	if !daemon.Exists(container.ID) {
-		t.Errorf("Exists() returned false for a newly created container")
-	}
-
-	// Test that conflict error displays correct details
-	testContainer, _, _ := daemon.Create(
-		&runconfig.Config{
-			Image: GetTestImage(daemon).ID,
-			Cmd:   []string{"ls", "-al"},
-		},
-		&runconfig.HostConfig{},
-		"conflictname",
-	)
-	if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), common.TruncateID(testContainer.ID)) {
-		t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %v", err)
-	}
-
-	// Make sure create with bad parameters returns an error
-	if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, &runconfig.HostConfig{}, ""); err == nil {
-		t.Fatal("Builder.Create should throw an error when Cmd is missing")
-	}
-
-	if _, _, err := daemon.Create(
-		&runconfig.Config{
-			Image: GetTestImage(daemon).ID,
-			Cmd:   []string{},
-		},
-		&runconfig.HostConfig{},
-		"",
-	); err == nil {
-		t.Fatal("Builder.Create should throw an error when Cmd is empty")
-	}
-
-	config := &runconfig.Config{
-		Image:     GetTestImage(daemon).ID,
-		Cmd:       []string{"/bin/ls"},
-		PortSpecs: []string{"80"},
-	}
-	container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "")
-
-	_, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// test expose 80:8000
-	container, warnings, err := daemon.Create(&runconfig.Config{
-		Image:     GetTestImage(daemon).ID,
-		Cmd:       []string{"ls", "-al"},
-		PortSpecs: []string{"80:8000"},
-	},
-		&runconfig.HostConfig{},
-		"",
-	)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if warnings == nil || len(warnings) != 1 {
-		t.Error("Expected a warning, got none")
-	}
-}
-
-func TestDestroy(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image: GetTestImage(daemon).ID,
-		Cmd:   []string{"ls", "-al"},
-	},
-		&runconfig.HostConfig{},
-		"")
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Destroy
-	if err := daemon.Rm(container); err != nil {
-		t.Error(err)
-	}
-
-	// Make sure daemon.Exists() behaves correctly
-	if daemon.Exists("test_destroy") {
-		t.Errorf("Exists() returned true")
-	}
-
-	// Make sure daemon.List() doesn't list the destroyed container
-	if len(daemon.List()) != 0 {
-		t.Errorf("Expected 0 container, %v found", len(daemon.List()))
-	}
-
-	// Make sure daemon.Get() refuses to return the unexisting container
-	if c, _ := daemon.Get(container.ID); c != nil {
-		t.Errorf("Got a container that should not exist")
-	}
-
-	// Test double destroy
-	if err := daemon.Rm(container); err == nil {
-		// It should have failed
-		t.Errorf("Double destroy did not fail")
-	}
-}
-
-func TestGet(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-
-	container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
-	defer daemon.Rm(container1)
-
-	container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
-	defer daemon.Rm(container2)
-
-	container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t)
-	defer daemon.Rm(container3)
-
-	if c, _ := daemon.Get(container1.ID); c != container1 {
-		t.Errorf("Get(test1) returned %v while expecting %v", c, container1)
-	}
-
-	if c, _ := daemon.Get(container2.ID); c != container2 {
-		t.Errorf("Get(test2) returned %v while expecting %v", c, container2)
-	}
-
-	if c, _ := daemon.Get(container3.ID); c != container3 {
-		t.Errorf("Get(test3) returned %v while expecting %v", c, container3)
-	}
-
-}
-
-func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) {
-	var (
-		err          error
-		id           string
-		outputBuffer = bytes.NewBuffer(nil)
-		strPort      string
-		eng          = NewTestEngine(t)
-		daemon       = mkDaemonFromEngine(eng, t)
-		port         = 5554
-		p            nat.Port
-	)
-	defer func() {
-		if err != nil {
-			daemon.Nuke()
-		}
-	}()
-
-	for {
-		port += 1
-		strPort = strconv.Itoa(port)
-		var cmd string
-		if proto == "tcp" {
-			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
-		} else if proto == "udp" {
-			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
-		} else {
-			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
-		}
-		ep := make(map[nat.Port]struct{}, 1)
-		p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto))
-		ep[p] = struct{}{}
-
-		jobCreate := eng.Job("create")
-		jobCreate.Setenv("Image", unitTestImageID)
-		jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
-		jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
-		jobCreate.SetenvJson("ExposedPorts", ep)
-		jobCreate.Stdout.Add(outputBuffer)
-		if err := jobCreate.Run(); err != nil {
-			t.Fatal(err)
-		}
-		id = engine.Tail(outputBuffer, 1)
-		// FIXME: this relies on the undocumented behavior of daemon.Create
-		// which will return a nil error AND container if the exposed ports
-		// are invalid. That behavior should be fixed!
-		if id != "" {
-			break
-		}
-		t.Logf("Port %v already in use, trying another one", strPort)
-
-	}
-
-	jobStart := eng.Job("start", id)
-	portBindings := make(map[nat.Port][]nat.PortBinding)
-	portBindings[p] = []nat.PortBinding{
-		{},
-	}
-	if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
-		t.Fatal(err)
-	}
-	if err := jobStart.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	container, err := daemon.Get(id)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
-		for !container.IsRunning() {
-			time.Sleep(10 * time.Millisecond)
-		}
-	})
-
-	// Even if the state is running, lets give some time to lxc to spawn the process
-	container.WaitStop(500 * time.Millisecond)
-
-	strPort = container.NetworkSettings.Ports[p][0].HostPort
-	return daemon, container, strPort
-}
-
-// Run a container with a TCP port allocated, and test that it can receive connections on localhost
-func TestAllocateTCPPortLocalhost(t *testing.T) {
-	daemon, container, port := startEchoServerContainer(t, "tcp")
-	defer nuke(daemon)
-	defer container.Kill()
-
-	for i := 0; i != 10; i++ {
-		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer conn.Close()
-
-		input := bytes.NewBufferString("well hello there\n")
-		_, err = conn.Write(input.Bytes())
-		if err != nil {
-			t.Fatal(err)
-		}
-		buf := make([]byte, 16)
-		read := 0
-		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
-		read, err = conn.Read(buf)
-		if err != nil {
-			if err, ok := err.(*net.OpError); ok {
-				if err.Err == syscall.ECONNRESET {
-					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
-					conn.Close()
-					time.Sleep(time.Second)
-					continue
-				}
-				if err.Timeout() {
-					t.Log("Timeout, trying again")
-					conn.Close()
-					continue
-				}
-			}
-			t.Fatal(err)
-		}
-		output := string(buf[:read])
-		if !strings.Contains(output, "well hello there") {
-			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
-		} else {
-			return
-		}
-	}
-
-	t.Fatal("No reply from the container")
-}
-
-// Run a container with an UDP port allocated, and test that it can receive connections on localhost
-func TestAllocateUDPPortLocalhost(t *testing.T) {
-	daemon, container, port := startEchoServerContainer(t, "udp")
-	defer nuke(daemon)
-	defer container.Kill()
-
-	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer conn.Close()
-
-	input := bytes.NewBufferString("well hello there\n")
-	buf := make([]byte, 16)
-	// Try for a minute, for some reason the select in socat may take ages
-	// to return even though everything on the path seems fine (i.e: the
-	// UDPProxy forwards the traffic correctly and you can see the packets
-	// on the interface from within the container).
-	for i := 0; i != 120; i++ {
-		_, err := conn.Write(input.Bytes())
-		if err != nil {
-			t.Fatal(err)
-		}
-		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
-		read, err := conn.Read(buf)
-		if err == nil {
-			output := string(buf[:read])
-			if strings.Contains(output, "well hello there") {
-				return
-			}
-		}
-	}
-
-	t.Fatal("No reply from the container")
-}
-
-func TestRestore(t *testing.T) {
-	eng := NewTestEngine(t)
-	daemon1 := mkDaemonFromEngine(eng, t)
-	defer daemon1.Nuke()
-	// Create a container with one instance of docker
-	container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t)
-	defer daemon1.Rm(container1)
-
-	// Create a second container meant to be killed
-	container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t)
-	defer daemon1.Rm(container2)
-
-	// Start the container non blocking
-	if err := container2.Start(); err != nil {
-		t.Fatal(err)
-	}
-
-	if !container2.IsRunning() {
-		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
-	}
-
-	// Simulate a crash/manual quit of dockerd: process dies, states stays 'Running'
-	cStdin := container2.StdinPipe()
-	cStdin.Close()
-	if _, err := container2.WaitStop(2 * time.Second); err != nil {
-		t.Fatal(err)
-	}
-	container2.SetRunning(42)
-	container2.ToDisk()
-
-	if len(daemon1.List()) != 2 {
-		t.Errorf("Expected 2 container, %v found", len(daemon1.List()))
-	}
-	if err := container1.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if !container2.IsRunning() {
-		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
-	}
-
-	// Here are are simulating a docker restart - that is, reloading all containers
-	// from scratch
-	eng = newTestEngine(t, false, daemon1.Config().Root)
-	daemon2 := mkDaemonFromEngine(eng, t)
-	if len(daemon2.List()) != 2 {
-		t.Errorf("Expected 2 container, %v found", len(daemon2.List()))
-	}
-	runningCount := 0
-	for _, c := range daemon2.List() {
-		if c.IsRunning() {
-			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
-			runningCount++
-		}
-	}
-	if runningCount != 0 {
-		t.Fatalf("Expected 0 container alive, %d found", runningCount)
-	}
-	container3, err := daemon2.Get(container1.ID)
-	if err != nil {
-		t.Fatal("Unable to Get container")
-	}
-	if err := container3.Run(); err != nil {
-		t.Fatal(err)
-	}
-	container2.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
-}
-
-func TestDefaultContainerName(t *testing.T) {
-	eng := NewTestEngine(t)
-	daemon := mkDaemonFromEngine(eng, t)
-	defer nuke(daemon)
-
-	config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	container, err := daemon.Get(createNamedTestContainer(eng, config, t, "some_name"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	containerID := container.ID
-
-	if container.Name != "/some_name" {
-		t.Fatalf("Expect /some_name got %s", container.Name)
-	}
-
-	c, err := daemon.Get("/some_name")
-	if err != nil {
-		t.Fatalf("Couldn't retrieve test container as /some_name")
-	}
-	if c.ID != containerID {
-		t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
-	}
-}
-
-func TestRandomContainerName(t *testing.T) {
-	eng := NewTestEngine(t)
-	daemon := mkDaemonFromEngine(eng, t)
-	defer nuke(daemon)
-
-	config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	container, err := daemon.Get(createTestContainer(eng, config, t))
-	if err != nil {
-		t.Fatal(err)
-	}
-	containerID := container.ID
-
-	if container.Name == "" {
-		t.Fatalf("Expected not empty container name")
-	}
-
-	if c, err := daemon.Get(container.Name); err != nil {
-		log.Fatalf("Could not lookup container %s by its name", container.Name)
-	} else if c.ID != containerID {
-		log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
-	}
-}
-
-func TestContainerNameValidation(t *testing.T) {
-	eng := NewTestEngine(t)
-	daemon := mkDaemonFromEngine(eng, t)
-	defer nuke(daemon)
-
-	for _, test := range []struct {
-		Name  string
-		Valid bool
-	}{
-		{"abc-123_AAA.1", true},
-		{"\000asdf", false},
-	} {
-		config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
-		if err != nil {
-			if !test.Valid {
-				continue
-			}
-			t.Fatal(err)
-		}
-
-		var outputBuffer = bytes.NewBuffer(nil)
-		job := eng.Job("create", test.Name)
-		if err := job.ImportEnv(config); err != nil {
-			t.Fatal(err)
-		}
-		job.Stdout.Add(outputBuffer)
-		if err := job.Run(); err != nil {
-			if !test.Valid {
-				continue
-			}
-			t.Fatal(err)
-		}
-
-		container, err := daemon.Get(engine.Tail(outputBuffer, 1))
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if container.Name != "/"+test.Name {
-			t.Fatalf("Expect /%s got %s", test.Name, container.Name)
-		}
-
-		if c, err := daemon.Get("/" + test.Name); err != nil {
-			t.Fatalf("Couldn't retrieve test container as /%s", test.Name)
-		} else if c.ID != container.ID {
-			t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID)
-		}
-	}
-
-}
-
-func TestLinkChildContainer(t *testing.T) {
-	eng := NewTestEngine(t)
-	daemon := mkDaemonFromEngine(eng, t)
-	defer nuke(daemon)
-
-	config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	webapp, err := daemon.GetByName("/webapp")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if webapp.ID != container.ID {
-		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
-	}
-
-	config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	childContainer, err := daemon.Get(createTestContainer(eng, config, t))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
-		t.Fatal(err)
-	}
-
-	// Get the child by it's new name
-	db, err := daemon.GetByName("/webapp/db")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if db.ID != childContainer.ID {
-		t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID)
-	}
-}
-
-func TestGetAllChildren(t *testing.T) {
-	eng := NewTestEngine(t)
-	daemon := mkDaemonFromEngine(eng, t)
-	defer nuke(daemon)
-
-	config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	webapp, err := daemon.GetByName("/webapp")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if webapp.ID != container.ID {
-		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
-	}
-
-	config, _, _, err = parseRun([]string{unitTestImageID, "echo test"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	childContainer, err := daemon.Get(createTestContainer(eng, config, t))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
-		t.Fatal(err)
-	}
-
-	children, err := daemon.Children("/webapp")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if children == nil {
-		t.Fatal("Children should not be nil")
-	}
-	if len(children) == 0 {
-		t.Fatal("Children should not be empty")
-	}
-
-	for key, value := range children {
-		if key != "/webapp/db" {
-			t.Fatalf("Expected /webapp/db got %s", key)
-		}
-		if value.ID != childContainer.ID {
-			t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID)
-		}
-	}
-}
-
-func TestDestroyWithInitLayer(t *testing.T) {
-	daemon := mkDaemon(t)
-	defer nuke(daemon)
-
-	container, _, err := daemon.Create(&runconfig.Config{
-		Image: GetTestImage(daemon).ID,
-		Cmd:   []string{"ls", "-al"},
-	},
-		&runconfig.HostConfig{},
-		"")
-
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Destroy
-	if err := daemon.Rm(container); err != nil {
-		t.Fatal(err)
-	}
-
-	// Make sure daemon.Exists() behaves correctly
-	if daemon.Exists("test_destroy") {
-		t.Fatalf("Exists() returned true")
-	}
-
-	// Make sure daemon.List() doesn't list the destroyed container
-	if len(daemon.List()) != 0 {
-		t.Fatalf("Expected 0 container, %v found", len(daemon.List()))
-	}
-
-	driver := daemon.Graph().Driver()
-
-	// Make sure that the container does not exist in the driver
-	if _, err := driver.Get(container.ID, ""); err == nil {
-		t.Fatal("Conttainer should not exist in the driver")
-	}
-
-	// Make sure that the init layer is removed from the driver
-	if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil {
-		t.Fatal("Container's init layer should not exist in the driver")
-	}
-}
diff --git a/integration/server_test.go b/integration/server_test.go
deleted file mode 100644
index 6d12ad3..0000000
--- a/integration/server_test.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package docker
-
-import (
-	"bytes"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/engine"
-)
-
-func TestCreateNumberHostname(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	createTestContainer(eng, config, t)
-}
-
-func TestCommit(t *testing.T) {
-	eng := NewTestEngine(t)
-	b := &builder.BuilderJob{Engine: eng}
-	b.Install()
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	id := createTestContainer(eng, config, t)
-
-	job := eng.Job("commit", id)
-	job.Setenv("repo", "testrepo")
-	job.Setenv("tag", "testtag")
-	job.SetenvJson("config", config)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestMergeConfigOnCommit(t *testing.T) {
-	eng := NewTestEngine(t)
-	b := &builder.BuilderJob{Engine: eng}
-	b.Install()
-	runtime := mkDaemonFromEngine(eng, t)
-	defer runtime.Nuke()
-
-	container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t)
-	defer runtime.Rm(container1)
-
-	config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"})
-	if err != nil {
-		t.Error(err)
-	}
-
-	job := eng.Job("commit", container1.ID)
-	job.Setenv("repo", "testrepo")
-	job.Setenv("tag", "testtag")
-	job.SetenvJson("config", config)
-	var outputBuffer = bytes.NewBuffer(nil)
-	job.Stdout.Add(outputBuffer)
-	if err := job.Run(); err != nil {
-		t.Error(err)
-	}
-
-	container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t)
-	defer runtime.Rm(container2)
-
-	job = eng.Job("container_inspect", container1.Name)
-	baseContainer, _ := job.Stdout.AddEnv()
-	if err := job.Run(); err != nil {
-		t.Error(err)
-	}
-
-	job = eng.Job("container_inspect", container2.Name)
-	commitContainer, _ := job.Stdout.AddEnv()
-	if err := job.Run(); err != nil {
-		t.Error(err)
-	}
-
-	baseConfig := baseContainer.GetSubEnv("Config")
-	commitConfig := commitContainer.GetSubEnv("Config")
-
-	if commitConfig.Get("Env") != baseConfig.Get("Env") {
-		t.Fatalf("Env config in committed container should be %v, was %v",
-			baseConfig.Get("Env"), commitConfig.Get("Env"))
-	}
-
-	if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" {
-		t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s",
-			baseConfig.Get("Cmd"))
-	}
-
-	if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" {
-		t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s",
-			commitConfig.Get("Cmd"))
-	}
-}
-
-func TestRestartKillWait(t *testing.T) {
-	eng := NewTestEngine(t)
-	runtime := mkDaemonFromEngine(eng, t)
-	defer runtime.Nuke()
-
-	config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	id := createTestContainer(eng, config, t)
-
-	job := eng.Job("containers")
-	job.SetenvBool("all", true)
-	outs, err := job.Stdout.AddListTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if len(outs.Data) != 1 {
-		t.Errorf("Expected 1 container, %v found", len(outs.Data))
-	}
-
-	job = eng.Job("start", id)
-	if err := job.ImportEnv(hostConfig); err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	job = eng.Job("kill", id)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	eng = newTestEngine(t, false, runtime.Config().Root)
-
-	job = eng.Job("containers")
-	job.SetenvBool("all", true)
-	outs, err = job.Stdout.AddListTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if len(outs.Data) != 1 {
-		t.Errorf("Expected 1 container, %v found", len(outs.Data))
-	}
-
-	setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() {
-		job = eng.Job("wait", outs.Data[0].Get("Id"))
-		if err := job.Run(); err != nil {
-			t.Fatal(err)
-		}
-	})
-}
-
-func TestCreateStartRestartStopStartKillRm(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	id := createTestContainer(eng, config, t)
-
-	job := eng.Job("containers")
-	job.SetenvBool("all", true)
-	outs, err := job.Stdout.AddListTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if len(outs.Data) != 1 {
-		t.Errorf("Expected 1 container, %v found", len(outs.Data))
-	}
-
-	job = eng.Job("start", id)
-	if err := job.ImportEnv(hostConfig); err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	job = eng.Job("restart", id)
-	job.SetenvInt("t", 2)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	job = eng.Job("stop", id)
-	job.SetenvInt("t", 2)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	job = eng.Job("start", id)
-	if err := job.ImportEnv(hostConfig); err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Job("kill", id).Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	// FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty")
-	job = eng.Job("rm", id)
-	job.SetenvBool("removeVolume", true)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	job = eng.Job("containers")
-	job.SetenvBool("all", true)
-	outs, err = job.Stdout.AddListTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if len(outs.Data) != 0 {
-		t.Errorf("Expected 0 container, %v found", len(outs.Data))
-	}
-}
-
-func TestRunWithTooLowMemoryLimit(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer mkDaemonFromEngine(eng, t).Nuke()
-
-	// Try to create a container with a memory limit of 1 byte less than the minimum allowed limit.
-	job := eng.Job("create")
-	job.Setenv("Image", unitTestImageID)
-	job.Setenv("Memory", "524287")
-	job.Setenv("CpuShares", "1000")
-	job.SetenvList("Cmd", []string{"/bin/cat"})
-	if err := job.Run(); err == nil {
-		t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!")
-	}
-}
-
-func TestImagesFilter(t *testing.T) {
-	eng := NewTestEngine(t)
-	defer nuke(mkDaemonFromEngine(eng, t))
-
-	if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil {
-		t.Fatal(err)
-	}
-
-	images := getImages(eng, t, false, "utest*/*")
-
-	if len(images.Data[0].GetList("RepoTags")) != 2 {
-		t.Fatal("incorrect number of matches returned")
-	}
-
-	images = getImages(eng, t, false, "utest")
-
-	if len(images.Data[0].GetList("RepoTags")) != 1 {
-		t.Fatal("incorrect number of matches returned")
-	}
-
-	images = getImages(eng, t, false, "utest*")
-
-	if len(images.Data[0].GetList("RepoTags")) != 1 {
-		t.Fatal("incorrect number of matches returned")
-	}
-
-	images = getImages(eng, t, false, "*5000*/*")
-
-	if len(images.Data[0].GetList("RepoTags")) != 1 {
-		t.Fatal("incorrect number of matches returned")
-	}
-}
diff --git a/integration/utils_test.go b/integration/utils_test.go
deleted file mode 100644
index 2e90e4f..0000000
--- a/integration/utils_test.go
+++ /dev/null
@@ -1,360 +0,0 @@
-package docker
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
-	"github.com/docker/docker/builtins"
-	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/engine"
-	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/docker/registry"
-	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
-)
-
-type Fataler interface {
-	Fatal(...interface{})
-}
-
-// This file contains utility functions for docker's unit test suite.
-// It has to be named XXX_test.go, apparently, in other to access private functions
-// from other XXX_test.go functions.
-
-// Create a temporary daemon suitable for unit testing.
-// Call t.Fatal() at the first error.
-func mkDaemon(f Fataler) *daemon.Daemon {
-	eng := newTestEngine(f, false, "")
-	return mkDaemonFromEngine(eng, f)
-}
-
-func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler, name string) (shortId string) {
-	job := eng.Job("create", name)
-	if err := job.ImportEnv(config); err != nil {
-		f.Fatal(err)
-	}
-	var outputBuffer = bytes.NewBuffer(nil)
-	job.Stdout.Add(outputBuffer)
-	if err := job.Run(); err != nil {
-		f.Fatal(err)
-	}
-	return engine.Tail(outputBuffer, 1)
-}
-
-func createTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler) (shortId string) {
-	return createNamedTestContainer(eng, config, f, "")
-}
-
-func startContainer(eng *engine.Engine, id string, t Fataler) {
-	job := eng.Job("start", id)
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func containerRun(eng *engine.Engine, id string, t Fataler) {
-	startContainer(eng, id, t)
-	containerWait(eng, id, t)
-}
-
-func containerFileExists(eng *engine.Engine, id, dir string, t Fataler) bool {
-	c := getContainer(eng, id, t)
-	if err := c.Mount(); err != nil {
-		t.Fatal(err)
-	}
-	defer c.Unmount()
-	if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil {
-		if os.IsNotExist(err) {
-			return false
-		}
-		t.Fatal(err)
-	}
-	return true
-}
-
-func containerAttach(eng *engine.Engine, id string, t Fataler) (io.WriteCloser, io.ReadCloser) {
-	c := getContainer(eng, id, t)
-	i := c.StdinPipe()
-	o := c.StdoutPipe()
-	return i, o
-}
-
-func containerWait(eng *engine.Engine, id string, t Fataler) int {
-	ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second)
-	return ex
-}
-
-func containerWaitTimeout(eng *engine.Engine, id string, t Fataler) error {
-	_, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond)
-	return err
-}
-
-func containerKill(eng *engine.Engine, id string, t Fataler) {
-	if err := eng.Job("kill", id).Run(); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func containerRunning(eng *engine.Engine, id string, t Fataler) bool {
-	return getContainer(eng, id, t).IsRunning()
-}
-
-func containerAssertExists(eng *engine.Engine, id string, t Fataler) {
-	getContainer(eng, id, t)
-}
-
-func containerAssertNotExists(eng *engine.Engine, id string, t Fataler) {
-	daemon := mkDaemonFromEngine(eng, t)
-	if c, _ := daemon.Get(id); c != nil {
-		t.Fatal(fmt.Errorf("Container %s should not exist", id))
-	}
-}
-
-// assertHttpNotError expect the given response to not have an error.
-// Otherwise the it causes the test to fail.
-func assertHttpNotError(r *httptest.ResponseRecorder, t Fataler) {
-	// Non-error http status are [200, 400)
-	if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest {
-		t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code))
-	}
-}
-
-// assertHttpError expect the given response to have an error.
-// Otherwise the it causes the test to fail.
-func assertHttpError(r *httptest.ResponseRecorder, t Fataler) {
-	// Non-error http status are [200, 400)
-	if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) {
-		t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code))
-	}
-}
-
-func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container {
-	daemon := mkDaemonFromEngine(eng, t)
-	c, err := daemon.Get(id)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return c
-}
-
-func mkDaemonFromEngine(eng *engine.Engine, t Fataler) *daemon.Daemon {
-	iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon")
-	if iDaemon == nil {
-		panic("Legacy daemon field not set in engine")
-	}
-	daemon, ok := iDaemon.(*daemon.Daemon)
-	if !ok {
-		panic("Legacy daemon field in engine does not cast to *daemon.Daemon")
-	}
-	return daemon
-}
-
-func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine {
-	if root == "" {
-		if dir, err := newTestDirectory(unitTestStoreBase); err != nil {
-			t.Fatal(err)
-		} else {
-			root = dir
-		}
-	}
-	os.MkdirAll(root, 0700)
-
-	eng := engine.New()
-	eng.Logging = false
-	// Load default plugins
-	if err := builtins.Register(eng); err != nil {
-		t.Fatal(err)
-	}
-	// load registry service
-	if err := registry.NewService(nil).Install(eng); err != nil {
-		t.Fatal(err)
-	}
-
-	// (This is manually copied and modified from main() until we have a more generic plugin system)
-	cfg := &daemon.Config{
-		Root:        root,
-		AutoRestart: autorestart,
-		ExecDriver:  "native",
-		// Either InterContainerCommunication or EnableIptables must be set,
-		// otherwise NewDaemon will fail because of conflicting settings.
-		InterContainerCommunication: true,
-		TrustKeyPath:                filepath.Join(root, "key.json"),
-		LogConfig:                   runconfig.LogConfig{Type: "json-file"},
-	}
-	d, err := daemon.NewDaemon(cfg, eng)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := d.Install(eng); err != nil {
-		t.Fatal(err)
-	}
-	return eng
-}
-
-func NewTestEngine(t Fataler) *engine.Engine {
-	return newTestEngine(t, false, "")
-}
-
-func newTestDirectory(templateDir string) (dir string, err error) {
-	return utils.TestDirectory(templateDir)
-}
-
-func getCallerName(depth int) string {
-	return utils.GetCallerName(depth)
-}
-
-// Write `content` to the file at path `dst`, creating it if necessary,
-// as well as any missing directories.
-// The file is truncated if it already exists.
-// Call t.Fatal() at the first error.
-func writeFile(dst, content string, t *testing.T) {
-	// Create subdirectories if necessary
-	if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) {
-		t.Fatal(err)
-	}
-	f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Write content (truncate if it exists)
-	if _, err := io.Copy(f, strings.NewReader(content)); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// Return the contents of file at path `src`.
-// Call t.Fatal() at the first error (including if the file doesn't exist)
-func readFile(src string, t *testing.T) (content string) {
-	f, err := os.Open(src)
-	if err != nil {
-		t.Fatal(err)
-	}
-	data, err := ioutil.ReadAll(f)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return string(data)
-}
-
-// Create a test container from the given daemon `r` and run arguments `args`.
-// If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is
-// dynamically replaced by the current test image.
-// The caller is responsible for destroying the container.
-// Call t.Fatal() at the first error.
-func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) {
-	config, hc, _, err := parseRun(args)
-	defer func() {
-		if err != nil && t != nil {
-			t.Fatal(err)
-		}
-	}()
-	if err != nil {
-		return nil, nil, err
-	}
-	if config.Image == "_" {
-		config.Image = GetTestImage(r).ID
-	}
-	c, _, err := r.Create(config, nil, "")
-	if err != nil {
-		return nil, nil, err
-	}
-	// NOTE: hostConfig is ignored.
-	// If `args` specify privileged mode, custom lxc conf, external mount binds,
-	// port redirects etc. they will be ignored.
-	// This is because the correct way to set these things is to pass environment
-	// to the `start` job.
-	// FIXME: this helper function should be deprecated in favor of calling
-	// `create` and `start` jobs directly.
-	return c, hc, nil
-}
-
-// Create a test container, start it, wait for it to complete, destroy it,
-// and return its standard output as a string.
-// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
-// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
-func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) {
-	defer func() {
-		if err != nil && t != nil {
-			t.Fatal(err)
-		}
-	}()
-	container, hc, err := mkContainer(r, args, t)
-	if err != nil {
-		return "", err
-	}
-	defer r.Rm(container)
-	stdout := container.StdoutPipe()
-	defer stdout.Close()
-
-	job := eng.Job("start", container.ID)
-	if err := job.ImportEnv(hc); err != nil {
-		return "", err
-	}
-	if err := job.Run(); err != nil {
-		return "", err
-	}
-
-	container.WaitStop(-1 * time.Second)
-	data, err := ioutil.ReadAll(stdout)
-	if err != nil {
-		return "", err
-	}
-	output = string(data)
-	return
-}
-
-// FIXME: this is duplicated from graph_test.go in the docker package.
-func fakeTar() (io.ReadCloser, error) {
-	content := []byte("Hello world!\n")
-	buf := new(bytes.Buffer)
-	tw := tar.NewWriter(buf)
-	for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
-		hdr := new(tar.Header)
-		hdr.Size = int64(len(content))
-		hdr.Name = name
-		if err := tw.WriteHeader(hdr); err != nil {
-			return nil, err
-		}
-		tw.Write([]byte(content))
-	}
-	tw.Close()
-	return ioutil.NopCloser(buf), nil
-}
-
-func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table {
-	return getImages(eng, t, true, "")
-}
-
-func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) *engine.Table {
-	job := eng.Job("images")
-	job.SetenvBool("all", all)
-	job.Setenv("filter", filter)
-	images, err := job.Stdout.AddListTable()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := job.Run(); err != nil {
-		t.Fatal(err)
-	}
-	return images
-
-}
-
-func parseRun(args []string) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) {
-	cmd := flag.NewFlagSet("run", flag.ContinueOnError)
-	cmd.SetOutput(ioutil.Discard)
-	cmd.Usage = nil
-	return runconfig.Parse(cmd, args)
-}
diff --git a/integration/z_final_test.go b/integration/z_final_test.go
deleted file mode 100644
index ad1eb43..0000000
--- a/integration/z_final_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package docker
-
-import (
-	"github.com/docker/docker/utils"
-	"runtime"
-	"testing"
-)
-
-func displayFdGoroutines(t *testing.T) {
-	t.Logf("Fds: %d, Goroutines: %d", utils.GetTotalUsedFds(), runtime.NumGoroutine())
-}
-
-func TestFinal(t *testing.T) {
-	nuke(globalDaemon)
-	t.Logf("Start Fds: %d, Start Goroutines: %d", startFds, startGoroutines)
-	displayFdGoroutines(t)
-}
diff --git a/links/links.go b/links/links.go
index 96c18cc..a756c8b 100644
--- a/links/links.go
+++ b/links/links.go
@@ -2,10 +2,10 @@
 
 import (
 	"fmt"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/nat"
 	"path"
 	"strings"
+
+	"github.com/docker/docker/nat"
 )
 
 type Link struct {
@@ -15,10 +15,9 @@
 	ChildEnvironment []string
 	Ports            []nat.Port
 	IsEnabled        bool
-	eng              *engine.Engine
 }
 
-func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}, eng *engine.Engine) (*Link, error) {
+func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) (*Link, error) {
 
 	var (
 		i     int
@@ -36,7 +35,6 @@
 		ParentIP:         parentIP,
 		ChildEnvironment: env,
 		Ports:            ports,
-		eng:              eng,
 	}
 	return l, nil
 
@@ -107,8 +105,8 @@
 
 	if l.ChildEnvironment != nil {
 		for _, v := range l.ChildEnvironment {
-			parts := strings.Split(v, "=")
-			if len(parts) != 2 {
+			parts := strings.SplitN(v, "=", 2)
+			if len(parts) < 2 {
 				continue
 			}
 			// Ignore a few variables that are added during docker build (and not really relevant to linked containers)
@@ -140,39 +138,10 @@
 }
 
 func (l *Link) Enable() error {
-	// -A == iptables append flag
-	if err := l.toggle("-A", false); err != nil {
-		return err
-	}
 	l.IsEnabled = true
 	return nil
 }
 
 func (l *Link) Disable() {
-	// We do not care about errors here because the link may not
-	// exist in iptables
-	// -D == iptables delete flag
-	l.toggle("-D", true)
-
 	l.IsEnabled = false
 }
-
-func (l *Link) toggle(action string, ignoreErrors bool) error {
-	job := l.eng.Job("link", action)
-
-	job.Setenv("ParentIP", l.ParentIP)
-	job.Setenv("ChildIP", l.ChildIP)
-	job.SetenvBool("IgnoreErrors", ignoreErrors)
-
-	out := make([]string, len(l.Ports))
-	for i, p := range l.Ports {
-		out[i] = string(p)
-	}
-	job.SetenvList("Ports", out)
-
-	if err := job.Run(); err != nil {
-		// TODO: get ouput from job
-		return err
-	}
-	return nil
-}
diff --git a/links/links_test.go b/links/links_test.go
index ba548fc..e639e2c 100644
--- a/links/links_test.go
+++ b/links/links_test.go
@@ -2,16 +2,17 @@
 
 import (
 	"fmt"
-	"github.com/docker/docker/nat"
 	"strings"
 	"testing"
+
+	"github.com/docker/docker/nat"
 )
 
 func TestLinkNaming(t *testing.T) {
 	ports := make(nat.PortSet)
 	ports[nat.Port("6379/tcp")] = struct{}{}
 
-	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports, nil)
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -41,7 +42,7 @@
 	ports := make(nat.PortSet)
 	ports[nat.Port("6379/tcp")] = struct{}{}
 
-	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports, nil)
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -72,7 +73,7 @@
 	ports := make(nat.PortSet)
 	ports[nat.Port("6379/tcp")] = struct{}{}
 
-	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil)
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -115,7 +116,7 @@
 	ports[nat.Port("6380/tcp")] = struct{}{}
 	ports[nat.Port("6381/tcp")] = struct{}{}
 
-	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil)
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -164,7 +165,7 @@
 	ports[nat.Port("6380/tcp")] = struct{}{}
 	ports[nat.Port("6381/tcp")] = struct{}{}
 
-	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil)
+	link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/nat/nat.go b/nat/nat.go
index fdecf3f..2cec2e8 100644
--- a/nat/nat.go
+++ b/nat/nat.go
@@ -34,6 +34,9 @@
 }
 
 func ParsePort(rawPort string) (int, error) {
+	if len(rawPort) == 0 {
+		return 0, nil
+	}
 	port, err := strconv.ParseUint(rawPort, 10, 16)
 	if err != nil {
 		return 0, err
diff --git a/nat/sort.go b/nat/sort.go
index f36c12f..6441936 100644
--- a/nat/sort.go
+++ b/nat/sort.go
@@ -1,6 +1,10 @@
 package nat
 
-import "sort"
+import (
+	"sort"
+	"strconv"
+	"strings"
+)
 
 type portSorter struct {
 	ports []Port
@@ -26,3 +30,63 @@
 	s := &portSorter{ports, predicate}
 	sort.Sort(s)
 }
+
+type portMapEntry struct {
+	port    Port
+	binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int      { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the port so that the order is:
+// 1. port with larger specified bindings
+// 2. larger port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+	pi, pj := s[i].port, s[j].port
+	hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+	return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respected mapping. The ports
+// will explicit HostPort will be placed first.
+func SortPortMap(ports []Port, bindings PortMap) {
+	s := portMapSorter{}
+	for _, p := range ports {
+		if binding, ok := bindings[p]; ok {
+			for _, b := range binding {
+				s = append(s, portMapEntry{port: p, binding: b})
+			}
+		} else {
+			s = append(s, portMapEntry{port: p})
+		}
+		bindings[p] = []PortBinding{}
+	}
+
+	sort.Sort(s)
+	var (
+		i  int
+		pm = make(map[Port]struct{})
+	)
+	// reorder ports
+	for _, entry := range s {
+		if _, ok := pm[entry.port]; !ok {
+			ports[i] = entry.port
+			pm[entry.port] = struct{}{}
+			i++
+		}
+		// reorder bindings for this port
+		bindings[entry.port] = append(bindings[entry.port], entry.binding)
+	}
+}
+
+func toInt(s string) int64 {
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
diff --git a/nat/sort_test.go b/nat/sort_test.go
index 5d490e3..ba24cdb 100644
--- a/nat/sort_test.go
+++ b/nat/sort_test.go
@@ -2,6 +2,7 @@
 
 import (
 	"fmt"
+	"reflect"
 	"testing"
 )
 
@@ -39,3 +40,46 @@
 		t.Fail()
 	}
 }
+
+func TestSortPortMap(t *testing.T) {
+	ports := []Port{
+		Port("22/tcp"),
+		Port("22/udp"),
+		Port("8000/tcp"),
+		Port("6379/tcp"),
+		Port("9999/tcp"),
+	}
+
+	portMap := PortMap{
+		Port("22/tcp"): []PortBinding{
+			{},
+		},
+		Port("8000/tcp"): []PortBinding{
+			{},
+		},
+		Port("6379/tcp"): []PortBinding{
+			{},
+			{HostIp: "0.0.0.0", HostPort: "32749"},
+		},
+		Port("9999/tcp"): []PortBinding{
+			{HostIp: "0.0.0.0", HostPort: "40000"},
+		},
+	}
+
+	SortPortMap(ports, portMap)
+	if !reflect.DeepEqual(ports, []Port{
+		Port("9999/tcp"),
+		Port("6379/tcp"),
+		Port("8000/tcp"),
+		Port("22/tcp"),
+		Port("22/udp"),
+	}) {
+		t.Errorf("failed to prioritize port with explicit mappings, got %v", ports)
+	}
+	if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{
+		{HostIp: "0.0.0.0", HostPort: "32749"},
+		{},
+	}) {
+		t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm)
+	}
+}
diff --git a/opts/opts.go b/opts/opts.go
index e867c0a..c330c27 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -8,24 +8,36 @@
 	"regexp"
 	"strings"
 
-	"github.com/docker/docker/api"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/ulimit"
-	"github.com/docker/docker/utils"
 )
 
 var (
-	alphaRegexp  = regexp.MustCompile(`[a-zA-Z]`)
-	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+	alphaRegexp     = regexp.MustCompile(`[a-zA-Z]`)
+	domainRegexp    = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+	DefaultHTTPHost = "127.0.0.1" // Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
+	// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
+	// is not supplied. A better longer term solution would be to use a named
+	// pipe as the default on the Windows daemon.
+	DefaultHTTPPort   = 2375                   // Default HTTP Port
+	DefaultUnixSocket = "/var/run/docker.sock" // Docker daemon by default always listens on the default unix socket
 )
 
 func ListVar(values *[]string, names []string, usage string) {
 	flag.Var(newListOptsRef(values, nil), names, usage)
 }
 
+func MapVar(values map[string]string, names []string, usage string) {
+	flag.Var(newMapOpt(values, nil), names, usage)
+}
+
+func LogOptsVar(values map[string]string, names []string, usage string) {
+	flag.Var(newMapOpt(values, ValidateLogOpts), names, usage)
+}
+
 func HostListVar(values *[]string, names []string, usage string) {
-	flag.Var(newListOptsRef(values, api.ValidateHost), names, usage)
+	flag.Var(newListOptsRef(values, ValidateHost), names, usage)
 }
 
 func IPListVar(values *[]string, names []string, usage string) {
@@ -126,10 +138,53 @@
 	return len((*opts.values))
 }
 
+//MapOpts type
+type MapOpts struct {
+	values    map[string]string
+	validator ValidatorFctType
+}
+
+func (opts *MapOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	vals := strings.SplitN(value, "=", 2)
+	if len(vals) == 1 {
+		(opts.values)[vals[0]] = ""
+	} else {
+		(opts.values)[vals[0]] = vals[1]
+	}
+	return nil
+}
+
+func (opts *MapOpts) String() string {
+	return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+func newMapOpt(values map[string]string, validator ValidatorFctType) *MapOpts {
+	return &MapOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
 // Validators
 type ValidatorFctType func(val string) (string, error)
 type ValidatorFctListType func(val string) ([]string, error)
 
+func ValidateLogOpts(val string) (string, error) {
+	allowedKeys := map[string]string{}
+	vals := strings.Split(val, "=")
+	if allowedKeys[vals[0]] != "" {
+		return val, nil
+	}
+	return "", fmt.Errorf("%s is not a valid log opt", vals[0])
+}
+
 func ValidateAttach(val string) (string, error) {
 	s := strings.ToLower(val)
 	for _, str := range []string{"stdin", "stdout", "stderr"} {
@@ -141,7 +196,7 @@
 }
 
 func ValidateLink(val string) (string, error) {
-	if _, err := parsers.PartParser("name:alias", val); err != nil {
+	if _, _, err := parsers.ParseLink(val); err != nil {
 		return val, err
 	}
 	return val, nil
@@ -174,7 +229,7 @@
 	if len(arr) > 1 {
 		return val, nil
 	}
-	if !utils.DoesEnvExist(val) {
+	if !doesEnvExist(val) {
 		return val, nil
 	}
 	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
@@ -192,9 +247,8 @@
 	_, err := net.ParseMAC(strings.TrimSpace(val))
 	if err != nil {
 		return "", err
-	} else {
-		return val, nil
 	}
+	return val, nil
 }
 
 // Validates domain for resolvconf search configuration.
@@ -235,3 +289,21 @@
 	}
 	return val, nil
 }
+
+func ValidateHost(val string) (string, error) {
+	host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
+	if err != nil {
+		return val, err
+	}
+	return host, nil
+}
+
+func doesEnvExist(name string) bool {
+	for _, entry := range os.Environ() {
+		parts := strings.SplitN(entry, "=", 2)
+		if parts[0] == name {
+			return true
+		}
+	}
+	return false
+}
diff --git a/opts/opts_test.go b/opts/opts_test.go
index 8370926..dfad430 100644
--- a/opts/opts_test.go
+++ b/opts/opts_test.go
@@ -1,6 +1,7 @@
 package opts
 
 import (
+	"fmt"
 	"strings"
 	"testing"
 )
@@ -28,6 +29,31 @@
 
 }
 
+func TestMapOpts(t *testing.T) {
+	tmpMap := make(map[string]string)
+	o := newMapOpt(tmpMap, logOptsValidator)
+	o.Set("max-size=1")
+	if o.String() != "map[max-size:1]" {
+		t.Errorf("%s != [map[max-size:1]", o.String())
+	}
+
+	o.Set("max-file=2")
+	if len(tmpMap) != 2 {
+		t.Errorf("map length %d != 2", len(tmpMap))
+	}
+
+	if tmpMap["max-file"] != "2" {
+		t.Errorf("max-file = %s != 2", tmpMap["max-file"])
+	}
+
+	if tmpMap["max-size"] != "1" {
+		t.Errorf("max-size = %s != 1", tmpMap["max-size"])
+	}
+	if o.Set("dummy-val=3") == nil {
+		t.Errorf("validator is not being called")
+	}
+}
+
 func TestValidateMACAddress(t *testing.T) {
 	if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil {
 		t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err)
@@ -152,3 +178,12 @@
 		}
 	}
 }
+
+func logOptsValidator(val string) (string, error) {
+	allowedKeys := map[string]string{"max-size": "1", "max-file": "2"}
+	vals := strings.Split(val, "=")
+	if allowedKeys[vals[0]] != "" {
+		return val, nil
+	}
+	return "", fmt.Errorf("invalid key %s", vals[0])
+}
diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go
index bfa6e18..cde4de5 100644
--- a/pkg/archive/archive.go
+++ b/pkg/archive/archive.go
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"archive/tar"
 	"bufio"
 	"bytes"
 	"compress/bzip2"
@@ -11,14 +12,12 @@
 	"io/ioutil"
 	"os"
 	"os/exec"
-	"path"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"syscall"
 
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/promise"
@@ -78,7 +77,7 @@
 		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
 	} {
 		if len(source) < len(m) {
-			log.Debugf("Len too short")
+			logrus.Debugf("Len too short")
 			continue
 		}
 		if bytes.Compare(m, source[:len(m)]) == 0 {
@@ -292,17 +291,8 @@
 		file.Close()
 
 	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
-		mode := uint32(hdr.Mode & 07777)
-		switch hdr.Typeflag {
-		case tar.TypeBlock:
-			mode |= syscall.S_IFBLK
-		case tar.TypeChar:
-			mode |= syscall.S_IFCHR
-		case tar.TypeFifo:
-			mode |= syscall.S_IFIFO
-		}
-
-		if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+		// Handle this is an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
 			return err
 		}
 
@@ -331,15 +321,18 @@
 		}
 
 	case tar.TypeXGlobalHeader:
-		log.Debugf("PAX Global Extended Headers found and ignored")
+		logrus.Debugf("PAX Global Extended Headers found and ignored")
 		return nil
 
 	default:
 		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
 	}
 
-	if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
-		return err
+	// Lchown is not supported on Windows
+	if runtime.GOOS != "windows" {
+		if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
+			return err
+		}
 	}
 
 	for key, value := range hdr.Xattrs {
@@ -350,15 +343,19 @@
 
 	// There is no LChmod, so ignore mode for symlink. Also, this
 	// must happen after chown, as that can modify the file mode
-	if hdr.Typeflag != tar.TypeSymlink {
-		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
-			return err
-		}
+	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+		return err
 	}
 
 	ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
-	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
-	if hdr.Typeflag != tar.TypeSymlink {
+	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm
+	if hdr.Typeflag == tar.TypeLink {
+		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+			if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+				return err
+			}
+		}
+	} else if hdr.Typeflag != tar.TypeSymlink {
 		if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
 			return err
 		}
@@ -376,25 +373,16 @@
 	return TarWithOptions(path, &TarOptions{Compression: compression})
 }
 
-func escapeName(name string) string {
-	escaped := make([]byte, 0)
-	for i, c := range []byte(name) {
-		if i == 0 && c == '/' {
-			continue
-		}
-		// all printable chars except "-" which is 0x2d
-		if (0x20 <= c && c <= 0x7E) && c != 0x2d {
-			escaped = append(escaped, c)
-		} else {
-			escaped = append(escaped, fmt.Sprintf("\\%03o", c)...)
-		}
-	}
-	return string(escaped)
-}
-
 // TarWithOptions creates an archive from the directory at `path`, only including files whose relative
 // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
 func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
+
+	if err != nil {
+		return nil, err
+	}
+
 	pipeReader, pipeWriter := io.Pipe()
 
 	compressWriter, err := CompressStream(pipeWriter, options.Compression)
@@ -426,7 +414,7 @@
 		for _, include := range options.IncludeFiles {
 			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
 				if err != nil {
-					log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
 					return nil
 				}
 
@@ -445,15 +433,15 @@
 				// is asking for that file no matter what - which is true
 				// for some files, like .dockerignore and Dockerfile (sometimes)
 				if include != relFilePath {
-					skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns)
+					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
 					if err != nil {
-						log.Debugf("Error matching %s", relFilePath, err)
+						logrus.Debugf("Error matching %s", relFilePath, err)
 						return err
 					}
 				}
 
 				if skip {
-					if f.IsDir() {
+					if !exceptions && f.IsDir() {
 						return filepath.SkipDir
 					}
 					return nil
@@ -474,7 +462,7 @@
 				}
 
 				if err := ta.addTarFile(filePath, relFilePath); err != nil {
-					log.Debugf("Can't add file %s to tar: %s", filePath, err)
+					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
 				}
 				return nil
 			})
@@ -482,13 +470,13 @@
 
 		// Make sure to check the error on Close.
 		if err := ta.TarWriter.Close(); err != nil {
-			log.Debugf("Can't close tar writer: %s", err)
+			logrus.Debugf("Can't close tar writer: %s", err)
 		}
 		if err := compressWriter.Close(); err != nil {
-			log.Debugf("Can't close compress writer: %s", err)
+			logrus.Debugf("Can't close compress writer: %s", err)
 		}
 		if err := pipeWriter.Close(); err != nil {
-			log.Debugf("Can't close pipe writer: %s", err)
+			logrus.Debugf("Can't close pipe writer: %s", err)
 		}
 	}()
 
@@ -529,7 +517,7 @@
 			parent := filepath.Dir(hdr.Name)
 			parentPath := filepath.Join(dest, parent)
 			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = os.MkdirAll(parentPath, 0777)
+				err = system.MkdirAll(parentPath, 0777)
 				if err != nil {
 					return err
 				}
@@ -606,7 +594,7 @@
 }
 
 func (archiver *Archiver) TarUntar(src, dst string) error {
-	log.Debugf("TarUntar(%s %s)", src, dst)
+	logrus.Debugf("TarUntar(%s %s)", src, dst)
 	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
 	if err != nil {
 		return err
@@ -648,11 +636,11 @@
 		return archiver.CopyFileWithTar(src, dst)
 	}
 	// Create dst, copy src's content into it
-	log.Debugf("Creating dest directory: %s", dst)
-	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
 		return err
 	}
-	log.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
 	return archiver.TarUntar(src, dst)
 }
 
@@ -665,7 +653,7 @@
 }
 
 func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
-	log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
 	srcSt, err := os.Stat(src)
 	if err != nil {
 		return err
@@ -673,12 +661,12 @@
 	if srcSt.IsDir() {
 		return fmt.Errorf("Can't copy a directory")
 	}
-	// Clean up the trailing /
-	if dst[len(dst)-1] == '/' {
-		dst = path.Join(dst, filepath.Base(src))
+	// Clean up the trailing slash
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
 	}
 	// Create the holding directory if necessary
-	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
 		return err
 	}
 
@@ -791,9 +779,6 @@
 	if _, err := io.Copy(f, src); err != nil {
 		return nil, err
 	}
-	if err = f.Sync(); err != nil {
-		return nil, err
-	}
 	if _, err := f.Seek(0, 0); err != nil {
 		return nil, err
 	}
diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go
index 6cd95d5..f24f628 100644
--- a/pkg/archive/archive_test.go
+++ b/pkg/archive/archive_test.go
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"archive/tar"
 	"bytes"
 	"fmt"
 	"io"
@@ -14,9 +15,149 @@
 	"testing"
 	"time"
 
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/pkg/system"
 )
 
+func TestIsArchiveNilHeader(t *testing.T) {
+	out := IsArchive(nil)
+	if out {
+		t.Fatalf("isArchive should return false as nil is not a valid archive header")
+	}
+}
+
+func TestIsArchiveInvalidHeader(t *testing.T) {
+	header := []byte{0x00, 0x01, 0x02}
+	out := IsArchive(header)
+	if out {
+		t.Fatalf("isArchive should return false as %s is not a valid archive header", header)
+	}
+}
+
+func TestIsArchiveBzip2(t *testing.T) {
+	header := []byte{0x42, 0x5A, 0x68}
+	out := IsArchive(header)
+	if !out {
+		t.Fatalf("isArchive should return true as %s is a bz2 header", header)
+	}
+}
+
+func TestIsArchive7zip(t *testing.T) {
+	header := []byte{0x50, 0x4b, 0x03, 0x04}
+	out := IsArchive(header)
+	if out {
+		t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header)
+	}
+}
+
+func TestDecompressStreamGzip(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Fail to create an archive file for test : %s.", output)
+	}
+	archive, err := os.Open("/tmp/archive.gz")
+	_, err = DecompressStream(archive)
+	if err != nil {
+		t.Fatalf("Failed to decompress a gzip file.")
+	}
+}
+
+func TestDecompressStreamBzip2(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Fail to create an archive file for test : %s.", output)
+	}
+	archive, err := os.Open("/tmp/archive.bz2")
+	_, err = DecompressStream(archive)
+	if err != nil {
+		t.Fatalf("Failed to decompress a bzip2 file.")
+	}
+}
+
+func TestDecompressStreamXz(t *testing.T) {
+	cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("Fail to create an archive file for test : %s.", output)
+	}
+	archive, err := os.Open("/tmp/archive.xz")
+	_, err = DecompressStream(archive)
+	if err != nil {
+		t.Fatalf("Failed to decompress a xz file.")
+	}
+}
+
+func TestCompressStreamXzUnsuported(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, Xz)
+	if err == nil {
+		t.Fatalf("Should fail as xz is unsupported for compression format.")
+	}
+}
+
+func TestCompressStreamBzip2Unsupported(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, Xz)
+	if err == nil {
+		t.Fatalf("Should fail as xz is unsupported for compression format.")
+	}
+}
+
+func TestCompressStreamInvalid(t *testing.T) {
+	dest, err := os.Create("/tmp/dest")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	_, err = CompressStream(dest, -1)
+	if err == nil {
+		t.Fatalf("Should fail as xz is unsupported for compression format.")
+	}
+}
+
+func TestExtensionInvalid(t *testing.T) {
+	compression := Compression(-1)
+	output := compression.Extension()
+	if output != "" {
+		t.Fatalf("The extension of an invalid compression should be an empty string.")
+	}
+}
+
+func TestExtensionUncompressed(t *testing.T) {
+	compression := Uncompressed
+	output := compression.Extension()
+	if output != "tar" {
+		t.Fatalf("The extension of a uncompressed archive should be 'tar'.")
+	}
+}
+func TestExtensionBzip2(t *testing.T) {
+	compression := Bzip2
+	output := compression.Extension()
+	if output != "tar.bz2" {
+		t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'")
+	}
+}
+func TestExtensionGzip(t *testing.T) {
+	compression := Gzip
+	output := compression.Extension()
+	if output != "tar.gz" {
+		t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'")
+	}
+}
+func TestExtensionXz(t *testing.T) {
+	compression := Xz
+	output := compression.Extension()
+	if output != "tar.xz" {
+		t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'")
+	}
+}
+
 func TestCmdStreamLargeStderr(t *testing.T) {
 	cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
 	out, err := CmdStream(cmd, nil)
@@ -66,6 +207,315 @@
 	}
 }
 
+func TestUntarPathWithInvalidDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	invalidDestFolder := path.Join(tempFolder, "invalidDest")
+	// Create a src file
+	srcFile := path.Join(tempFolder, "src")
+	_, err = os.Create(srcFile)
+	if err != nil {
+		t.Fatalf("Fail to create the source file")
+	}
+	err = UntarPath(srcFile, invalidDestFolder)
+	if err == nil {
+		t.Fatalf("UntarPath with invalid destination path should throw an error.")
+	}
+}
+
+func TestUntarPathWithInvalidSrc(t *testing.T) {
+	dest, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	defer os.RemoveAll(dest)
+	err = UntarPath("/invalid/path", dest)
+	if err == nil {
+		t.Fatalf("UntarPath with invalid src path should throw an error.")
+	}
+}
+
+func TestUntarPath(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := path.Join(tmpFolder, "src")
+	tarFile := path.Join(tmpFolder, "src.tar")
+	os.Create(path.Join(tmpFolder, "src"))
+	cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFolder := path.Join(tmpFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	err = UntarPath(tarFile, destFolder)
+	if err != nil {
+		t.Fatalf("UntarPath shouldn't throw an error, %s.", err)
+	}
+	expectedFile := path.Join(destFolder, srcFile)
+	_, err = os.Stat(expectedFile)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+// Do the same test as above but with the destination as file, it should fail
+func TestUntarPathWithDestinationFile(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := path.Join(tmpFolder, "src")
+	tarFile := path.Join(tmpFolder, "src.tar")
+	os.Create(path.Join(tmpFolder, "src"))
+	cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFile := path.Join(tmpFolder, "dest")
+	_, err = os.Create(destFile)
+	if err != nil {
+		t.Fatalf("Fail to create the destination file")
+	}
+	err = UntarPath(tarFile, destFile)
+	if err == nil {
+		t.Fatalf("UntarPath should throw an error if the destination if a file")
+	}
+}
+
+// Do the same test as above but with the destination folder already exists
+// and the destination file is a directory
+// It's working, see https://github.com/docker/docker/issues/10040
+func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) {
+	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpFolder)
+	srcFile := path.Join(tmpFolder, "src")
+	tarFile := path.Join(tmpFolder, "src.tar")
+	os.Create(srcFile)
+	cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile)
+	_, err = cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+	destFolder := path.Join(tmpFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatalf("Fail to create the destination folder")
+	}
+	// Let's create a folder that will has the same path as the extracted file (from tar)
+	destSrcFileAsFolder := path.Join(destFolder, srcFile)
+	err = os.MkdirAll(destSrcFileAsFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = UntarPath(tarFile, destFolder)
+	if err != nil {
+		t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder")
+	}
+}
+
+func TestCopyWithTarInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(nil)
+	}
+	destFolder := path.Join(tempFolder, "dest")
+	invalidSrc := path.Join(tempFolder, "doesnotexists")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyWithTar(invalidSrc, destFolder)
+	if err == nil {
+		t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+	}
+}
+
+func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(nil)
+	}
+	srcFolder := path.Join(tempFolder, "src")
+	inexistentDestFolder := path.Join(tempFolder, "doesnotexists")
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyWithTar(srcFolder, inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+	}
+	_, err = os.Stat(inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+	}
+}
+
+// Test CopyWithTar with a file as src
+func TestCopyWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	srcFolder := path.Join(folder, "src")
+	src := path.Join(folder, path.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	err = CopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content
+	if err != nil {
+		t.Fatalf("Destination file should be the same as the source.")
+	}
+}
+
+// Test CopyWithTar with a folder as src
+func TestCopyWithTarSrcFolder(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	src := path.Join(folder, path.Join("src", "folder"))
+	err = os.MkdirAll(src, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777)
+	err = CopyWithTar(src, dest)
+	if err != nil {
+		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	// FIXME Check the content (the file inside)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
+func TestCopyFileWithTarInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tempFolder)
+	destFolder := path.Join(tempFolder, "dest")
+	err = os.MkdirAll(destFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	invalidFile := path.Join(tempFolder, "doesnotexists")
+	err = CopyFileWithTar(invalidFile, destFolder)
+	if err == nil {
+		t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+	}
+}
+
+func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(nil)
+	}
+	defer os.RemoveAll(tempFolder)
+	srcFile := path.Join(tempFolder, "src")
+	inexistentDestFolder := path.Join(tempFolder, "doesnotexists")
+	_, err = os.Create(srcFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyFileWithTar(srcFile, inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+	}
+	_, err = os.Stat(inexistentDestFolder)
+	if err != nil {
+		t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+	}
+	// FIXME Test the src file and content
+}
+
+func TestCopyFileWithTarSrcFolder(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	src := path.Join(folder, "srcfolder")
+	err = os.MkdirAll(src, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = CopyFileWithTar(src, dest)
+	if err == nil {
+		t.Fatalf("CopyFileWithTar should throw an error with a folder.")
+	}
+}
+
+func TestCopyFileWithTarSrcFile(t *testing.T) {
+	folder, err := ioutil.TempDir("", "docker-archive-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(folder)
+	dest := path.Join(folder, "dest")
+	srcFolder := path.Join(folder, "src")
+	src := path.Join(folder, path.Join("src", "src"))
+	err = os.MkdirAll(srcFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = os.MkdirAll(dest, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	err = CopyWithTar(src, dest+"/")
+	if err != nil {
+		t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
+	}
+	_, err = os.Stat(dest)
+	if err != nil {
+		t.Fatalf("Destination folder should contain the source file but did not.")
+	}
+}
+
 func TestTarFiles(t *testing.T) {
 	// try without hardlinks
 	if err := checkNoChanges(1000, false); err != nil {
@@ -179,11 +629,56 @@
 	}
 }
 
+func TestTarUntarWithXattr(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range []Compression{
+		Uncompressed,
+		Gzip,
+	} {
+		changes, err := tarUntar(t, origin, &TarOptions{
+			Compression:     c,
+			ExcludePatterns: []string{"3"},
+		})
+
+		if err != nil {
+			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+		}
+
+		if len(changes) != 1 || changes[0].Path != "/3" {
+			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+		}
+		capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability")
+		if capability == nil && capability[0] != 0x00 {
+			t.Fatalf("Untar should have kept the 'security.capability' xattr.")
+		}
+	}
+}
+
 func TestTarWithOptions(t *testing.T) {
 	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
 	if err != nil {
 		t.Fatal(err)
 	}
+	if _, err := ioutil.TempDir(origin, "folder"); err != nil {
+		t.Fatal(err)
+	}
 	defer os.RemoveAll(origin)
 	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
 		t.Fatal(err)
@@ -196,8 +691,11 @@
 		opts       *TarOptions
 		numChanges int
 	}{
-		{&TarOptions{IncludeFiles: []string{"1"}}, 1},
+		{&TarOptions{IncludeFiles: []string{"1"}}, 2},
 		{&TarOptions{ExcludePatterns: []string{"2"}}, 1},
+		{&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
+		{&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
+		{&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4},
 	}
 	for _, testCase := range cases {
 		changes, err := tarUntar(t, origin, testCase.opts)
@@ -256,6 +754,58 @@
 	}
 }
 
+func TestTarWithBlockCharFifo(t *testing.T) {
+	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(origin)
+	if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+	if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+		t.Fatal(err)
+	}
+
+	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dest)
+
+	// we'll do this in two steps to separate failure
+	fh, err := Tar(origin, Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// ensure we can read the whole thing with no error, before writing back out
+	buf, err := ioutil.ReadAll(fh)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bRdr := bytes.NewReader(buf)
+	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	changes, err := ChangesDirs(origin, dest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(changes) > 0 {
+		t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes)
+	}
+}
+
 func TestTarWithHardLink(t *testing.T) {
 	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
 	if err != nil {
@@ -435,6 +985,34 @@
 	}
 }
 
+func TestUntarHardlinkToSymlink(t *testing.T) {
+	for i, headers := range [][]*tar.Header{
+		{
+			{
+				Name:     "symlink1",
+				Typeflag: tar.TypeSymlink,
+				Linkname: "regfile",
+				Mode:     0644,
+			},
+			{
+				Name:     "symlink2",
+				Typeflag: tar.TypeLink,
+				Linkname: "symlink1",
+				Mode:     0644,
+			},
+			{
+				Name:     "regfile",
+				Typeflag: tar.TypeReg,
+				Mode:     0644,
+			},
+		},
+	} {
+		if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil {
+			t.Fatalf("i=%d. %v", i, err)
+		}
+	}
+}
+
 func TestUntarInvalidHardlink(t *testing.T) {
 	for i, headers := range [][]*tar.Header{
 		{ // try reading victim/hello (../)
diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go
index cbce65e..8a15cfe 100644
--- a/pkg/archive/archive_unix.go
+++ b/pkg/archive/archive_unix.go
@@ -3,11 +3,12 @@
 package archive
 
 import (
+	"archive/tar"
 	"errors"
 	"os"
 	"syscall"
 
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+	"github.com/docker/docker/pkg/system"
 )
 
 // canonicalTarNameForPath returns platform-specific filepath
@@ -36,8 +37,8 @@
 	inode = uint64(s.Ino)
 
 	// Currently go does not fil in the major/minors
-	if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
-		s.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
+	if s.Mode&syscall.S_IFBLK != 0 ||
+		s.Mode&syscall.S_IFCHR != 0 {
 		hdr.Devmajor = int64(major(uint64(s.Rdev)))
 		hdr.Devminor = int64(minor(uint64(s.Rdev)))
 	}
@@ -52,3 +53,37 @@
 func minor(device uint64) uint64 {
 	return (device & 0xff) | ((device >> 12) & 0xfff00)
 }
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	mode := uint32(hdr.Mode & 07777)
+	switch hdr.Typeflag {
+	case tar.TypeBlock:
+		mode |= syscall.S_IFBLK
+	case tar.TypeChar:
+		mode |= syscall.S_IFCHR
+	case tar.TypeFifo:
+		mode |= syscall.S_IFIFO
+	}
+
+	if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+		return err
+	}
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	if hdr.Typeflag == tar.TypeLink {
+		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+	} else if hdr.Typeflag != tar.TypeSymlink {
+		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go
index 6caef3b..10db4bd 100644
--- a/pkg/archive/archive_windows.go
+++ b/pkg/archive/archive_windows.go
@@ -3,11 +3,10 @@
 package archive
 
 import (
+	"archive/tar"
 	"fmt"
 	"os"
 	"strings"
-
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 // canonicalTarNameForPath returns platform-specific filepath
@@ -15,11 +14,11 @@
 // path.
 func CanonicalTarNameForPath(p string) (string, error) {
 	// windows: convert windows style relative path with backslashes
-	// into forward slashes. since windows does not allow '/' or '\'
+	// into forward slashes. Since windows does not allow '/' or '\'
 	// in file names, it is mostly safe to replace however we must
 	// check just in case
 	if strings.Contains(p, "/") {
-		return "", fmt.Errorf("windows path contains forward slash: %s", p)
+		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
 	}
 	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
 
@@ -39,3 +38,13 @@
 	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
 	return
 }
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
diff --git a/pkg/archive/archive_windows_test.go b/pkg/archive/archive_windows_test.go
index b33e0fb..72bc71e 100644
--- a/pkg/archive/archive_windows_test.go
+++ b/pkg/archive/archive_windows_test.go
@@ -20,7 +20,7 @@
 		if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail {
 			t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
 		} else if v.shouldFail && err == nil {
-			t.Fatalf("canonical path call should have pailed with error. in=%s out=%s", v.in, out)
+			t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out)
 		} else if !v.shouldFail && out != v.expected {
 			t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
 		}
diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go
index c3cb4eb..affafad 100644
--- a/pkg/archive/changes.go
+++ b/pkg/archive/changes.go
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"archive/tar"
 	"bytes"
 	"fmt"
 	"io"
@@ -11,9 +12,7 @@
 	"syscall"
 	"time"
 
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/system"
 )
@@ -175,10 +174,6 @@
 	return filepath.Join(info.parent.path(), info.name)
 }
 
-func (info *FileInfo) isDir() bool {
-	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR
-}
-
 func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 
 	sizeAtEntry := len(*changes)
@@ -215,13 +210,7 @@
 			// be visible when actually comparing the stat fields. The only time this
 			// breaks down is if some code intentionally hides a change by setting
 			// back mtime
-			if oldStat.Mode() != newStat.Mode() ||
-				oldStat.Uid() != newStat.Uid() ||
-				oldStat.Gid() != newStat.Gid() ||
-				oldStat.Rdev() != newStat.Rdev() ||
-				// Don't look at size for dirs, its not a good measure of change
-				(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
-					(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) ||
+			if statDifferent(oldStat, newStat) ||
 				bytes.Compare(oldChild.capability, newChild.capability) != 0 {
 				change := Change{
 					Path: newChild.path(),
@@ -401,22 +390,22 @@
 					ChangeTime: timestamp,
 				}
 				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
-					log.Debugf("Can't write whiteout header: %s", err)
+					logrus.Debugf("Can't write whiteout header: %s", err)
 				}
 			} else {
 				path := filepath.Join(dir, change.Path)
 				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
-					log.Debugf("Can't add file %s to tar: %s", path, err)
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
 				}
 			}
 		}
 
 		// Make sure to check the error on Close.
 		if err := ta.TarWriter.Close(); err != nil {
-			log.Debugf("Can't close layer: %s", err)
+			logrus.Debugf("Can't close layer: %s", err)
 		}
 		if err := writer.Close(); err != nil {
-			log.Debugf("failed close Changes writer: %s", err)
+			logrus.Debugf("failed close Changes writer: %s", err)
 		}
 	}()
 	return reader, nil
diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go
index 53ec575..290b2dd 100644
--- a/pkg/archive/changes_test.go
+++ b/pkg/archive/changes_test.go
@@ -6,6 +6,7 @@
 	"os/exec"
 	"path"
 	"sort"
+	"syscall"
 	"testing"
 	"time"
 )
@@ -91,17 +92,130 @@
 	}
 }
 
+func TestChangeString(t *testing.T) {
+	modifiyChange := Change{"change", ChangeModify}
+	toString := modifiyChange.String()
+	if toString != "C change" {
+		t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString)
+	}
+	addChange := Change{"change", ChangeAdd}
+	toString = addChange.String()
+	if toString != "A change" {
+		t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString)
+	}
+	deleteChange := Change{"change", ChangeDelete}
+	toString = deleteChange.String()
+	if toString != "D change" {
+		t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString)
+	}
+}
+
+func TestChangesWithNoChanges(t *testing.T) {
+	rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(rwLayer)
+	layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+	createSampleDir(t, layer)
+	changes, err := Changes([]string{layer}, rwLayer)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(changes) != 0 {
+		t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes))
+	}
+}
+
+func TestChangesWithChanges(t *testing.T) {
+	rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(rwLayer)
+	// Create a folder
+	dir1 := path.Join(rwLayer, "dir1")
+	os.MkdirAll(dir1, 0740)
+	deletedFile := path.Join(dir1, ".wh.file1-2")
+	ioutil.WriteFile(deletedFile, []byte{}, 0600)
+	modifiedFile := path.Join(dir1, "file1-1")
+	ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444)
+	// Let's add a subfolder for a newFile
+	subfolder := path.Join(dir1, "subfolder")
+	os.MkdirAll(subfolder, 0740)
+	newFile := path.Join(subfolder, "newFile")
+	ioutil.WriteFile(newFile, []byte{}, 0740)
+	// Let's create folders that with have the role of layers with the same data
+	layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+	createSampleDir(t, layer)
+	os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
+
+	// Let's modify modtime for dir1 to be sure it's the same for the two layer (to not having false positive)
+	fi, err := os.Stat(dir1)
+	if err != nil {
+		return
+	}
+	mtime := fi.ModTime()
+	stat := fi.Sys().(*syscall.Stat_t)
+	atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+
+	layerDir1 := path.Join(layer, "dir1")
+	os.Chtimes(layerDir1, atime, mtime)
+
+	changes, err := Changes([]string{layer}, rwLayer)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sort.Sort(changesByPath(changes))
+
+	expectedChanges := []Change{
+		{"/dir1/file1-1", ChangeModify},
+		{"/dir1/file1-2", ChangeDelete},
+		{"/dir1/subfolder", ChangeModify},
+		{"/dir1/subfolder/newFile", ChangeAdd},
+	}
+
+	for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+		if i >= len(expectedChanges) {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		}
+		if i >= len(changes) {
+			t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+		}
+		if changes[i].Path == expectedChanges[i].Path {
+			if changes[i] != expectedChanges[i] {
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
+			}
+		} else if changes[i].Path < expectedChanges[i].Path {
+			t.Fatalf("unexpected change %s\n", changes[i].String())
+		} else {
+			t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+		}
+	}
+}
+
 // Create an directory, copy it, make sure we report no changes between the two
 func TestChangesDirsEmpty(t *testing.T) {
 	src, err := ioutil.TempDir("", "docker-changes-test")
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer os.RemoveAll(src)
 	createSampleDir(t, src)
 	dst := src + "-copy"
 	if err := copyDir(src, dst); err != nil {
 		t.Fatal(err)
 	}
+	defer os.RemoveAll(dst)
 	changes, err := ChangesDirs(dst, src)
 	if err != nil {
 		t.Fatal(err)
@@ -291,3 +405,41 @@
 		t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
 	}
 }
+
+func TestChangesSizeWithNoChanges(t *testing.T) {
+	size := ChangesSize("/tmp", nil)
+	if size != 0 {
+		t.Fatalf("ChangesSizes with no changes should be 0, was %d", size)
+	}
+}
+
+func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
+	changes := []Change{
+		{Path: "deletedPath", Kind: ChangeDelete},
+	}
+	size := ChangesSize("/tmp", changes)
+	if size != 0 {
+		t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
+	}
+}
+
+func TestChangesSize(t *testing.T) {
+	parentPath, err := ioutil.TempDir("", "docker-changes-test")
+	defer os.RemoveAll(parentPath)
+	addition := path.Join(parentPath, "addition")
+	if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+		t.Fatal(err)
+	}
+	modification := path.Join(parentPath, "modification")
+	if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+		t.Fatal(err)
+	}
+	changes := []Change{
+		{Path: "addition", Kind: ChangeAdd},
+		{Path: "modification", Kind: ChangeModify},
+	}
+	size := ChangesSize(parentPath, changes)
+	if size != 6 {
+		t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
+	}
+}
diff --git a/pkg/archive/changes_unix.go b/pkg/archive/changes_unix.go
new file mode 100644
index 0000000..d780f16
--- /dev/null
+++ b/pkg/archive/changes_unix.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package archive
+
+import (
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+	// Don't look at size for dirs, its not a good measure of change
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.Uid() != newStat.Uid() ||
+		oldStat.Gid() != newStat.Gid() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size for dirs, its not a good measure of change
+		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
diff --git a/pkg/archive/changes_windows.go b/pkg/archive/changes_windows.go
new file mode 100644
index 0000000..4809b7a
--- /dev/null
+++ b/pkg/archive/changes_windows.go
@@ -0,0 +1,20 @@
+package archive
+
+import (
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+
+	// Don't look at size for dirs, its not a good measure of change
+	if oldStat.ModTime() != newStat.ModTime() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.IsDir()
+}
diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go
index b5eb63f..fd49460 100644
--- a/pkg/archive/diff.go
+++ b/pkg/archive/diff.go
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"archive/tar"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -9,8 +10,6 @@
 	"strings"
 	"syscall"
 
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
-
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/system"
 )
@@ -48,7 +47,7 @@
 			parent := filepath.Dir(hdr.Name)
 			parentPath := filepath.Join(dest, parent)
 			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = os.MkdirAll(parentPath, 0600)
+				err = system.MkdirAll(parentPath, 0600)
 				if err != nil {
 					return 0, err
 				}
diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go
index 758c411..01ed437 100644
--- a/pkg/archive/diff_test.go
+++ b/pkg/archive/diff_test.go
@@ -1,9 +1,8 @@
 package archive
 
 import (
+	"archive/tar"
 	"testing"
-
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 func TestApplyLayerInvalidFilenames(t *testing.T) {
diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go
index 9048027..2a266c2 100644
--- a/pkg/archive/utils_test.go
+++ b/pkg/archive/utils_test.go
@@ -1,6 +1,7 @@
 package archive
 
 import (
+	"archive/tar"
 	"bytes"
 	"fmt"
 	"io"
@@ -8,8 +9,6 @@
 	"os"
 	"path/filepath"
 	"time"
-
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 var testUntarFns = map[string]func(string, io.Reader) error{
diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go
index b8b6019..dfb335c 100644
--- a/pkg/archive/wrap.go
+++ b/pkg/archive/wrap.go
@@ -1,8 +1,8 @@
 package archive
 
 import (
+	"archive/tar"
 	"bytes"
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 	"io/ioutil"
 )
 
diff --git a/pkg/archive/wrap_test.go b/pkg/archive/wrap_test.go
new file mode 100644
index 0000000..46ab366
--- /dev/null
+++ b/pkg/archive/wrap_test.go
@@ -0,0 +1,98 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+	"testing"
+)
+
+func TestGenerateEmptyFile(t *testing.T) {
+	archive, err := Generate("emptyFile")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"emptyFile", ""},
+	}
+
+	tr := tar.NewReader(archive)
+	actualFiles := make([][]string, 0, 10)
+	i := 0
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(tr)
+		content := buf.String()
+		actualFiles = append(actualFiles, []string{hdr.Name, content})
+		i++
+	}
+	if len(actualFiles) != len(expectedFiles) {
+		t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
+	}
+	for i := 0; i < len(expectedFiles); i++ {
+		actual := actualFiles[i]
+		expected := expectedFiles[i]
+		if actual[0] != expected[0] {
+			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+		}
+		if actual[1] != expected[1] {
+			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+		}
+	}
+}
+
+func TestGenerateWithContent(t *testing.T) {
+	archive, err := Generate("file", "content")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if archive == nil {
+		t.Fatal("The generated archive should not be nil.")
+	}
+
+	expectedFiles := [][]string{
+		{"file", "content"},
+	}
+
+	tr := tar.NewReader(archive)
+	actualFiles := make([][]string, 0, 10)
+	i := 0
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(tr)
+		content := buf.String()
+		actualFiles = append(actualFiles, []string{hdr.Name, content})
+		i++
+	}
+	if len(actualFiles) != len(expectedFiles) {
+		t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
+	}
+	for i := 0; i < len(expectedFiles); i++ {
+		actual := actualFiles[i]
+		expected := expectedFiles[i]
+		if actual[0] != expected[0] {
+			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+		}
+		if actual[1] != expected[1] {
+			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+		}
+	}
+}
diff --git a/pkg/broadcastwriter/broadcastwriter.go b/pkg/broadcastwriter/broadcastwriter.go
index 9a0b176..bd9b675 100644
--- a/pkg/broadcastwriter/broadcastwriter.go
+++ b/pkg/broadcastwriter/broadcastwriter.go
@@ -6,8 +6,9 @@
 	"sync"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/timeutils"
 )
 
 // BroadcastWriter accumulate multiple io.WriteCloser by stream.
@@ -33,7 +34,6 @@
 // Write writes bytes to all writers. Failed writers will be evicted during
 // this call.
 func (w *BroadcastWriter) Write(p []byte) (n int, err error) {
-	created := time.Now().UTC()
 	w.Lock()
 	if writers, ok := w.streams[""]; ok {
 		for sw := range writers {
@@ -42,26 +42,47 @@
 				delete(writers, sw)
 			}
 		}
+		if len(w.streams) == 1 {
+			if w.buf.Len() >= 4096 {
+				w.buf.Reset()
+			} else {
+				w.buf.Write(p)
+			}
+			w.Unlock()
+			return len(p), nil
+		}
 	}
 	if w.jsLogBuf == nil {
 		w.jsLogBuf = new(bytes.Buffer)
 		w.jsLogBuf.Grow(1024)
 	}
+	var timestamp string
+	created := time.Now().UTC()
 	w.buf.Write(p)
 	for {
-		line, err := w.buf.ReadString('\n')
-		if err != nil {
-			w.buf.WriteString(line)
+		if n := w.buf.Len(); n == 0 {
 			break
 		}
+		i := bytes.IndexByte(w.buf.Bytes(), '\n')
+		if i < 0 {
+			break
+		}
+		lineBytes := w.buf.Next(i + 1)
+		if timestamp == "" {
+			timestamp, err = timeutils.FastMarshalJSON(created)
+			if err != nil {
+				continue
+			}
+		}
+
 		for stream, writers := range w.streams {
 			if stream == "" {
 				continue
 			}
-			jsonLog := jsonlog.JSONLog{Log: line, Stream: stream, Created: created}
+			jsonLog := jsonlog.JSONLogBytes{Log: lineBytes, Stream: stream, Created: timestamp}
 			err = jsonLog.MarshalJSONBuf(w.jsLogBuf)
 			if err != nil {
-				log.Errorf("Error making JSON log line: %s", err)
+				logrus.Errorf("Error making JSON log line: %s", err)
 				continue
 			}
 			w.jsLogBuf.WriteByte('\n')
diff --git a/pkg/broadcastwriter/broadcastwriter_test.go b/pkg/broadcastwriter/broadcastwriter_test.go
index 62ca126..7122782 100644
--- a/pkg/broadcastwriter/broadcastwriter_test.go
+++ b/pkg/broadcastwriter/broadcastwriter_test.go
@@ -142,3 +142,33 @@
 		b.StartTimer()
 	}
 }
+
+func BenchmarkBroadcastWriterWithoutStdoutStderr(b *testing.B) {
+	writer := New()
+	setUpWriter := func() {
+		for i := 0; i < 100; i++ {
+			writer.AddWriter(devNullCloser(0), "")
+		}
+	}
+	testLine := "Line that thinks that it is log line from docker"
+	var buf bytes.Buffer
+	for i := 0; i < 100; i++ {
+		buf.Write([]byte(testLine + "\n"))
+	}
+	// line without eol
+	buf.Write([]byte(testLine))
+	testText := buf.Bytes()
+	b.SetBytes(int64(5 * len(testText)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		setUpWriter()
+
+		for j := 0; j < 5; j++ {
+			if _, err := writer.Write(testText); err != nil {
+				b.Fatal(err)
+			}
+		}
+
+		writer.Clean()
+	}
+}
diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go
index 17d3739..49d1917 100644
--- a/pkg/chrootarchive/archive.go
+++ b/pkg/chrootarchive/archive.go
@@ -1,6 +1,7 @@
 package chrootarchive
 
 import (
+	"bytes"
 	"encoding/json"
 	"flag"
 	"fmt"
@@ -29,7 +30,8 @@
 
 	var options *archive.TarOptions
 
-	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+	//read the options from the pipe "ExtraFiles"
+	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
 		fatal(err)
 	}
 
@@ -62,28 +64,39 @@
 		}
 	}
 
-	// We can't pass the exclude list directly via cmd line
-	// because we easily overrun the shell max argument list length
-	// when the full image list is passed (e.g. when this is used
-	// by `docker load`). Instead we will add the JSON marshalled
-	// and placed in the env, which has significantly larger
-	// max size
-	data, err := json.Marshal(options)
-	if err != nil {
-		return fmt.Errorf("Untar json encode: %v", err)
-	}
 	decompressedArchive, err := archive.DecompressStream(tarArchive)
 	if err != nil {
 		return err
 	}
 	defer decompressedArchive.Close()
 
+	// We can't pass a potentially large exclude list directly via cmd line
+	// because we easily overrun the kernel's max argument/environment size
+	// when the full image list is passed (e.g. when this is used by
+	// `docker load`). We will marshall the options via a pipe to the
+	// child
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("Untar pipe failure: %v", err)
+	}
 	cmd := reexec.Command("docker-untar", dest)
 	cmd.Stdin = decompressedArchive
-	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("Untar %s %s", err, out)
+	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+
+	if err := cmd.Start(); err != nil {
+		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+	}
+	//write the options to the pipe for the untar exec to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
 	}
 	return nil
 }
diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go
index fb4c5c4..f9b5b09 100644
--- a/pkg/chrootarchive/archive_test.go
+++ b/pkg/chrootarchive/archive_test.go
@@ -1,10 +1,15 @@
 package chrootarchive
 
 import (
+	"bytes"
+	"fmt"
+	"hash/crc32"
 	"io"
 	"io/ioutil"
 	"os"
+	"path"
 	"path/filepath"
+	"strings"
 	"testing"
 	"time"
 
@@ -45,6 +50,255 @@
 	}
 }
 
+// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of
+// local images)
+func TestChrootUntarWithHugeExcludesList(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	src := filepath.Join(tmpdir, "src")
+	if err := os.MkdirAll(src, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil {
+		t.Fatal(err)
+	}
+	stream, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+	dest := filepath.Join(tmpdir, "dest")
+	if err := os.MkdirAll(dest, 0700); err != nil {
+		t.Fatal(err)
+	}
+	options := &archive.TarOptions{}
+	//65534 entries of 64-byte strings ~= 4MB of environment space which should overflow
+	//on most systems when passed via environment or command line arguments
+	excludes := make([]string, 65534, 65534)
+	for i := 0; i < 65534; i++ {
+		excludes[i] = strings.Repeat(string(i), 64)
+	}
+	options.ExcludePatterns = excludes
+	if err := Untar(stream, dest, options); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootUntarEmptyArchive(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	if err := Untar(nil, tmpdir, nil); err == nil {
+		t.Fatal("expected error on empty archive")
+	}
+}
+
+func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeSymLinks {
+			if err := os.Symlink(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
+
+func getHash(filename string) (uint32, error) {
+	stream, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return 0, err
+	}
+	hash := crc32.NewIEEE()
+	hash.Write(stream)
+	return hash.Sum32(), nil
+}
+
+func compareDirectories(src string, dest string) error {
+	changes, err := archive.ChangesDirs(dest, src)
+	if err != nil {
+		return err
+	}
+	if len(changes) > 0 {
+		return fmt.Errorf("Unexpected differences after untar: %v", changes)
+	}
+	return nil
+}
+
+func compareFiles(src string, dest string) error {
+	srcHash, err := getHash(src)
+	if err != nil {
+		return err
+	}
+	destHash, err := getHash(dest)
+	if err != nil {
+		return err
+	}
+	if srcHash != destHash {
+		return fmt.Errorf("%s is different from %s", src, dest)
+	}
+	return nil
+}
+
+func TestChrootTarUntarWithSymlink(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	src := filepath.Join(tmpdir, "src")
+	if err := os.MkdirAll(src, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := prepareSourceDirectory(10, src, true); err != nil {
+		t.Fatal(err)
+	}
+	dest := filepath.Join(tmpdir, "dest")
+	if err := TarUntar(src, dest); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareDirectories(src, dest); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootCopyWithTar(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	src := filepath.Join(tmpdir, "src")
+	if err := os.MkdirAll(src, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := prepareSourceDirectory(10, src, true); err != nil {
+		t.Fatal(err)
+	}
+
+	// Copy directory
+	dest := filepath.Join(tmpdir, "dest")
+	if err := CopyWithTar(src, dest); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareDirectories(src, dest); err != nil {
+		t.Fatal(err)
+	}
+
+	// Copy file
+	srcfile := filepath.Join(src, "file-1")
+	dest = filepath.Join(tmpdir, "destFile")
+	destfile := filepath.Join(dest, "file-1")
+	if err := CopyWithTar(srcfile, destfile); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareFiles(srcfile, destfile); err != nil {
+		t.Fatal(err)
+	}
+
+	// Copy symbolic link
+	srcLinkfile := filepath.Join(src, "file-1-link")
+	dest = filepath.Join(tmpdir, "destSymlink")
+	destLinkfile := filepath.Join(dest, "file-1-link")
+	if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareFiles(srcLinkfile, destLinkfile); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootCopyFileWithTar(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	src := filepath.Join(tmpdir, "src")
+	if err := os.MkdirAll(src, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := prepareSourceDirectory(10, src, true); err != nil {
+		t.Fatal(err)
+	}
+
+	// Copy directory
+	dest := filepath.Join(tmpdir, "dest")
+	if err := CopyFileWithTar(src, dest); err == nil {
+		t.Fatal("Expected error on copying directory")
+	}
+
+	// Copy file
+	srcfile := filepath.Join(src, "file-1")
+	dest = filepath.Join(tmpdir, "destFile")
+	destfile := filepath.Join(dest, "file-1")
+	if err := CopyFileWithTar(srcfile, destfile); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareFiles(srcfile, destfile); err != nil {
+		t.Fatal(err)
+	}
+
+	// Copy symbolic link
+	srcLinkfile := filepath.Join(src, "file-1-link")
+	dest = filepath.Join(tmpdir, "destSymlink")
+	destLinkfile := filepath.Join(dest, "file-1-link")
+	if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareFiles(srcLinkfile, destLinkfile); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestChrootUntarPath(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	src := filepath.Join(tmpdir, "src")
+	if err := os.MkdirAll(src, 0700); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := prepareSourceDirectory(10, src, true); err != nil {
+		t.Fatal(err)
+	}
+	dest := filepath.Join(tmpdir, "dest")
+	// Untar a directory
+	if err := UntarPath(src, dest); err == nil {
+		t.Fatal("Expected error on untaring a directory")
+	}
+
+	// Untar a tar file
+	stream, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		t.Fatal(err)
+	}
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(stream)
+	tarfile := filepath.Join(tmpdir, "src.tar")
+	if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil {
+		t.Fatal(err)
+	}
+	if err := UntarPath(tarfile, dest); err != nil {
+		t.Fatal(err)
+	}
+	if err := compareDirectories(src, dest); err != nil {
+		t.Fatal(err)
+	}
+}
+
 type slowEmptyTarReader struct {
 	size      int
 	offset    int
diff --git a/pkg/common/randomid.go b/pkg/common/randomid.go
deleted file mode 100644
index 5c6d592..0000000
--- a/pkg/common/randomid.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package common
-
-import (
-	"crypto/rand"
-	"encoding/hex"
-	"io"
-	"strconv"
-)
-
-// TruncateID returns a shorthand version of a string identifier for convenience.
-// A collision with other shorthands is very unlikely, but possible.
-// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
-// will need to use a langer prefix, or the full-length Id.
-func TruncateID(id string) string {
-	shortLen := 12
-	if len(id) < shortLen {
-		shortLen = len(id)
-	}
-	return id[:shortLen]
-}
-
-// GenerateRandomID returns an unique id
-func GenerateRandomID() string {
-	for {
-		id := make([]byte, 32)
-		if _, err := io.ReadFull(rand.Reader, id); err != nil {
-			panic(err) // This shouldn't happen
-		}
-		value := hex.EncodeToString(id)
-		// if we try to parse the truncated for as an int and we don't have
-		// an error then the value is all numberic and causes issues when
-		// used as a hostname. ref #3869
-		if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil {
-			continue
-		}
-		return value
-	}
-}
-
-func RandomString() string {
-	id := make([]byte, 32)
-
-	if _, err := io.ReadFull(rand.Reader, id); err != nil {
-		panic(err) // This shouldn't happen
-	}
-	return hex.EncodeToString(id)
-}
diff --git a/pkg/common/randomid_test.go b/pkg/common/randomid_test.go
deleted file mode 100644
index 1dba412..0000000
--- a/pkg/common/randomid_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package common
-
-import (
-	"testing"
-)
-
-func TestShortenId(t *testing.T) {
-	id := GenerateRandomID()
-	truncID := TruncateID(id)
-	if len(truncID) != 12 {
-		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
-	}
-}
-
-func TestShortenIdEmpty(t *testing.T) {
-	id := ""
-	truncID := TruncateID(id)
-	if len(truncID) > len(id) {
-		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
-	}
-}
-
-func TestShortenIdInvalid(t *testing.T) {
-	id := "1234"
-	truncID := TruncateID(id)
-	if len(truncID) != len(id) {
-		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
-	}
-}
-
-func TestGenerateRandomID(t *testing.T) {
-	id := GenerateRandomID()
-
-	if len(id) != 64 {
-		t.Fatalf("Id returned is incorrect: %s", id)
-	}
-}
-
-func TestRandomString(t *testing.T) {
-	id := RandomString()
-	if len(id) != 64 {
-		t.Fatalf("Id returned is incorrect: %s", id)
-	}
-}
-
-func TestRandomStringUniqueness(t *testing.T) {
-	repeats := 25
-	set := make(map[string]struct{}, repeats)
-	for i := 0; i < repeats; i = i + 1 {
-		id := RandomString()
-		if len(id) != 64 {
-			t.Fatalf("Id returned is incorrect: %s", id)
-		}
-		if _, ok := set[id]; ok {
-			t.Fatalf("Random number is repeated")
-		}
-		set[id] = struct{}{}
-	}
-}
diff --git a/pkg/devicemapper/attach_loopback.go b/pkg/devicemapper/attach_loopback.go
index d39cbc6..424a974 100644
--- a/pkg/devicemapper/attach_loopback.go
+++ b/pkg/devicemapper/attach_loopback.go
@@ -7,7 +7,7 @@
 	"os"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 func stringToLoopName(src string) [LoNameSize]uint8 {
@@ -39,20 +39,20 @@
 		fi, err := os.Stat(target)
 		if err != nil {
 			if os.IsNotExist(err) {
-				log.Errorf("There are no more loopback devices available.")
+				logrus.Errorf("There are no more loopback devices available.")
 			}
 			return nil, ErrAttachLoopbackDevice
 		}
 
 		if fi.Mode()&os.ModeDevice != os.ModeDevice {
-			log.Errorf("Loopback device %s is not a block device.", target)
+			logrus.Errorf("Loopback device %s is not a block device.", target)
 			continue
 		}
 
 		// OpenFile adds O_CLOEXEC
 		loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
 		if err != nil {
-			log.Errorf("Error opening loopback device: %s", err)
+			logrus.Errorf("Error opening loopback device: %s", err)
 			return nil, ErrAttachLoopbackDevice
 		}
 
@@ -62,7 +62,7 @@
 
 			// If the error is EBUSY, then try the next loopback
 			if err != syscall.EBUSY {
-				log.Errorf("Cannot set up loopback device %s: %s", target, err)
+				logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
 				return nil, ErrAttachLoopbackDevice
 			}
 
@@ -75,7 +75,7 @@
 
 	// This can't happen, but let's be sure
 	if loopFile == nil {
-		log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+		logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
 		return nil, ErrAttachLoopbackDevice
 	}
 
@@ -91,13 +91,13 @@
 	// loopback from index 0.
 	startIndex, err := getNextFreeLoopbackIndex()
 	if err != nil {
-		log.Debugf("Error retrieving the next available loopback: %s", err)
+		logrus.Debugf("Error retrieving the next available loopback: %s", err)
 	}
 
 	// OpenFile adds O_CLOEXEC
 	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
 	if err != nil {
-		log.Errorf("Error opening sparse file %s: %s", sparseName, err)
+		logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
 		return nil, ErrAttachLoopbackDevice
 	}
 	defer sparseFile.Close()
@@ -115,11 +115,11 @@
 	}
 
 	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
-		log.Errorf("Cannot set up loopback device info: %s", err)
+		logrus.Errorf("Cannot set up loopback device info: %s", err)
 
 		// If the call failed, then free the loopback device
 		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
-			log.Errorf("Error while cleaning up the loopback device")
+			logrus.Errorf("Error while cleaning up the loopback device")
 		}
 		loopFile.Close()
 		return nil, ErrAttachLoopbackDevice
diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go
index f3b55c8..e7f17b8 100644
--- a/pkg/devicemapper/devmapper.go
+++ b/pkg/devicemapper/devmapper.go
@@ -8,8 +8,9 @@
 	"os"
 	"runtime"
 	"syscall"
+	"unsafe"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 type DevmapperLogger interface {
@@ -54,6 +55,7 @@
 	ErrTaskGetDeps            = errors.New("dm_task_get_deps failed")
 	ErrTaskGetInfo            = errors.New("dm_task_get_info failed")
 	ErrTaskGetDriverVersion   = errors.New("dm_task_get_driver_version failed")
+	ErrTaskDeferredRemove     = errors.New("dm_task_deferred_remove failed")
 	ErrTaskSetCookie          = errors.New("dm_task_set_cookie failed")
 	ErrNilCookie              = errors.New("cookie ptr can't be nil")
 	ErrAttachLoopbackDevice   = errors.New("loopback mounting failed")
@@ -68,9 +70,11 @@
 	ErrLoopbackSetCapacity    = errors.New("Unable set loopback capacity")
 	ErrBusy                   = errors.New("Device is Busy")
 	ErrDeviceIdExists         = errors.New("Device Id Exists")
+	ErrEnxio                  = errors.New("No such device or address")
 
 	dmSawBusy  bool
 	dmSawExist bool
+	dmSawEnxio bool // No Such Device or Address
 )
 
 type (
@@ -83,16 +87,17 @@
 		Device []uint64
 	}
 	Info struct {
-		Exists        int
-		Suspended     int
-		LiveTable     int
-		InactiveTable int
-		OpenCount     int32
-		EventNr       uint32
-		Major         uint32
-		Minor         uint32
-		ReadOnly      int
-		TargetCount   int32
+		Exists         int
+		Suspended      int
+		LiveTable      int
+		InactiveTable  int
+		OpenCount      int32
+		EventNr        uint32
+		Major          uint32
+		Minor          uint32
+		ReadOnly       int
+		TargetCount    int32
+		DeferredRemove int
 	}
 	TaskType    int
 	AddNodeType int
@@ -218,6 +223,14 @@
 	return info, nil
 }
 
+func (t *Task) GetInfoWithDeferred() (*Info, error) {
+	info := &Info{}
+	if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 {
+		return nil, ErrTaskGetInfo
+	}
+	return info, nil
+}
+
 func (t *Task) GetDriverVersion() (string, error) {
 	res := DmTaskGetDriverVersion(t.unmanaged)
 	if res == "" {
@@ -226,7 +239,7 @@
 	return res, nil
 }
 
-func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
+func (t *Task) GetNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
 	length uint64, targetType string, params string) {
 
 	return DmGetNextTarget(t.unmanaged, next, &start, &length,
@@ -237,7 +250,7 @@
 func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
 	loopInfo, err := ioctlLoopGetStatus64(file.Fd())
 	if err != nil {
-		log.Errorf("Error get loopback backing file: %s", err)
+		logrus.Errorf("Error get loopback backing file: %s", err)
 		return 0, 0, ErrGetLoopbackBackingFile
 	}
 	return loopInfo.loDevice, loopInfo.loInode, nil
@@ -245,7 +258,7 @@
 
 func LoopbackSetCapacity(file *os.File) error {
 	if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
-		log.Errorf("Error loopbackSetCapacity: %s", err)
+		logrus.Errorf("Error loopbackSetCapacity: %s", err)
 		return ErrLoopbackSetCapacity
 	}
 	return nil
@@ -283,9 +296,9 @@
 	return nil
 }
 
-func UdevWait(cookie uint) error {
-	if res := DmUdevWait(cookie); res != 1 {
-		log.Debugf("Failed to wait on udev cookie %d", cookie)
+func UdevWait(cookie *uint) error {
+	if res := DmUdevWait(*cookie); res != 1 {
+		logrus.Debugf("Failed to wait on udev cookie %d", *cookie)
 		return ErrUdevWait
 	}
 	return nil
@@ -305,7 +318,7 @@
 
 func SetDevDir(dir string) error {
 	if res := DmSetDevDir(dir); res != 1 {
-		log.Debugf("Error dm_set_dev_dir")
+		logrus.Debugf("Error dm_set_dev_dir")
 		return ErrSetDevDir
 	}
 	return nil
@@ -348,8 +361,6 @@
 
 // Useful helper for cleanup
 func RemoveDevice(name string) error {
-	log.Debugf("[devmapper] RemoveDevice START(%s)", name)
-	defer log.Debugf("[devmapper] RemoveDevice END(%s)", name)
 	task, err := TaskCreateNamed(DeviceRemove, name)
 	if task == nil {
 		return err
@@ -359,7 +370,7 @@
 	if err := task.SetCookie(&cookie, 0); err != nil {
 		return fmt.Errorf("Can not set cookie: %s", err)
 	}
-	defer UdevWait(cookie)
+	defer UdevWait(&cookie)
 
 	dmSawBusy = false // reset before the task is run
 	if err = task.Run(); err != nil {
@@ -372,10 +383,59 @@
 	return nil
 }
 
+func RemoveDeviceDeferred(name string) error {
+	logrus.Debugf("[devmapper] RemoveDeviceDeferred START(%s)", name)
+	defer logrus.Debugf("[devmapper] RemoveDeviceDeferred END(%s)", name)
+	task, err := TaskCreateNamed(DeviceRemove, name)
+	if task == nil {
+		return err
+	}
+
+	if err := DmTaskDeferredRemove(task.unmanaged); err != 1 {
+		return ErrTaskDeferredRemove
+	}
+
+	if err = task.Run(); err != nil {
+		return fmt.Errorf("Error running RemoveDeviceDeferred %s", err)
+	}
+
+	return nil
+}
+
+// Useful helper for cleanup
+func CancelDeferredRemove(deviceName string) error {
+	task, err := TaskCreateNamed(DeviceTargetMsg, deviceName)
+	if task == nil {
+		return err
+	}
+
+	if err := task.SetSector(0); err != nil {
+		return fmt.Errorf("Can't set sector %s", err)
+	}
+
+	if err := task.SetMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil {
+		return fmt.Errorf("Can't set message %s", err)
+	}
+
+	dmSawBusy = false
+	dmSawEnxio = false
+	if err := task.Run(); err != nil {
+		// A device might be being deleted already
+		if dmSawBusy {
+			return ErrBusy
+		} else if dmSawEnxio {
+			return ErrEnxio
+		}
+		return fmt.Errorf("Error running CancelDeferredRemove %s", err)
+
+	}
+	return nil
+}
+
 func GetBlockDeviceSize(file *os.File) (uint64, error) {
 	size, err := ioctlBlkGetSize64(file.Fd())
 	if err != nil {
-		log.Errorf("Error getblockdevicesize: %s", err)
+		logrus.Errorf("Error getblockdevicesize: %s", err)
 		return 0, ErrGetBlockSize
 	}
 	return uint64(size), nil
@@ -426,7 +486,7 @@
 	if err := task.SetCookie(&cookie, flags); err != nil {
 		return fmt.Errorf("Can't set cookie %s", err)
 	}
-	defer UdevWait(cookie)
+	defer UdevWait(&cookie)
 
 	if err := task.Run(); err != nil {
 		return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err)
@@ -480,6 +540,17 @@
 	return task.GetInfo()
 }
 
+func GetInfoWithDeferred(name string) (*Info, error) {
+	task, err := TaskCreateNamed(DeviceInfo, name)
+	if task == nil {
+		return nil, err
+	}
+	if err := task.Run(); err != nil {
+		return nil, err
+	}
+	return task.GetInfoWithDeferred()
+}
+
 func GetDriverVersion() (string, error) {
 	task := TaskCreate(DeviceVersion)
 	if task == nil {
@@ -494,25 +565,25 @@
 func GetStatus(name string) (uint64, uint64, string, string, error) {
 	task, err := TaskCreateNamed(DeviceStatus, name)
 	if task == nil {
-		log.Debugf("GetStatus: Error TaskCreateNamed: %s", err)
+		logrus.Debugf("GetStatus: Error TaskCreateNamed: %s", err)
 		return 0, 0, "", "", err
 	}
 	if err := task.Run(); err != nil {
-		log.Debugf("GetStatus: Error Run: %s", err)
+		logrus.Debugf("GetStatus: Error Run: %s", err)
 		return 0, 0, "", "", err
 	}
 
 	devinfo, err := task.GetInfo()
 	if err != nil {
-		log.Debugf("GetStatus: Error GetInfo: %s", err)
+		logrus.Debugf("GetStatus: Error GetInfo: %s", err)
 		return 0, 0, "", "", err
 	}
 	if devinfo.Exists == 0 {
-		log.Debugf("GetStatus: Non existing device %s", name)
+		logrus.Debugf("GetStatus: Non existing device %s", name)
 		return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
 	}
 
-	_, start, length, targetType, params := task.GetNextTarget(0)
+	_, start, length, targetType, params := task.GetNextTarget(unsafe.Pointer(nil))
 	return start, length, targetType, params, nil
 }
 
@@ -557,7 +628,7 @@
 	if err := task.SetCookie(&cookie, 0); err != nil {
 		return fmt.Errorf("Can't set cookie %s", err)
 	}
-	defer UdevWait(cookie)
+	defer UdevWait(&cookie)
 
 	if err := task.Run(); err != nil {
 		return fmt.Errorf("Error running DeviceResume %s", err)
@@ -567,7 +638,7 @@
 }
 
 func CreateDevice(poolName string, deviceId int) error {
-	log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, deviceId)
+	logrus.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, deviceId)
 	task, err := TaskCreateNamed(DeviceTargetMsg, poolName)
 	if task == nil {
 		return err
@@ -586,9 +657,10 @@
 		// Caller wants to know about ErrDeviceIdExists so that it can try with a different device id.
 		if dmSawExist {
 			return ErrDeviceIdExists
-		} else {
-			return fmt.Errorf("Error running CreateDevice %s", err)
 		}
+
+		return fmt.Errorf("Error running CreateDevice %s", err)
+
 	}
 	return nil
 }
@@ -632,7 +704,7 @@
 		return fmt.Errorf("Can't set cookie %s", err)
 	}
 
-	defer UdevWait(cookie)
+	defer UdevWait(&cookie)
 
 	if err := task.Run(); err != nil {
 		return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err)
@@ -681,9 +753,10 @@
 		// Caller wants to know about ErrDeviceIdExists so that it can try with a different device id.
 		if dmSawExist {
 			return ErrDeviceIdExists
-		} else {
-			return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err)
 		}
+
+		return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err)
+
 	}
 
 	if doSuspend {
diff --git a/pkg/devicemapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go
index d6550bd..f66a208 100644
--- a/pkg/devicemapper/devmapper_log.go
+++ b/pkg/devicemapper/devmapper_log.go
@@ -22,6 +22,10 @@
 		if strings.Contains(msg, "File exists") {
 			dmSawExist = true
 		}
+
+		if strings.Contains(msg, "No such device or address") {
+			dmSawEnxio = true
+		}
 	}
 
 	if dmLogger != nil {
diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go
index ae4f30f..87c2003 100644
--- a/pkg/devicemapper/devmapper_wrapper.go
+++ b/pkg/devicemapper/devmapper_wrapper.go
@@ -90,28 +90,30 @@
 )
 
 var (
-	DmGetLibraryVersion    = dmGetLibraryVersionFct
-	DmGetNextTarget        = dmGetNextTargetFct
-	DmLogInitVerbose       = dmLogInitVerboseFct
-	DmSetDevDir            = dmSetDevDirFct
-	DmTaskAddTarget        = dmTaskAddTargetFct
-	DmTaskCreate           = dmTaskCreateFct
-	DmTaskDestroy          = dmTaskDestroyFct
-	DmTaskGetDeps          = dmTaskGetDepsFct
-	DmTaskGetInfo          = dmTaskGetInfoFct
-	DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
-	DmTaskRun              = dmTaskRunFct
-	DmTaskSetAddNode       = dmTaskSetAddNodeFct
-	DmTaskSetCookie        = dmTaskSetCookieFct
-	DmTaskSetMessage       = dmTaskSetMessageFct
-	DmTaskSetName          = dmTaskSetNameFct
-	DmTaskSetRo            = dmTaskSetRoFct
-	DmTaskSetSector        = dmTaskSetSectorFct
-	DmUdevWait             = dmUdevWaitFct
-	DmUdevSetSyncSupport   = dmUdevSetSyncSupportFct
-	DmUdevGetSyncSupport   = dmUdevGetSyncSupportFct
-	DmCookieSupported      = dmCookieSupportedFct
-	LogWithErrnoInit       = logWithErrnoInitFct
+	DmGetLibraryVersion       = dmGetLibraryVersionFct
+	DmGetNextTarget           = dmGetNextTargetFct
+	DmLogInitVerbose          = dmLogInitVerboseFct
+	DmSetDevDir               = dmSetDevDirFct
+	DmTaskAddTarget           = dmTaskAddTargetFct
+	DmTaskCreate              = dmTaskCreateFct
+	DmTaskDestroy             = dmTaskDestroyFct
+	DmTaskGetDeps             = dmTaskGetDepsFct
+	DmTaskGetInfo             = dmTaskGetInfoFct
+	DmTaskGetDriverVersion    = dmTaskGetDriverVersionFct
+	DmTaskRun                 = dmTaskRunFct
+	DmTaskSetAddNode          = dmTaskSetAddNodeFct
+	DmTaskSetCookie           = dmTaskSetCookieFct
+	DmTaskSetMessage          = dmTaskSetMessageFct
+	DmTaskSetName             = dmTaskSetNameFct
+	DmTaskSetRo               = dmTaskSetRoFct
+	DmTaskSetSector           = dmTaskSetSectorFct
+	DmUdevWait                = dmUdevWaitFct
+	DmUdevSetSyncSupport      = dmUdevSetSyncSupportFct
+	DmUdevGetSyncSupport      = dmUdevGetSyncSupportFct
+	DmCookieSupported         = dmCookieSupportedFct
+	LogWithErrnoInit          = logWithErrnoInitFct
+	DmTaskDeferredRemove      = dmTaskDeferredRemoveFct
+	DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct
 )
 
 func free(p *C.char) {
@@ -219,7 +221,7 @@
 	return C.GoString((*C.char)(buffer))
 }
 
-func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
+func dmGetNextTargetFct(task *CDmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
 	var (
 		Cstart, Clength      C.uint64_t
 		CtargetType, Cparams *C.char
@@ -231,8 +233,8 @@
 		*params = C.GoString(Cparams)
 	}()
 
-	nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams)
-	return uintptr(nextp)
+	nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
+	return nextp
 }
 
 func dmUdevSetSyncSupportFct(syncWithUdev int) {
diff --git a/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
new file mode 100644
index 0000000..ced482c
--- /dev/null
+++ b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,33 @@
+// +build linux,!libdm_no_deferred_remove
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+*/
+import "C"
+
+const LibraryDeferredRemovalSupport = true
+
+func dmTaskDeferredRemoveFct(task *CDmTask) int {
+	return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
+}
+
+func dmTaskGetInfoWithDeferredFct(task *CDmTask, info *Info) int {
+	Cinfo := C.struct_dm_info{}
+	defer func() {
+		info.Exists = int(Cinfo.exists)
+		info.Suspended = int(Cinfo.suspended)
+		info.LiveTable = int(Cinfo.live_table)
+		info.InactiveTable = int(Cinfo.inactive_table)
+		info.OpenCount = int32(Cinfo.open_count)
+		info.EventNr = uint32(Cinfo.event_nr)
+		info.Major = uint32(Cinfo.major)
+		info.Minor = uint32(Cinfo.minor)
+		info.ReadOnly = int(Cinfo.read_only)
+		info.TargetCount = int32(Cinfo.target_count)
+		info.DeferredRemove = int(Cinfo.deferred_remove)
+	}()
+	return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
diff --git a/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
new file mode 100644
index 0000000..16631bf
--- /dev/null
+++ b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
@@ -0,0 +1,14 @@
+// +build linux,libdm_no_deferred_remove
+
+package devicemapper
+
+const LibraryDeferredRemovalSupport = false
+
+func dmTaskDeferredRemoveFct(task *CDmTask) int {
+	// Error. Nobody should be calling it.
+	return -1
+}
+
+func dmTaskGetInfoWithDeferredFct(task *CDmTask, info *Info) int {
+	return -1
+}
diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go
index 4e4a91b..fdafb53 100644
--- a/pkg/fileutils/fileutils.go
+++ b/pkg/fileutils/fileutils.go
@@ -1,26 +1,170 @@
 package fileutils
 
 import (
-	log "github.com/Sirupsen/logrus"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
 	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
 )
 
-// Matches returns true if relFilePath matches any of the patterns
-func Matches(relFilePath string, patterns []string) (bool, error) {
-	for _, exclude := range patterns {
-		matched, err := filepath.Match(exclude, relFilePath)
+func Exclusion(pattern string) bool {
+	return pattern[0] == '!'
+}
+
+func Empty(pattern string) bool {
+	return pattern == ""
+}
+
+// Cleanpatterns takes a slice of patterns returns a new
+// slice of patterns cleaned with filepath.Clean, stripped
+// of any empty patterns and lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+	// Loop over exclusion patterns and:
+	// 1. Clean them up.
+	// 2. Indicate whether we are dealing with any exception rules.
+	// 3. Error if we see a single exclusion marker on it's own (!).
+	cleanedPatterns := []string{}
+	patternDirs := [][]string{}
+	exceptions := false
+	for _, pattern := range patterns {
+		// Eliminate leading and trailing whitespace.
+		pattern = strings.TrimSpace(pattern)
+		if Empty(pattern) {
+			continue
+		}
+		if Exclusion(pattern) {
+			if len(pattern) == 1 {
+				logrus.Errorf("Illegal exclusion pattern: %s", pattern)
+				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+			}
+			exceptions = true
+		}
+		pattern = filepath.Clean(pattern)
+		cleanedPatterns = append(cleanedPatterns, pattern)
+		if Exclusion(pattern) {
+			pattern = pattern[1:]
+		}
+		patternDirs = append(patternDirs, strings.Split(pattern, "/"))
+	}
+
+	return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+	file = filepath.Clean(file)
+
+	if file == "." {
+		// Don't let them exclude everything, kind of silly.
+		return false, nil
+	}
+
+	patterns, patDirs, _, err := CleanPatterns(patterns)
+	if err != nil {
+		return false, err
+	}
+
+	return OptimizedMatches(file, patterns, patDirs)
+}
+
+// Matches is basically the same as fileutils.Matches() but optimized for archive.go.
+// It will assume that the inputs have been preprocessed and therefore the function
+// doen't need to do as much error checking and clean-up. This was done to avoid
+// repeating these steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+	matched := false
+	parentPath := filepath.Dir(file)
+	parentPathDirs := strings.Split(parentPath, "/")
+
+	for i, pattern := range patterns {
+		negative := false
+
+		if Exclusion(pattern) {
+			negative = true
+			pattern = pattern[1:]
+		}
+
+		match, err := filepath.Match(pattern, file)
 		if err != nil {
-			log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
+			logrus.Errorf("Error matching: %s (pattern: %s)", file, pattern)
 			return false, err
 		}
-		if matched {
-			if filepath.Clean(relFilePath) == "." {
-				log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
-				continue
+
+		if !match && parentPath != "." {
+			// Check to see if the pattern matches one of our parent dirs.
+			if len(patDirs[i]) <= len(parentPathDirs) {
+				match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
+					strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
 			}
-			log.Debugf("Skipping excluded path: %s", relFilePath)
-			return true, nil
+		}
+
+		if match {
+			matched = !negative
 		}
 	}
-	return false, nil
+
+	if matched {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+	return matched, nil
+}
+
+func CopyFile(src, dst string) (int64, error) {
+	cleanSrc := filepath.Clean(src)
+	cleanDst := filepath.Clean(dst)
+	if cleanSrc == cleanDst {
+		return 0, nil
+	}
+	sf, err := os.Open(cleanSrc)
+	if err != nil {
+		return 0, err
+	}
+	defer sf.Close()
+	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+		return 0, err
+	}
+	df, err := os.Create(cleanDst)
+	if err != nil {
+		return 0, err
+	}
+	defer df.Close()
+	return io.Copy(df, sf)
+}
+
+func GetTotalUsedFds() int {
+	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+	} else {
+		return len(fds)
+	}
+	return -1
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link may not be a file.
+func ReadSymlinkedDirectory(path string) (string, error) {
+	var realPath string
+	var err error
+	if realPath, err = filepath.Abs(path); err != nil {
+		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+	}
+	realPathInfo, err := os.Stat(realPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+	}
+	if !realPathInfo.Mode().IsDir() {
+		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+	}
+	return realPath, nil
 }
diff --git a/pkg/fileutils/fileutils_test.go b/pkg/fileutils/fileutils_test.go
new file mode 100644
index 0000000..ef93168
--- /dev/null
+++ b/pkg/fileutils/fileutils_test.go
@@ -0,0 +1,357 @@
+package fileutils
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"testing"
+)
+
+// CopyFile with invalid src
+func TestCopyFileWithInvalidSrc(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest"))
+	if err == nil {
+		t.Fatal("Should have fail to copy an invalid src file")
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes")
+	}
+
+}
+
+// CopyFile with invalid dest
+func TestCopyFileWithInvalidDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	src := path.Join(tempFolder, "file")
+	err = ioutil.WriteFile(src, []byte("content"), 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path"))
+	if err == nil {
+		t.Fatal("Should have fail to copy an invalid src file")
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes")
+	}
+
+}
+
+// CopyFile with same src and dest
+func TestCopyFileWithSameSrcAndDest(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	file := path.Join(tempFolder, "file")
+	err = ioutil.WriteFile(file, []byte("content"), 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile(file, file)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes as it is the same file.")
+	}
+}
+
+// CopyFile with same src and dest but path is different and not clean
+func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	testFolder := path.Join(tempFolder, "test")
+	err = os.MkdirAll(testFolder, 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	file := path.Join(testFolder, "file")
+	sameFile := testFolder + "/../test/file"
+	err = ioutil.WriteFile(file, []byte("content"), 0740)
+	if err != nil {
+		t.Fatal(err)
+	}
+	bytes, err := CopyFile(file, sameFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bytes != 0 {
+		t.Fatal("Should have written 0 bytes as it is the same file.")
+	}
+}
+
+func TestCopyFile(t *testing.T) {
+	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+	defer os.RemoveAll(tempFolder)
+	if err != nil {
+		t.Fatal(err)
+	}
+	src := path.Join(tempFolder, "src")
+	dest := path.Join(tempFolder, "dest")
+	ioutil.WriteFile(src, []byte("content"), 0777)
+	ioutil.WriteFile(dest, []byte("destContent"), 0777)
+	bytes, err := CopyFile(src, dest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bytes != 7 {
+		t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes)
+	}
+	actual, err := ioutil.ReadFile(dest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(actual) != "content" {
+		t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content")
+	}
+}
+
+// Reading a symlink to a directory must return the directory
+func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
+	var err error
+	if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil {
+		t.Errorf("failed to create directory: %s", err)
+	}
+
+	if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil {
+		t.Errorf("failed to create symlink: %s", err)
+	}
+
+	var path string
+	if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil {
+		t.Fatalf("failed to read symlink to directory: %s", err)
+	}
+
+	if path != "/tmp/testReadSymlinkToExistingDirectory" {
+		t.Fatalf("symlink returned unexpected directory: %s", path)
+	}
+
+	if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil {
+		t.Errorf("failed to remove temporary directory: %s", err)
+	}
+
+	if err = os.Remove("/tmp/dirLinkTest"); err != nil {
+		t.Errorf("failed to remove symlink: %s", err)
+	}
+}
+
+// Reading a non-existing symlink must fail
+func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) {
+	var path string
+	var err error
+	if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil {
+		t.Fatalf("error expected for non-existing symlink")
+	}
+
+	if path != "" {
+		t.Fatalf("expected empty path, but '%s' was returned", path)
+	}
+}
+
+// Reading a symlink to a file must fail
+func TestReadSymlinkedDirectoryToFile(t *testing.T) {
+	var err error
+	var file *os.File
+
+	if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
+		t.Fatalf("failed to create file: %s", err)
+	}
+
+	file.Close()
+
+	if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
+		t.Errorf("failed to create symlink: %s", err)
+	}
+
+	var path string
+	if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
+		t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
+	}
+
+	if path != "" {
+		t.Fatalf("path should've been empty: %s", path)
+	}
+
+	if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
+		t.Errorf("failed to remove file: %s", err)
+	}
+
+	if err = os.Remove("/tmp/fileLinkTest"); err != nil {
+		t.Errorf("failed to remove symlink: %s", err)
+	}
+}
+
+func TestWildcardMatches(t *testing.T) {
+	match, _ := Matches("fileutils.go", []string{"*"})
+	if match != true {
+		t.Errorf("failed to get a wildcard match, got %v", match)
+	}
+}
+
+// A simple pattern match should return true.
+func TestPatternMatches(t *testing.T) {
+	match, _ := Matches("fileutils.go", []string{"*.go"})
+	if match != true {
+		t.Errorf("failed to get a match, got %v", match)
+	}
+}
+
+// An exclusion followed by an inclusion should return true.
+func TestExclusionPatternMatchesPatternBefore(t *testing.T) {
+	match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"})
+	if match != true {
+		t.Errorf("failed to get true match on exclusion pattern, got %v", match)
+	}
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderExclusions(t *testing.T) {
+	match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"})
+	if match != false {
+		t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) {
+	match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"})
+	if match != false {
+		t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWildcardExclusions(t *testing.T) {
+	match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
+	if match != false {
+		t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A pattern followed by an exclusion should return false.
+func TestExclusionPatternMatchesPatternAfter(t *testing.T) {
+	match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"})
+	if match != false {
+		t.Errorf("failed to get false match on exclusion pattern, got %v", match)
+	}
+}
+
+// A filename evaluating to . should return false.
+func TestExclusionPatternMatchesWholeDirectory(t *testing.T) {
+	match, _ := Matches(".", []string{"*.go"})
+	if match != false {
+		t.Errorf("failed to get false match on ., got %v", match)
+	}
+}
+
+// A single ! pattern should return an error.
+func TestSingleExclamationError(t *testing.T) {
+	_, err := Matches("fileutils.go", []string{"!"})
+	if err == nil {
+		t.Errorf("failed to get an error for a single exclamation point, got %v", err)
+	}
+}
+
+// A string preceded with a ! should return true from Exclusion.
+func TestExclusion(t *testing.T) {
+	exclusion := Exclusion("!")
+	if !exclusion {
+		t.Errorf("failed to get true for a single !, got %v", exclusion)
+	}
+}
+
+// Matches with no patterns
+func TestMatchesWithNoPatterns(t *testing.T) {
+	matches, err := Matches("/any/path/there", []string{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if matches {
+		t.Fatalf("Should not have match anything")
+	}
+}
+
+// Matches with malformed patterns
+func TestMatchesWithMalformedPatterns(t *testing.T) {
+	matches, err := Matches("/any/path/there", []string{"["})
+	if err == nil {
+		t.Fatal("Should have failed because of a malformed syntax in the pattern")
+	}
+	if matches {
+		t.Fatalf("Should not have match anything")
+	}
+}
+
+// An empty string should return true from Empty.
+func TestEmpty(t *testing.T) {
+	empty := Empty("")
+	if !empty {
+		t.Errorf("failed to get true for an empty string, got %v", empty)
+	}
+}
+
+func TestCleanPatterns(t *testing.T) {
+	cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"})
+	if len(cleaned) != 2 {
+		t.Errorf("expected 2 element slice, got %v", len(cleaned))
+	}
+}
+
+func TestCleanPatternsStripEmptyPatterns(t *testing.T) {
+	cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""})
+	if len(cleaned) != 2 {
+		t.Errorf("expected 2 element slice, got %v", len(cleaned))
+	}
+}
+
+func TestCleanPatternsExceptionFlag(t *testing.T) {
+	_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"})
+	if !exceptions {
+		t.Errorf("expected exceptions to be true, got %v", exceptions)
+	}
+}
+
+func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) {
+	_, _, exceptions, _ := CleanPatterns([]string{"docs", "  !docs/README.md"})
+	if !exceptions {
+		t.Errorf("expected exceptions to be true, got %v", exceptions)
+	}
+}
+
+func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) {
+	_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md  "})
+	if !exceptions {
+		t.Errorf("expected exceptions to be true, got %v", exceptions)
+	}
+}
+
+func TestCleanPatternsErrorSingleException(t *testing.T) {
+	_, _, _, err := CleanPatterns([]string{"!"})
+	if err == nil {
+		t.Errorf("expected error on single exclamation point, got %v", err)
+	}
+}
+
+func TestCleanPatternsFolderSplit(t *testing.T) {
+	_, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"})
+	if dirs[0][0] != "docs" {
+		t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1])
+	}
+	if dirs[0][1] != "config" {
+		t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1])
+	}
+}
diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go
index c6f13ed..b9433db 100644
--- a/pkg/graphdb/graphdb.go
+++ b/pkg/graphdb/graphdb.go
@@ -378,12 +378,22 @@
 		tx.Rollback()
 		return -1, err
 	}
-
 	changes, err := rows.RowsAffected()
 	if err != nil {
 		return -1, err
 	}
 
+	// Clear who's using this id as parent
+	refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id)
+	if err != nil {
+		tx.Rollback()
+		return -1, err
+	}
+	refsCount, err := refs.RowsAffected()
+	if err != nil {
+		return -1, err
+	}
+
 	// Delete entity
 	if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil {
 		tx.Rollback()
@@ -394,7 +404,7 @@
 		return -1, err
 	}
 
-	return int(changes), nil
+	return int(changes + refsCount), nil
 }
 
 // Rename an edge for a given path
diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go
index f228285..1cd223b 100644
--- a/pkg/graphdb/graphdb_test.go
+++ b/pkg/graphdb/graphdb_test.go
@@ -52,7 +52,7 @@
 		t.Fatal("Entity should not be nil")
 	}
 	if e.ID() != "0" {
-		t.Fatalf("Enity id should be 0, got %s", e.ID())
+		t.Fatalf("Entity id should be 0, got %s", e.ID())
 	}
 }
 
@@ -74,7 +74,7 @@
 		t.Fatal(err)
 	}
 	if _, err := db.Set("/foo", "43"); err == nil {
-		t.Fatalf("Creating an entry with a duplciate path did not cause an error")
+		t.Fatalf("Creating an entry with a duplicate path did not cause an error")
 	}
 }
 
@@ -472,8 +472,8 @@
 
 	db.Set("/webapp", "1")
 
-	if db.Refs("1") != 1 {
-		t.Fatal("Expect reference count to be 1")
+	if c := db.Refs("1"); c != 1 {
+		t.Fatalf("Expect reference count to be 1, got %d", c)
 	}
 
 	db.Set("/db", "2")
@@ -484,7 +484,45 @@
 		t.Fatal(err)
 	}
 	if count != 2 {
-		t.Fatal("Expected 2 references to be removed")
+		t.Fatalf("Expected 2 references to be removed, got %d", count)
+	}
+}
+
+// Regression test https://github.com/docker/docker/issues/12334
+func TestPurgeIdRefPaths(t *testing.T) {
+	db, dbpath := newTestDb(t)
+	defer destroyTestDb(dbpath)
+
+	db.Set("/webapp", "1")
+	db.Set("/db", "2")
+
+	db.Set("/db/webapp", "1")
+
+	if c := db.Refs("1"); c != 2 {
+		t.Fatalf("Expected 2 reference for webapp, got %d", c)
+	}
+	if c := db.Refs("2"); c != 1 {
+		t.Fatalf("Expected 1 reference for db, got %d", c)
+	}
+
+	if rp := db.RefPaths("2"); len(rp) != 1 {
+		t.Fatalf("Expected 1 reference path for db, got %d", len(rp))
+	}
+
+	count, err := db.Purge("2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if count != 2 {
+		t.Fatalf("Expected 2 rows to be removed, got %d", count)
+	}
+
+	if c := db.Refs("2"); c != 0 {
+		t.Fatalf("Expected 0 reference for db, got %d", c)
+	}
+	if c := db.Refs("1"); c != 1 {
+		t.Fatalf("Expected 1 reference for webapp, got %d", c)
 	}
 }
 
diff --git a/pkg/httputils/httputils.go b/pkg/httputils/httputils.go
new file mode 100644
index 0000000..1c92224
--- /dev/null
+++ b/pkg/httputils/httputils.go
@@ -0,0 +1,26 @@
+package httputils
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+)
+
+// Request a given URL and return an io.Reader
+func Download(url string) (resp *http.Response, err error) {
+	if resp, err = http.Get(url); err != nil {
+		return nil, err
+	}
+	if resp.StatusCode >= 400 {
+		return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
+	}
+	return resp, nil
+}
+
+func NewHTTPRequestError(msg string, res *http.Response) error {
+	return &jsonmessage.JSONError{
+		Message: msg,
+		Code:    res.StatusCode,
+	}
+}
diff --git a/pkg/httputils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go
index 10edd43..f690d0e 100644
--- a/pkg/httputils/resumablerequestreader.go
+++ b/pkg/httputils/resumablerequestreader.go
@@ -6,7 +6,7 @@
 	"net/http"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 type resumableRequestReader struct {
@@ -72,7 +72,7 @@
 		r.cleanUpResponse()
 	}
 	if err != nil && err != io.EOF {
-		log.Infof("encountered error during pull and clearing it before resume: %s", err)
+		logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
 		err = nil
 	}
 	return n, err
diff --git a/pkg/httputils/resumablerequestreader_test.go b/pkg/httputils/resumablerequestreader_test.go
new file mode 100644
index 0000000..3533860
--- /dev/null
+++ b/pkg/httputils/resumablerequestreader_test.go
@@ -0,0 +1,83 @@
+package httputils
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+)
+
+func TestResumableRequestReader(t *testing.T) {
+
+	srvtxt := "some response text data"
+
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, srvtxt)
+	}))
+	defer ts.Close()
+
+	var req *http.Request
+	req, err := http.NewRequest("GET", ts.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client := &http.Client{}
+	retries := uint32(5)
+	imgSize := int64(len(srvtxt))
+
+	resreq := ResumableRequestReader(client, req, retries, imgSize)
+	defer resreq.Close()
+
+	data, err := ioutil.ReadAll(resreq)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resstr := strings.TrimSuffix(string(data), "\n")
+
+	if resstr != srvtxt {
+		t.Errorf("resstr != srvtxt")
+	}
+}
+
+func TestResumableRequestReaderWithInitialResponse(t *testing.T) {
+
+	srvtxt := "some response text data"
+
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, srvtxt)
+	}))
+	defer ts.Close()
+
+	var req *http.Request
+	req, err := http.NewRequest("GET", ts.URL, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client := &http.Client{}
+	retries := uint32(5)
+	imgSize := int64(len(srvtxt))
+
+	res, err := client.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res)
+	defer resreq.Close()
+
+	data, err := ioutil.ReadAll(resreq)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resstr := strings.TrimSuffix(string(data), "\n")
+
+	if resstr != srvtxt {
+		t.Errorf("resstr != srvtxt")
+	}
+}
diff --git a/pkg/ioutils/readers.go b/pkg/ioutils/readers.go
index 58ff1af63..0e542cb 100644
--- a/pkg/ioutils/readers.go
+++ b/pkg/ioutils/readers.go
@@ -3,6 +3,8 @@
 import (
 	"bytes"
 	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
 	"io"
 	"math/big"
 	"sync"
@@ -215,3 +217,11 @@
 	}
 	return closer.Close()
 }
+
+func HashData(src io.Reader) (string, error) {
+	h := sha256.New()
+	if _, err := io.Copy(h, src); err != nil {
+		return "", err
+	}
+	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
diff --git a/pkg/ioutils/readers_test.go b/pkg/ioutils/readers_test.go
index 0af978e..d220487 100644
--- a/pkg/ioutils/readers_test.go
+++ b/pkg/ioutils/readers_test.go
@@ -2,11 +2,91 @@
 
 import (
 	"bytes"
+	"fmt"
 	"io"
 	"io/ioutil"
+	"strings"
 	"testing"
 )
 
+// Implement io.Reader
+type errorReader struct{}
+
+func (r *errorReader) Read(p []byte) (int, error) {
+	return 0, fmt.Errorf("Error reader always fail.")
+}
+
+func TestReadCloserWrapperClose(t *testing.T) {
+	reader := strings.NewReader("A string reader")
+	wrapper := NewReadCloserWrapper(reader, func() error {
+		return fmt.Errorf("This will be called when closing")
+	})
+	err := wrapper.Close()
+	if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
+		t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.")
+	}
+}
+
+func TestReaderErrWrapperReadOnError(t *testing.T) {
+	called := false
+	reader := &errorReader{}
+	wrapper := NewReaderErrWrapper(reader, func() {
+		called = true
+	})
+	_, err := wrapper.Read([]byte{})
+	if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") {
+		t.Fatalf("readErrWrapper should returned an error")
+	}
+	if !called {
+		t.Fatalf("readErrWrapper should have call the anonymous function on failure")
+	}
+}
+
+func TestReaderErrWrapperRead(t *testing.T) {
+	reader := strings.NewReader("a string reader.")
+	wrapper := NewReaderErrWrapper(reader, func() {
+		t.Fatalf("readErrWrapper should not have called the anonymous function on failure")
+	})
+	// Read 20 byte (should be ok with the string above)
+	num, err := wrapper.Read(make([]byte, 20))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if num != 16 {
+		t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num)
+	}
+}
+
+func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) {
+	reader, writer := io.Pipe()
+
+	drainBuffer := make([]byte, 1024)
+	buffer := bytes.Buffer{}
+	bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer)
+
+	// Write everything down to a Pipe
+	// Usually, a pipe should block but because of the buffered reader,
+	// the writes will go through
+	done := make(chan bool)
+	go func() {
+		writer.Write([]byte("hello world"))
+		writer.Close()
+		done <- true
+	}()
+
+	// Drain the reader *after* everything has been written, just to verify
+	// it is indeed buffering
+	<-done
+
+	output, err := ioutil.ReadAll(bufreader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(output, []byte("hello world")) {
+		t.Error(string(output))
+	}
+}
+
 func TestBufReader(t *testing.T) {
 	reader, writer := io.Pipe()
 	bufreader := NewBufReader(reader)
@@ -33,6 +113,50 @@
 	}
 }
 
+func TestBufReaderCloseWithNonReaderCloser(t *testing.T) {
+	reader := strings.NewReader("buffer")
+	bufreader := NewBufReader(reader)
+
+	if err := bufreader.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+}
+
+// implements io.ReadCloser
+type simpleReaderCloser struct{}
+
+func (r *simpleReaderCloser) Read(p []byte) (n int, err error) {
+	return 0, nil
+}
+
+func (r *simpleReaderCloser) Close() error {
+	return nil
+}
+
+func TestBufReaderCloseWithReaderCloser(t *testing.T) {
+	reader := &simpleReaderCloser{}
+	bufreader := NewBufReader(reader)
+
+	err := bufreader.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+}
+
+func TestHashData(t *testing.T) {
+	reader := strings.NewReader("hash-me")
+	actual, err := HashData(reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
+	if actual != expected {
+		t.Fatalf("Expecting %s, got %s", expected, actual)
+	}
+}
+
 type repeatedReader struct {
 	readCount int
 	maxReads  int
diff --git a/pkg/ioutils/writeflusher.go b/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000..2509547
--- /dev/null
+++ b/pkg/ioutils/writeflusher.go
@@ -0,0 +1,47 @@
+package ioutils
+
+import (
+	"io"
+	"net/http"
+	"sync"
+)
+
+type WriteFlusher struct {
+	sync.Mutex
+	w       io.Writer
+	flusher http.Flusher
+	flushed bool
+}
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	wf.Lock()
+	defer wf.Unlock()
+	n, err = wf.w.Write(b)
+	wf.flushed = true
+	wf.flusher.Flush()
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	wf.Lock()
+	defer wf.Unlock()
+	wf.flushed = true
+	wf.flusher.Flush()
+}
+
+func (wf *WriteFlusher) Flushed() bool {
+	wf.Lock()
+	defer wf.Unlock()
+	return wf.flushed
+}
+
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+	var flusher http.Flusher
+	if f, ok := w.(http.Flusher); ok {
+		flusher = f
+	} else {
+		flusher = &NopFlusher{}
+	}
+	return &WriteFlusher{w: w, flusher: flusher}
+}
diff --git a/pkg/ioutils/writers.go b/pkg/ioutils/writers.go
index c0b3608..43fdc44 100644
--- a/pkg/ioutils/writers.go
+++ b/pkg/ioutils/writers.go
@@ -37,3 +37,24 @@
 		closer: closer,
 	}
 }
+
+// Wrap a concrete io.Writer and hold a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when write return is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+func NewWriteCounter(w io.Writer) *WriteCounter {
+	return &WriteCounter{
+		Writer: w,
+	}
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+	count, err = wc.Writer.Write(p)
+	wc.Count += int64(count)
+	return
+}
diff --git a/pkg/ioutils/writers_test.go b/pkg/ioutils/writers_test.go
new file mode 100644
index 0000000..564b1cd
--- /dev/null
+++ b/pkg/ioutils/writers_test.go
@@ -0,0 +1,65 @@
+package ioutils
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
+func TestWriteCloserWrapperClose(t *testing.T) {
+	called := false
+	writer := bytes.NewBuffer([]byte{})
+	wrapper := NewWriteCloserWrapper(writer, func() error {
+		called = true
+		return nil
+	})
+	if err := wrapper.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if !called {
+		t.Fatalf("writeCloserWrapper should have call the anonymous function.")
+	}
+}
+
+func TestNopWriteCloser(t *testing.T) {
+	writer := bytes.NewBuffer([]byte{})
+	wrapper := NopWriteCloser(writer)
+	if err := wrapper.Close(); err != nil {
+		t.Fatal("NopWriteCloser always return nil on Close.")
+	}
+
+}
+
+func TestNopWriter(t *testing.T) {
+	nw := &NopWriter{}
+	l, err := nw.Write([]byte{'c'})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if l != 1 {
+		t.Fatalf("Expected 1 got %d", l)
+	}
+}
+
+func TestWriteCounter(t *testing.T) {
+	dummy1 := "This is a dummy string."
+	dummy2 := "This is another dummy string."
+	totalLength := int64(len(dummy1) + len(dummy2))
+
+	reader1 := strings.NewReader(dummy1)
+	reader2 := strings.NewReader(dummy2)
+
+	var buffer bytes.Buffer
+	wc := NewWriteCounter(&buffer)
+
+	reader1.WriteTo(wc)
+	reader2.WriteTo(wc)
+
+	if wc.Count != totalLength {
+		t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)
+	}
+
+	if buffer.String() != dummy1+dummy2 {
+		t.Error("Wrong message written")
+	}
+}
diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go
deleted file mode 100644
index 3e083a4..0000000
--- a/pkg/iptables/iptables.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package iptables
-
-import (
-	"errors"
-	"fmt"
-	"net"
-	"os/exec"
-	"regexp"
-	"strconv"
-	"strings"
-
-	log "github.com/Sirupsen/logrus"
-)
-
-type Action string
-type Table string
-
-const (
-	Append Action = "-A"
-	Delete Action = "-D"
-	Insert Action = "-I"
-	Nat    Table  = "nat"
-	Filter Table  = "filter"
-	Mangle Table  = "mangle"
-)
-
-var (
-	iptablesPath        string
-	supportsXlock       = false
-	ErrIptablesNotFound = errors.New("Iptables not found")
-)
-
-type Chain struct {
-	Name   string
-	Bridge string
-	Table  Table
-}
-
-type ChainError struct {
-	Chain  string
-	Output []byte
-}
-
-func (e *ChainError) Error() string {
-	return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output))
-}
-
-func initCheck() error {
-
-	if iptablesPath == "" {
-		path, err := exec.LookPath("iptables")
-		if err != nil {
-			return ErrIptablesNotFound
-		}
-		iptablesPath = path
-		supportsXlock = exec.Command(iptablesPath, "--wait", "-L", "-n").Run() == nil
-	}
-	return nil
-}
-
-func NewChain(name, bridge string, table Table) (*Chain, error) {
-	c := &Chain{
-		Name:   name,
-		Bridge: bridge,
-		Table:  table,
-	}
-
-	if string(c.Table) == "" {
-		c.Table = Filter
-	}
-
-	// Add chain if it doesn't exist
-	if _, err := Raw("-t", string(c.Table), "-n", "-L", c.Name); err != nil {
-		if output, err := Raw("-t", string(c.Table), "-N", c.Name); err != nil {
-			return nil, err
-		} else if len(output) != 0 {
-			return nil, fmt.Errorf("Could not create %s/%s chain: %s", c.Table, c.Name, output)
-		}
-	}
-
-	switch table {
-	case Nat:
-		preroute := []string{
-			"-m", "addrtype",
-			"--dst-type", "LOCAL"}
-		if !Exists(Nat, "PREROUTING", preroute...) {
-			if err := c.Prerouting(Append, preroute...); err != nil {
-				return nil, fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err)
-			}
-		}
-		output := []string{
-			"-m", "addrtype",
-			"--dst-type", "LOCAL",
-			"!", "--dst", "127.0.0.0/8"}
-		if !Exists(Nat, "OUTPUT", output...) {
-			if err := c.Output(Append, output...); err != nil {
-				return nil, fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err)
-			}
-		}
-	case Filter:
-		link := []string{
-			"-o", c.Bridge,
-			"-j", c.Name}
-		if !Exists(Filter, "FORWARD", link...) {
-			insert := append([]string{string(Insert), "FORWARD"}, link...)
-			if output, err := Raw(insert...); err != nil {
-				return nil, err
-			} else if len(output) != 0 {
-				return nil, fmt.Errorf("Could not create linking rule to %s/%s: %s", c.Table, c.Name, output)
-			}
-		}
-	}
-	return c, nil
-}
-
-func RemoveExistingChain(name string, table Table) error {
-	c := &Chain{
-		Name:  name,
-		Table: table,
-	}
-	if string(c.Table) == "" {
-		c.Table = Filter
-	}
-	return c.Remove()
-}
-
-// Add forwarding rule to 'filter' table and corresponding nat rule to 'nat' table
-func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int) error {
-	daddr := ip.String()
-	if ip.IsUnspecified() {
-		// iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we
-		// want "0.0.0.0/0". "0/0" is correctly interpreted as "any
-		// value" by both iptables and ip6tables.
-		daddr = "0/0"
-	}
-	if output, err := Raw("-t", string(Nat), string(action), c.Name,
-		"-p", proto,
-		"-d", daddr,
-		"--dport", strconv.Itoa(port),
-		"!", "-i", c.Bridge,
-		"-j", "DNAT",
-		"--to-destination", net.JoinHostPort(destAddr, strconv.Itoa(destPort))); err != nil {
-		return err
-	} else if len(output) != 0 {
-		return &ChainError{Chain: "FORWARD", Output: output}
-	}
-
-	if output, err := Raw("-t", string(Filter), string(action), c.Name,
-		"!", "-i", c.Bridge,
-		"-o", c.Bridge,
-		"-p", proto,
-		"-d", destAddr,
-		"--dport", strconv.Itoa(destPort),
-		"-j", "ACCEPT"); err != nil {
-		return err
-	} else if len(output) != 0 {
-		return &ChainError{Chain: "FORWARD", Output: output}
-	}
-
-	if output, err := Raw("-t", string(Nat), string(action), "POSTROUTING",
-		"-p", proto,
-		"-s", destAddr,
-		"-d", destAddr,
-		"--dport", strconv.Itoa(destPort),
-		"-j", "MASQUERADE"); err != nil {
-		return err
-	} else if len(output) != 0 {
-		return &ChainError{Chain: "FORWARD", Output: output}
-	}
-
-	return nil
-}
-
-// Add reciprocal ACCEPT rule for two supplied IP addresses.
-// Traffic is allowed from ip1 to ip2 and vice-versa
-func (c *Chain) Link(action Action, ip1, ip2 net.IP, port int, proto string) error {
-	if output, err := Raw("-t", string(Filter), string(action), c.Name,
-		"-i", c.Bridge, "-o", c.Bridge,
-		"-p", proto,
-		"-s", ip1.String(),
-		"-d", ip2.String(),
-		"--dport", strconv.Itoa(port),
-		"-j", "ACCEPT"); err != nil {
-		return err
-	} else if len(output) != 0 {
-		return fmt.Errorf("Error iptables forward: %s", output)
-	}
-	if output, err := Raw("-t", string(Filter), string(action), c.Name,
-		"-i", c.Bridge, "-o", c.Bridge,
-		"-p", proto,
-		"-s", ip2.String(),
-		"-d", ip1.String(),
-		"--sport", strconv.Itoa(port),
-		"-j", "ACCEPT"); err != nil {
-		return err
-	} else if len(output) != 0 {
-		return fmt.Errorf("Error iptables forward: %s", output)
-	}
-	return nil
-}
-
-// Add linking rule to nat/PREROUTING chain.
-func (c *Chain) Prerouting(action Action, args ...string) error {
-	a := []string{"-t", string(Nat), string(action), "PREROUTING"}
-	if len(args) > 0 {
-		a = append(a, args...)
-	}
-	if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
-		return err
-	} else if len(output) != 0 {
-		return &ChainError{Chain: "PREROUTING", Output: output}
-	}
-	return nil
-}
-
-// Add linking rule to an OUTPUT chain
-func (c *Chain) Output(action Action, args ...string) error {
-	a := []string{"-t", string(c.Table), string(action), "OUTPUT"}
-	if len(args) > 0 {
-		a = append(a, args...)
-	}
-	if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
-		return err
-	} else if len(output) != 0 {
-		return &ChainError{Chain: "OUTPUT", Output: output}
-	}
-	return nil
-}
-
-func (c *Chain) Remove() error {
-	// Ignore errors - This could mean the chains were never set up
-	if c.Table == Nat {
-		c.Prerouting(Delete, "-m", "addrtype", "--dst-type", "LOCAL")
-		c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8")
-		c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL") // Created in versions <= 0.1.6
-
-		c.Prerouting(Delete)
-		c.Output(Delete)
-	}
-	Raw("-t", string(c.Table), "-F", c.Name)
-	Raw("-t", string(c.Table), "-X", c.Name)
-	return nil
-}
-
-// Check if a rule exists
-func Exists(table Table, chain string, rule ...string) bool {
-	if string(table) == "" {
-		table = Filter
-	}
-
-	// iptables -C, --check option was added in v.1.4.11
-	// http://ftp.netfilter.org/pub/iptables/changes-iptables-1.4.11.txt
-
-	// try -C
-	// if exit status is 0 then return true, the rule exists
-	if _, err := Raw(append([]string{
-		"-t", string(table), "-C", chain}, rule...)...); err == nil {
-		return true
-	}
-
-	// parse "iptables -S" for the rule (this checks rules in a specific chain
-	// in a specific table)
-	rule_string := strings.Join(rule, " ")
-	existingRules, _ := exec.Command("iptables", "-t", string(table), "-S", chain).Output()
-
-	// regex to replace ips in rule
-	// because MASQUERADE rule will not be exactly what was passed
-	re := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}`)
-
-	return strings.Contains(
-		re.ReplaceAllString(string(existingRules), "?"),
-		re.ReplaceAllString(rule_string, "?"),
-	)
-}
-
-// Call 'iptables' system command, passing supplied arguments
-func Raw(args ...string) ([]byte, error) {
-
-	if err := initCheck(); err != nil {
-		return nil, err
-	}
-	if supportsXlock {
-		args = append([]string{"--wait"}, args...)
-	}
-
-	log.Debugf("%s, %v", iptablesPath, args)
-
-	output, err := exec.Command(iptablesPath, args...).CombinedOutput()
-	if err != nil {
-		return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err)
-	}
-
-	// ignore iptables' message about xtables lock
-	if strings.Contains(string(output), "waiting for it to exit") {
-		output = []byte("")
-	}
-
-	return output, err
-}
diff --git a/pkg/iptables/iptables_test.go b/pkg/iptables/iptables_test.go
deleted file mode 100644
index ced4262..0000000
--- a/pkg/iptables/iptables_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package iptables
-
-import (
-	"net"
-	"os/exec"
-	"strconv"
-	"strings"
-	"testing"
-)
-
-const chainName = "DOCKERTEST"
-
-var natChain *Chain
-var filterChain *Chain
-
-func TestNewChain(t *testing.T) {
-	var err error
-
-	natChain, err = NewChain(chainName, "lo", Nat)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	filterChain, err = NewChain(chainName, "lo", Filter)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestForward(t *testing.T) {
-	ip := net.ParseIP("192.168.1.1")
-	port := 1234
-	dstAddr := "172.17.0.1"
-	dstPort := 4321
-	proto := "tcp"
-
-	err := natChain.Forward(Insert, ip, port, proto, dstAddr, dstPort)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	dnatRule := []string{
-		"!", "-i", filterChain.Bridge,
-		"-d", ip.String(),
-		"-p", proto,
-		"--dport", strconv.Itoa(port),
-		"-j", "DNAT",
-		"--to-destination", dstAddr + ":" + strconv.Itoa(dstPort),
-	}
-
-	if !Exists(natChain.Table, natChain.Name, dnatRule...) {
-		t.Fatalf("DNAT rule does not exist")
-	}
-
-	filterRule := []string{
-		"!", "-i", filterChain.Bridge,
-		"-o", filterChain.Bridge,
-		"-d", dstAddr,
-		"-p", proto,
-		"--dport", strconv.Itoa(dstPort),
-		"-j", "ACCEPT",
-	}
-
-	if !Exists(filterChain.Table, filterChain.Name, filterRule...) {
-		t.Fatalf("filter rule does not exist")
-	}
-
-	masqRule := []string{
-		"-d", dstAddr,
-		"-s", dstAddr,
-		"-p", proto,
-		"--dport", strconv.Itoa(dstPort),
-		"-j", "MASQUERADE",
-	}
-
-	if !Exists(natChain.Table, "POSTROUTING", masqRule...) {
-		t.Fatalf("MASQUERADE rule does not exist")
-	}
-}
-
-func TestLink(t *testing.T) {
-	var err error
-
-	ip1 := net.ParseIP("192.168.1.1")
-	ip2 := net.ParseIP("192.168.1.2")
-	port := 1234
-	proto := "tcp"
-
-	err = filterChain.Link(Append, ip1, ip2, port, proto)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	rule1 := []string{
-		"-i", filterChain.Bridge,
-		"-o", filterChain.Bridge,
-		"-p", proto,
-		"-s", ip1.String(),
-		"-d", ip2.String(),
-		"--dport", strconv.Itoa(port),
-		"-j", "ACCEPT"}
-
-	if !Exists(filterChain.Table, filterChain.Name, rule1...) {
-		t.Fatalf("rule1 does not exist")
-	}
-
-	rule2 := []string{
-		"-i", filterChain.Bridge,
-		"-o", filterChain.Bridge,
-		"-p", proto,
-		"-s", ip2.String(),
-		"-d", ip1.String(),
-		"--sport", strconv.Itoa(port),
-		"-j", "ACCEPT"}
-
-	if !Exists(filterChain.Table, filterChain.Name, rule2...) {
-		t.Fatalf("rule2 does not exist")
-	}
-}
-
-func TestPrerouting(t *testing.T) {
-	args := []string{
-		"-i", "lo",
-		"-d", "192.168.1.1"}
-
-	err := natChain.Prerouting(Insert, args...)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	rule := []string{
-		"-j", natChain.Name}
-
-	rule = append(rule, args...)
-
-	if !Exists(natChain.Table, "PREROUTING", rule...) {
-		t.Fatalf("rule does not exist")
-	}
-
-	delRule := append([]string{"-D", "PREROUTING", "-t", string(Nat)}, rule...)
-	if _, err = Raw(delRule...); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestOutput(t *testing.T) {
-	args := []string{
-		"-o", "lo",
-		"-d", "192.168.1.1"}
-
-	err := natChain.Output(Insert, args...)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	rule := []string{
-		"-j", natChain.Name}
-
-	rule = append(rule, args...)
-
-	if !Exists(natChain.Table, "OUTPUT", rule...) {
-		t.Fatalf("rule does not exist")
-	}
-
-	delRule := append([]string{"-D", "OUTPUT", "-t",
-		string(natChain.Table)}, rule...)
-	if _, err = Raw(delRule...); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestCleanup(t *testing.T) {
-	var err error
-	var rules []byte
-
-	// Cleanup filter/FORWARD first otherwise output of iptables-save is dirty
-	link := []string{"-t", string(filterChain.Table),
-		string(Delete), "FORWARD",
-		"-o", filterChain.Bridge,
-		"-j", filterChain.Name}
-	if _, err = Raw(link...); err != nil {
-		t.Fatal(err)
-	}
-	filterChain.Remove()
-
-	err = RemoveExistingChain(chainName, Nat)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	rules, err = exec.Command("iptables-save").Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if strings.Contains(string(rules), chainName) {
-		t.Fatalf("Removing chain failed. %s found in iptables-save", chainName)
-	}
-}
diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go
index e2c2a2c..85afb3b 100644
--- a/pkg/jsonlog/jsonlog.go
+++ b/pkg/jsonlog/jsonlog.go
@@ -5,8 +5,6 @@
 	"fmt"
 	"io"
 	"time"
-
-	log "github.com/Sirupsen/logrus"
 )
 
 type JSONLog struct {
@@ -32,16 +30,21 @@
 	jl.Created = time.Time{}
 }
 
-func WriteLog(src io.Reader, dst io.Writer, format string) error {
+func WriteLog(src io.Reader, dst io.Writer, format string, since time.Time) error {
 	dec := json.NewDecoder(src)
 	l := &JSONLog{}
 	for {
-		if err := dec.Decode(l); err == io.EOF {
-			return nil
-		} else if err != nil {
-			log.Printf("Error streaming logs: %s", err)
+		l.Reset()
+		if err := dec.Decode(l); err != nil {
+			if err == io.EOF {
+				return nil
+			}
 			return err
 		}
+		if !since.IsZero() && l.Created.Before(since) {
+			continue
+		}
+
 		line, err := l.Format(format)
 		if err != nil {
 			return err
@@ -49,6 +52,5 @@
 		if _, err := io.WriteString(dst, line); err != nil {
 			return err
 		}
-		l.Reset()
 	}
 }
diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go
index 6244eb0..abaa8a7 100644
--- a/pkg/jsonlog/jsonlog_marshalling.go
+++ b/pkg/jsonlog/jsonlog_marshalling.go
@@ -65,8 +65,7 @@
 func (mj *JSONLog) MarshalJSON() ([]byte, error) {
 	var buf bytes.Buffer
 	buf.Grow(1024)
-	err := mj.MarshalJSONBuf(&buf)
-	if err != nil {
+	if err := mj.MarshalJSONBuf(&buf); err != nil {
 		return nil, err
 	}
 	return buf.Bytes(), nil
diff --git a/pkg/jsonlog/jsonlog_test.go b/pkg/jsonlog/jsonlog_test.go
index fa53825..d4b26fc 100644
--- a/pkg/jsonlog/jsonlog_test.go
+++ b/pkg/jsonlog/jsonlog_test.go
@@ -21,7 +21,7 @@
 	}
 	w := bytes.NewBuffer(nil)
 	format := timeutils.RFC3339NanoFixed
-	if err := WriteLog(&buf, w, format); err != nil {
+	if err := WriteLog(&buf, w, format, time.Time{}); err != nil {
 		t.Fatal(err)
 	}
 	res := w.String()
@@ -52,7 +52,7 @@
 	b.SetBytes(int64(r.Len()))
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		if err := WriteLog(r, w, format); err != nil {
+		if err := WriteLog(r, w, format, time.Time{}); err != nil {
 			b.Fatal(err)
 		}
 		b.StopTimer()
diff --git a/pkg/jsonlog/jsonlogbytes.go b/pkg/jsonlog/jsonlogbytes.go
new file mode 100644
index 0000000..0d8fd9c
--- /dev/null
+++ b/pkg/jsonlog/jsonlogbytes.go
@@ -0,0 +1,115 @@
+package jsonlog
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+// JSONLogBytes is based on JSONLog.
+// It allows marshalling JSONLog from Log as []byte
+// and an already marshalled Created timestamp.
+type JSONLogBytes struct {
+	Log     []byte `json:"log,omitempty"`
+	Stream  string `json:"stream,omitempty"`
+	Created string `json:"time"`
+}
+
+// MarshalJSONBuf is based on the same method from JSONLog
+// It has been modified to take into account the necessary changes.
+func (mj *JSONLogBytes) MarshalJSONBuf(buf *bytes.Buffer) error {
+	var first = true
+
+	buf.WriteString(`{`)
+	if len(mj.Log) != 0 {
+		if first == true {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"log":`)
+		ffjson_WriteJsonBytesAsString(buf, mj.Log)
+	}
+	if len(mj.Stream) != 0 {
+		if first == true {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"stream":`)
+		ffjson_WriteJsonString(buf, mj.Stream)
+	}
+	if first == true {
+		first = false
+	} else {
+		buf.WriteString(`,`)
+	}
+	buf.WriteString(`"time":`)
+	buf.WriteString(mj.Created)
+	buf.WriteString(`}`)
+	return nil
+}
+
+// This is based on ffjson_WriteJsonString. It has been changed
+// to accept a string passed as a slice of bytes.
+func ffjson_WriteJsonBytesAsString(buf *bytes.Buffer, s []byte) {
+	const hex = "0123456789abcdef"
+
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			default:
+
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hex[b>>4])
+				buf.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.Write(s[start:])
+	}
+	buf.WriteByte('"')
+}
diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 0000000..7db1626
--- /dev/null
+++ b/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,172 @@
+package jsonmessage
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/docker/pkg/timeutils"
+	"github.com/docker/docker/pkg/units"
+)
+
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
+
+type JSONProgress struct {
+	terminalFd uintptr
+	Current    int   `json:"current,omitempty"`
+	Total      int   `json:"total,omitempty"`
+	Start      int64 `json:"start,omitempty"`
+}
+
+func (p *JSONProgress) String() string {
+	var (
+		width       = 200
+		pbBox       string
+		numbersBox  string
+		timeLeftBox string
+	)
+
+	ws, err := term.GetWinsize(p.terminalFd)
+	if err == nil {
+		width = int(ws.Width)
+	}
+
+	if p.Current <= 0 && p.Total <= 0 {
+		return ""
+	}
+	current := units.HumanSize(float64(p.Current))
+	if p.Total <= 0 {
+		return fmt.Sprintf("%8v", current)
+	}
+	total := units.HumanSize(float64(p.Total))
+	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+	if percentage > 50 {
+		percentage = 50
+	}
+	if width > 110 {
+		// this number can't be negetive gh#7136
+		numSpaces := 0
+		if 50-percentage > 0 {
+			numSpaces = 50 - percentage
+		}
+		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+	}
+	numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+	if p.Current > 0 && p.Start > 0 && percentage < 50 {
+		fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
+		perEntry := fromStart / time.Duration(p.Current)
+		left := time.Duration(p.Total-p.Current) * perEntry
+		left = (left / time.Second) * time.Second
+
+		if width > 50 {
+			timeLeftBox = " " + left.String()
+		}
+	}
+	return pbBox + numbersBox + timeLeftBox
+}
+
+type JSONMessage struct {
+	Stream          string        `json:"stream,omitempty"`
+	Status          string        `json:"status,omitempty"`
+	Progress        *JSONProgress `json:"progressDetail,omitempty"`
+	ProgressMessage string        `json:"progress,omitempty"` //deprecated
+	ID              string        `json:"id,omitempty"`
+	From            string        `json:"from,omitempty"`
+	Time            int64         `json:"time,omitempty"`
+	Error           *JSONError    `json:"errorDetail,omitempty"`
+	ErrorMessage    string        `json:"error,omitempty"` //deprecated
+}
+
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
+	if jm.Error != nil {
+		if jm.Error.Code == 401 {
+			return fmt.Errorf("Authentication is required.")
+		}
+		return jm.Error
+	}
+	var endl string
+	if isTerminal && jm.Stream == "" && jm.Progress != nil {
+		// <ESC>[2K = erase entire current line
+		fmt.Fprintf(out, "%c[2K\r", 27)
+		endl = "\r"
+	} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+		return nil
+	}
+	if jm.Time != 0 {
+		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
+	}
+	if jm.ID != "" {
+		fmt.Fprintf(out, "%s: ", jm.ID)
+	}
+	if jm.From != "" {
+		fmt.Fprintf(out, "(from %s) ", jm.From)
+	}
+	if jm.Progress != nil && isTerminal {
+		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+	} else if jm.ProgressMessage != "" { //deprecated
+		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+	} else if jm.Stream != "" {
+		fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+	} else {
+		fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+	}
+	return nil
+}
+
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error {
+	var (
+		dec  = json.NewDecoder(in)
+		ids  = make(map[string]int)
+		diff = 0
+	)
+	for {
+		var jm JSONMessage
+		if err := dec.Decode(&jm); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		if jm.Progress != nil {
+			jm.Progress.terminalFd = terminalFd
+		}
+		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+			line, ok := ids[jm.ID]
+			if !ok {
+				line = len(ids)
+				ids[jm.ID] = line
+				if isTerminal {
+					fmt.Fprintf(out, "\n")
+				}
+				diff = 0
+			} else {
+				diff = len(ids) - line
+			}
+			if jm.ID != "" && isTerminal {
+				// <ESC>[{diff}A = move cursor up diff rows
+				fmt.Fprintf(out, "%c[%dA", 27, diff)
+			}
+		}
+		err := jm.Display(out, isTerminal)
+		if jm.ID != "" && isTerminal {
+			// <ESC>[{diff}B = move cursor down diff rows
+			fmt.Fprintf(out, "%c[%dB", 27, diff)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/jsonmessage/jsonmessage_test.go b/pkg/jsonmessage/jsonmessage_test.go
new file mode 100644
index 0000000..4c3f566
--- /dev/null
+++ b/pkg/jsonmessage/jsonmessage_test.go
@@ -0,0 +1,38 @@
+package jsonmessage
+
+import (
+	"testing"
+)
+
+func TestError(t *testing.T) {
+	je := JSONError{404, "Not found"}
+	if je.Error() != "Not found" {
+		t.Fatalf("Expected 'Not found' got '%s'", je.Error())
+	}
+}
+
+func TestProgress(t *testing.T) {
+	jp := JSONProgress{}
+	if jp.String() != "" {
+		t.Fatalf("Expected empty string, got '%s'", jp.String())
+	}
+
+	expected := "     1 B"
+	jp2 := JSONProgress{Current: 1}
+	if jp2.String() != expected {
+		t.Fatalf("Expected %q, got %q", expected, jp2.String())
+	}
+
+	expected = "[=========================>                         ]     50 B/100 B"
+	jp3 := JSONProgress{Current: 50, Total: 100}
+	if jp3.String() != expected {
+		t.Fatalf("Expected %q, got %q", expected, jp3.String())
+	}
+
+	// this number can't be negetive gh#7136
+	expected = "[==================================================>]     50 B/40 B"
+	jp4 := JSONProgress{Current: 50, Total: 40}
+	if jp4.String() != expected {
+		t.Fatalf("Expected %q, got %q", expected, jp4.String())
+	}
+}
diff --git a/pkg/listenbuffer/README.md b/pkg/listenbuffer/README.md
new file mode 100644
index 0000000..2273509
--- /dev/null
+++ b/pkg/listenbuffer/README.md
@@ -0,0 +1,27 @@
+# listenbuffer
+
+listenbuffer uses the kernel's listening backlog functionality to queue
+connections, allowing applications to start listening immediately and handle
+connections later. This is signaled by closing the activation channel passed to
+the constructor.
+
+The maximum amount of queued connections depends on the configuration of your
+kernel (typically called SOMAXXCON) and cannot be configured in Go with the
+net package. See `src/net/sock_platform.go` in the Go tree or consult your
+kernel's manual.
+
+	activator := make(chan struct{})
+	buffer, err := NewListenBuffer("tcp", "localhost:4000", activator)
+	if err != nil {
+		panic(err)
+	}
+
+	// will block until activator has been closed or is sent an event
+	client, err := buffer.Accept()
+
+Somewhere else in your application once it's been booted:
+
+	close(activator)
+
+`buffer.Accept()` will return the first client in the kernel listening queue, or
+continue to block until a client connects or an error occurs.
diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go
index 17572c8..97d622c 100644
--- a/pkg/listenbuffer/buffer.go
+++ b/pkg/listenbuffer/buffer.go
@@ -1,14 +1,38 @@
 /*
-   Package to allow go applications to immediately start
-   listening on a socket, unix, tcp, udp but hold connections
-   until the application has booted and is ready to accept them
+listenbuffer uses the kernel's listening backlog functionality to queue
+connections, allowing applications to start listening immediately and handle
+connections later. This is signaled by closing the activation channel passed to
+the constructor.
+
+The maximum amount of queued connections depends on the configuration of your
+kernel (typically called SOMAXXCON) and cannot be configured in Go with the
+net package. See `src/net/sock_platform.go` in the Go tree or consult your
+kernel's manual.
+
+	activator := make(chan struct{})
+	buffer, err := NewListenBuffer("tcp", "localhost:4000", activator)
+	if err != nil {
+		panic(err)
+	}
+
+	// will block until activator has been closed or is sent an event
+	client, err := buffer.Accept()
+
+Somewhere else in your application once it's been booted:
+
+	close(activator)
+
+`buffer.Accept()` will return the first client in the kernel listening queue, or
+continue to block until a client connects or an error occurs.
 */
 package listenbuffer
 
 import "net"
 
-// NewListenBuffer returns a listener listening on addr with the protocol.
-func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, error) {
+// NewListenBuffer returns a net.Listener listening on addr with the protocol
+// passed. The channel passed is used to activate the listenbuffer when the
+// caller is ready to accept connections.
+func NewListenBuffer(proto, addr string, activate <-chan struct{}) (net.Listener, error) {
 	wrapped, err := net.Listen(proto, addr)
 	if err != nil {
 		return nil, err
@@ -20,20 +44,26 @@
 	}, nil
 }
 
+// defaultListener is the buffered wrapper around the net.Listener
 type defaultListener struct {
-	wrapped  net.Listener // the real listener to wrap
-	ready    bool         // is the listner ready to start accpeting connections
-	activate chan struct{}
+	wrapped  net.Listener    // The net.Listener wrapped by listenbuffer
+	ready    bool            // Whether the listenbuffer has been activated
+	activate <-chan struct{} // Channel to control activation of the listenbuffer
 }
 
+// Close closes the wrapped socket.
 func (l *defaultListener) Close() error {
 	return l.wrapped.Close()
 }
 
+// Addr returns the listening address of the wrapped socket.
 func (l *defaultListener) Addr() net.Addr {
 	return l.wrapped.Addr()
 }
 
+// Accept returns a client connection on the wrapped socket if the listen buffer
+// has been activated. To active the listenbuffer the activation channel passed
+// to NewListenBuffer must have been closed or sent an event.
 func (l *defaultListener) Accept() (net.Conn, error) {
 	// if the listen has been told it is ready then we can go ahead and
 	// start returning connections
diff --git a/pkg/listenbuffer/listen_buffer_test.go b/pkg/listenbuffer/listen_buffer_test.go
new file mode 100644
index 0000000..6ffd2f7
--- /dev/null
+++ b/pkg/listenbuffer/listen_buffer_test.go
@@ -0,0 +1,41 @@
+package listenbuffer
+
+import (
+	"io/ioutil"
+	"net"
+	"testing"
+)
+
+func TestListenBufferAllowsAcceptingWhenActivated(t *testing.T) {
+	lock := make(chan struct{})
+	buffer, err := NewListenBuffer("tcp", "", lock)
+	if err != nil {
+		t.Fatal("Unable to create listen buffer: ", err)
+	}
+
+	go func() {
+		conn, err := net.Dial("tcp", buffer.Addr().String())
+		if err != nil {
+			t.Fatal("Client failed to establish connection to server: ", err)
+		}
+
+		conn.Write([]byte("ping"))
+		conn.Close()
+	}()
+
+	close(lock)
+
+	client, err := buffer.Accept()
+	if err != nil {
+		t.Fatal("Failed to accept client: ", err)
+	}
+
+	response, err := ioutil.ReadAll(client)
+	if err != nil {
+		t.Fatal("Failed to read from client: ", err)
+	}
+
+	if string(response) != "ping" {
+		t.Fatal("Expected to receive ping from client, received: ", string(response))
+	}
+}
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
index b35692b..ce003cd 100644
--- a/pkg/mflag/flag.go
+++ b/pkg/mflag/flag.go
@@ -486,8 +486,7 @@
 	if !ok {
 		return fmt.Errorf("no such flag -%v", name)
 	}
-	err := flag.Value.Set(value)
-	if err != nil {
+	if err := flag.Value.Set(value); err != nil {
 		return err
 	}
 	if f.actual == nil {
@@ -561,7 +560,7 @@
 // Usage prints to standard error a usage message documenting all defined command-line flags.
 // The function is a variable that may be changed to point to a custom function.
 var Usage = func() {
-	fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0])
+	fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0])
 	PrintDefaults()
 }
 
@@ -941,11 +940,11 @@
 
 	// it's a flag. does it have an argument?
 	f.args = f.args[1:]
-	has_value := false
+	hasValue := false
 	value := ""
 	if i := strings.Index(name, "="); i != -1 {
 		value = trimQuotes(name[i+1:])
-		has_value = true
+		hasValue = true
 		name = name[:i]
 	}
 
@@ -962,7 +961,7 @@
 		return false, name, ErrRetry
 	}
 	if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
-		if has_value {
+		if hasValue {
 			if err := fv.Set(value); err != nil {
 				return false, "", f.failf("invalid boolean value %q for  -%s: %v", value, name, err)
 			}
@@ -971,12 +970,12 @@
 		}
 	} else {
 		// It must have a value, which might be the next argument.
-		if !has_value && len(f.args) > 0 {
+		if !hasValue && len(f.args) > 0 {
 			// value is the next arg
-			has_value = true
+			hasValue = true
 			value, f.args = f.args[0], f.args[1:]
 		}
-		if !has_value {
+		if !hasValue {
 			return false, "", f.failf("flag needs an argument: -%s", name)
 		}
 		if err := flag.Value.Set(value); err != nil {
@@ -1054,6 +1053,42 @@
 	return nil
 }
 
+// ParseFlags is a utility function that adds a help flag if withHelp is true,
+// calls cmd.Parse(args) and prints a relevant error message if there are
+// incorrect number of arguments. It returns error only if error handling is
+// set to ContinueOnError and parsing fails. If error handling is set to
+// ExitOnError, it's safe to ignore the return value.
+func (cmd *FlagSet) ParseFlags(args []string, withHelp bool) error {
+	var help *bool
+	if withHelp {
+		help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage")
+	}
+	if err := cmd.Parse(args); err != nil {
+		return err
+	}
+	if help != nil && *help {
+		cmd.Usage()
+		// just in case Usage does not exit
+		os.Exit(0)
+	}
+	if str := cmd.CheckArgs(); str != "" {
+		cmd.ReportError(str, withHelp)
+	}
+	return nil
+}
+
+func (cmd *FlagSet) ReportError(str string, withHelp bool) {
+	if withHelp {
+		if os.Args[0] == cmd.Name() {
+			str += ". See '" + os.Args[0] + " --help'"
+		} else {
+			str += ". See '" + os.Args[0] + " " + cmd.Name() + " --help'"
+		}
+	}
+	fmt.Fprintf(cmd.Out(), "docker: %s\n", str)
+	os.Exit(1)
+}
+
 // Parsed reports whether f.Parse has been called.
 func (f *FlagSet) Parsed() bool {
 	return f.parsed
diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go
index a59b589..f166cb2 100644
--- a/pkg/mount/flags_freebsd.go
+++ b/pkg/mount/flags_freebsd.go
@@ -8,12 +8,25 @@
 import "C"
 
 const (
-	RDONLY      = C.MNT_RDONLY
-	NOSUID      = C.MNT_NOSUID
-	NOEXEC      = C.MNT_NOEXEC
-	SYNCHRONOUS = C.MNT_SYNCHRONOUS
-	NOATIME     = C.MNT_NOATIME
+	// RDONLY will mount the filesystem as read-only.
+	RDONLY = C.MNT_RDONLY
 
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = C.MNT_NOSUID
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = C.MNT_NOEXEC
+
+	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+	SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
 	BIND        = 0
 	DIRSYNC     = 0
 	MANDLOCK    = 0
diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go
index 9986621..2f9f5c5 100644
--- a/pkg/mount/flags_linux.go
+++ b/pkg/mount/flags_linux.go
@@ -5,26 +5,81 @@
 )
 
 const (
-	RDONLY      = syscall.MS_RDONLY
-	NOSUID      = syscall.MS_NOSUID
-	NODEV       = syscall.MS_NODEV
-	NOEXEC      = syscall.MS_NOEXEC
+	// RDONLY will mount the file system read-only.
+	RDONLY = syscall.MS_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = syscall.MS_NOSUID
+
+	// NODEV will not interpret character or block special devices on the file
+	// system.
+	NODEV = syscall.MS_NODEV
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = syscall.MS_NOEXEC
+
+	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
 	SYNCHRONOUS = syscall.MS_SYNCHRONOUS
-	DIRSYNC     = syscall.MS_DIRSYNC
-	REMOUNT     = syscall.MS_REMOUNT
-	MANDLOCK    = syscall.MS_MANDLOCK
-	NOATIME     = syscall.MS_NOATIME
-	NODIRATIME  = syscall.MS_NODIRATIME
-	BIND        = syscall.MS_BIND
-	RBIND       = syscall.MS_BIND | syscall.MS_REC
-	UNBINDABLE  = syscall.MS_UNBINDABLE
+
+	// DIRSYNC will force all directory updates within the file system to be done
+	// synchronously. This affects the following system calls: creat, link,
+	// unlink, symlink, mkdir, rmdir, mknod and rename.
+	DIRSYNC = syscall.MS_DIRSYNC
+
+	// REMOUNT will attempt to remount an already-mounted file system. This is
+	// commonly used to change the mount flags for a file system, especially to
+	// make a readonly file system writeable. It does not change device or mount
+	// point.
+	REMOUNT = syscall.MS_REMOUNT
+
+	// MANDLOCK will force mandatory locks on a filesystem.
+	MANDLOCK = syscall.MS_MANDLOCK
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = syscall.MS_NOATIME
+
+	// NODIRATIME will not update the directory access time.
+	NODIRATIME = syscall.MS_NODIRATIME
+
+	// BIND remounts a subtree somewhere else.
+	BIND = syscall.MS_BIND
+
+	// RBIND remounts a subtree and all possible submounts somewhere else.
+	RBIND = syscall.MS_BIND | syscall.MS_REC
+
+	// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+	UNBINDABLE = syscall.MS_UNBINDABLE
+
+	// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
 	RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
-	PRIVATE     = syscall.MS_PRIVATE
-	RPRIVATE    = syscall.MS_PRIVATE | syscall.MS_REC
-	SLAVE       = syscall.MS_SLAVE
-	RSLAVE      = syscall.MS_SLAVE | syscall.MS_REC
-	SHARED      = syscall.MS_SHARED
-	RSHARED     = syscall.MS_SHARED | syscall.MS_REC
-	RELATIME    = syscall.MS_RELATIME
+
+	// PRIVATE creates a mount which carries no propagation abilities.
+	PRIVATE = syscall.MS_PRIVATE
+
+	// RPRIVATE marks the entire mount tree as PRIVATE.
+	RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
+
+	// SLAVE creates a mount which receives propagation from its master, but not
+	// vice versa.
+	SLAVE = syscall.MS_SLAVE
+
+	// RSLAVE marks the entire mount tree as SLAVE.
+	RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
+
+	// SHARED creates a mount which provides the ability to create mirrors of
+	// that mount such that mounts and unmounts within any of the mirrors
+	// propagate to the other mirrors.
+	SHARED = syscall.MS_SHARED
+
+	// RSHARED marks the entire mount tree as SHARED.
+	RSHARED = syscall.MS_SHARED | syscall.MS_REC
+
+	// RELATIME updates inode access times relative to modify or change time.
+	RELATIME = syscall.MS_RELATIME
+
+	// STRICTATIME allows to explicitly request full atime updates.  This makes
+	// it possible for the kernel to default to relatime or noatime but still
+	// allow userspace to override it.
 	STRICTATIME = syscall.MS_STRICTATIME
 )
diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go
index c4f8217..a90d3d1 100644
--- a/pkg/mount/flags_unsupported.go
+++ b/pkg/mount/flags_unsupported.go
@@ -2,6 +2,7 @@
 
 package mount
 
+// These flags are unsupported.
 const (
 	BIND        = 0
 	DIRSYNC     = 0
diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go
index 5ca7316..9a20df2 100644
--- a/pkg/mount/mount.go
+++ b/pkg/mount/mount.go
@@ -4,11 +4,12 @@
 	"time"
 )
 
+// GetMounts retrieves a list of mounts for the current running process.
 func GetMounts() ([]*MountInfo, error) {
 	return parseMountTable()
 }
 
-// Looks at /proc/self/mountinfo to determine of the specified
+// Mounted looks at /proc/self/mountinfo to determine of the specified
 // mountpoint has been mounted
 func Mounted(mountpoint string) (bool, error) {
 	entries, err := parseMountTable()
@@ -25,9 +26,10 @@
 	return false, nil
 }
 
-// Mount the specified options at the target path only if
-// the target is not mounted
-// Options must be specified as fstab style
+// Mount will mount filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
 func Mount(device, target, mType, options string) error {
 	flag, _ := parseOptions(options)
 	if flag&REMOUNT != REMOUNT {
@@ -38,9 +40,10 @@
 	return ForceMount(device, target, mType, options)
 }
 
-// Mount the specified options at the target path
-// reguardless if the target is mounted or not
-// Options must be specified as fstab style
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* if the target path is not already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
 func ForceMount(device, target, mType, options string) error {
 	flag, data := parseOptions(options)
 	if err := mount(device, target, mType, uintptr(flag), data); err != nil {
@@ -49,7 +52,7 @@
 	return nil
 }
 
-// Unmount the target only if it is mounted
+// Unmount will unmount the target filesystem, so long as it is mounted.
 func Unmount(target string) error {
 	if mounted, err := Mounted(target); err != nil || !mounted {
 		return err
@@ -57,7 +60,8 @@
 	return ForceUnmount(target)
 }
 
-// Unmount the target reguardless if it is mounted or not
+// ForceUnmount will force an unmount of the target filesystem, regardless if
+// it is mounted or not.
 func ForceUnmount(target string) (err error) {
 	// Simple retry logic for unmount
 	for i := 0; i < 10; i++ {
diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go
index ec8e8bc..8ea0864 100644
--- a/pkg/mount/mountinfo.go
+++ b/pkg/mount/mountinfo.go
@@ -1,7 +1,40 @@
 package mount
 
+// MountInfo reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
 type MountInfo struct {
-	Id, Parent, Major, Minor         int
-	Root, Mountpoint, Opts, Optional string
-	Fstype, Source, VfsOpts          string
+	// Id is a unique identifier of the mount (may be reused after umount).
+	Id int
+
+	// Parent indicates the ID of the mount parent (or of self for the top of the
+	// mount tree).
+	Parent int
+
+	// Major indicates one half of the device ID which identifies the device class.
+	Major int
+
+	// Minor indicates one half of the device ID which identifies a specific
+	// instance of device.
+	Minor int
+
+	// Root of the mount within the filesystem.
+	Root string
+
+	// Mountpoint indicates the mount point relative to the process's root.
+	Mountpoint string
+
+	// Opts represents mount-specific options.
+	Opts string
+
+	// Optional represents optional fields.
+	Optional string
+
+	// Fstype indicates the type of filesystem, such as EXT3.
+	Fstype string
+
+	// Source indicates filesystem specific information or "none".
+	Source string
+
+	// VfsOpts represents per super block options.
+	VfsOpts string
 }
diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go
index 2fe9186..add7c3b 100644
--- a/pkg/mount/mountinfo_freebsd.go
+++ b/pkg/mount/mountinfo_freebsd.go
@@ -13,7 +13,8 @@
 	"unsafe"
 )
 
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts.
 func parseMountTable() ([]*MountInfo, error) {
 	var rawEntries *C.struct_statfs
 
diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go
index 0eb018e..351a58e 100644
--- a/pkg/mount/mountinfo_linux.go
+++ b/pkg/mount/mountinfo_linux.go
@@ -28,7 +28,8 @@
 	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
 )
 
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
 func parseMountTable() ([]*MountInfo, error) {
 	f, err := os.Open("/proc/self/mountinfo")
 	if err != nil {
@@ -80,7 +81,9 @@
 	return out, nil
 }
 
-// PidMountInfo collects the mounts for a specific Pid
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
 func PidMountInfo(pid int) ([]*MountInfo, error) {
 	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
 	if err != nil {
diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go
index cd9b86c..47303bb 100644
--- a/pkg/mount/sharedsubtree_linux.go
+++ b/pkg/mount/sharedsubtree_linux.go
@@ -2,34 +2,50 @@
 
 package mount
 
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
 func MakeShared(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "shared")
 }
 
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
 func MakeRShared(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "rshared")
 }
 
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
 func MakePrivate(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "private")
 }
 
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
 func MakeRPrivate(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "rprivate")
 }
 
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
 func MakeSlave(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "slave")
 }
 
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
 func MakeRSlave(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "rslave")
 }
 
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
 func MakeUnbindable(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "unbindable")
 }
 
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
 func MakeRUnbindable(mountPoint string) error {
 	return ensureMountedAs(mountPoint, "runbindable")
 }
diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go
index 0a1eee3..b081cc7 100644
--- a/pkg/namesgenerator/names-generator.go
+++ b/pkg/namesgenerator/names-generator.go
@@ -3,7 +3,8 @@
 import (
 	"fmt"
 	"math/rand"
-	"time"
+
+	"github.com/docker/docker/pkg/random"
 )
 
 var (
@@ -119,6 +120,9 @@
 		// Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori
 		"cori",
 
+		// Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray
+		"cray",
+
 		// Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie.
 		"curie",
 
@@ -185,6 +189,12 @@
 		// Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones
 		"jones",
 
+		// Jack Kilby and Robert Noyce have invented silicone integrated circuits and gave Silicon Valley its name.
+		// - https://en.wikipedia.org/wiki/Jack_Kilby
+		// - https://en.wikipedia.org/wiki/Robert_Noyce
+		"kilby",
+		"noyce",
+
 		// Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch
 		"kirch",
 
@@ -299,19 +309,19 @@
 		// Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath
 		"yonath",
 	}
+
+	rnd = rand.New(random.NewSource())
 )
 
 func GetRandomName(retry int) string {
-	rand.Seed(time.Now().UnixNano())
-
 begin:
-	name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))])
+	name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))])
 	if name == "boring_wozniak" /* Steve Wozniak is not boring */ {
 		goto begin
 	}
 
 	if retry > 0 {
-		name = fmt.Sprintf("%s%d", name, rand.Intn(10))
+		name = fmt.Sprintf("%s%d", name, rnd.Intn(10))
 	}
 	return name
 }
diff --git a/pkg/networkfs/etchosts/etchosts.go b/pkg/networkfs/etchosts/etchosts.go
deleted file mode 100644
index d7edef2..0000000
--- a/pkg/networkfs/etchosts/etchosts.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package etchosts
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"regexp"
-)
-
-type Record struct {
-	Hosts string
-	IP    string
-}
-
-func (r Record) WriteTo(w io.Writer) (int64, error) {
-	n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts)
-	return int64(n), err
-}
-
-var defaultContent = []Record{
-	{Hosts: "localhost", IP: "127.0.0.1"},
-	{Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"},
-	{Hosts: "ip6-localnet", IP: "fe00::0"},
-	{Hosts: "ip6-mcastprefix", IP: "ff00::0"},
-	{Hosts: "ip6-allnodes", IP: "ff02::1"},
-	{Hosts: "ip6-allrouters", IP: "ff02::2"},
-}
-
-func Build(path, IP, hostname, domainname string, extraContent []Record) error {
-	content := bytes.NewBuffer(nil)
-	if IP != "" {
-		var mainRec Record
-		mainRec.IP = IP
-		if domainname != "" {
-			mainRec.Hosts = fmt.Sprintf("%s.%s %s", hostname, domainname, hostname)
-		} else {
-			mainRec.Hosts = hostname
-		}
-		if _, err := mainRec.WriteTo(content); err != nil {
-			return err
-		}
-	}
-
-	for _, r := range defaultContent {
-		if _, err := r.WriteTo(content); err != nil {
-			return err
-		}
-	}
-
-	for _, r := range extraContent {
-		if _, err := r.WriteTo(content); err != nil {
-			return err
-		}
-	}
-
-	return ioutil.WriteFile(path, content.Bytes(), 0644)
-}
-
-func Update(path, IP, hostname string) error {
-	old, err := ioutil.ReadFile(path)
-	if err != nil {
-		return err
-	}
-	var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)", regexp.QuoteMeta(hostname)))
-	return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2")), 0644)
-}
diff --git a/pkg/networkfs/etchosts/etchosts_test.go b/pkg/networkfs/etchosts/etchosts_test.go
deleted file mode 100644
index c033904..0000000
--- a/pkg/networkfs/etchosts/etchosts_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package etchosts
-
-import (
-	"bytes"
-	"io/ioutil"
-	"os"
-	"testing"
-)
-
-func TestBuildDefault(t *testing.T) {
-	file, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(file.Name())
-
-	// check that /etc/hosts has consistent ordering
-	for i := 0; i <= 5; i++ {
-		err = Build(file.Name(), "", "", "", nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		content, err := ioutil.ReadFile(file.Name())
-		if err != nil {
-			t.Fatal(err)
-		}
-		expected := "127.0.0.1\tlocalhost\n::1\tlocalhost ip6-localhost ip6-loopback\nfe00::0\tip6-localnet\nff00::0\tip6-mcastprefix\nff02::1\tip6-allnodes\nff02::2\tip6-allrouters\n"
-
-		if expected != string(content) {
-			t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-		}
-	}
-}
-
-func TestBuildHostnameDomainname(t *testing.T) {
-	file, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(file.Name())
-
-	err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	content, err := ioutil.ReadFile(file.Name())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) {
-		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-	}
-}
-
-func TestBuildHostname(t *testing.T) {
-	file, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(file.Name())
-
-	err = Build(file.Name(), "10.11.12.13", "testhostname", "", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	content, err := ioutil.ReadFile(file.Name())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := "10.11.12.13\ttesthostname\n"; !bytes.Contains(content, []byte(expected)) {
-		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-	}
-}
-
-func TestBuildNoIP(t *testing.T) {
-	file, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(file.Name())
-
-	err = Build(file.Name(), "", "testhostname", "", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	content, err := ioutil.ReadFile(file.Name())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := ""; !bytes.Contains(content, []byte(expected)) {
-		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-	}
-}
-
-func TestUpdate(t *testing.T) {
-	file, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(file.Name())
-
-	if err := Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	content, err := ioutil.ReadFile(file.Name())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) {
-		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-	}
-
-	if err := Update(file.Name(), "1.1.1.1", "testhostname"); err != nil {
-		t.Fatal(err)
-	}
-
-	content, err = ioutil.ReadFile(file.Name())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := "1.1.1.1\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) {
-		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-	}
-}
diff --git a/pkg/networkfs/resolvconf/resolvconf.go b/pkg/networkfs/resolvconf/resolvconf.go
deleted file mode 100644
index 61f92d9..0000000
--- a/pkg/networkfs/resolvconf/resolvconf.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package resolvconf
-
-import (
-	"bytes"
-	"io/ioutil"
-	"regexp"
-	"strings"
-	"sync"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/utils"
-)
-
-var (
-	// Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
-	defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
-	defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"}
-	ipv4NumBlock   = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
-	ipv4Address    = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
-	// This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also
-	// will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
-	// -- e.g. other link-local types -- either won't work in containers or are unnecessary.
-	// For readability and sufficiency for Docker purposes this seemed more reasonable than a
-	// 1000+ character regexp with exact and complete IPv6 validation
-	ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})`
-	ipLocalhost = `((127\.([0-9]{1,3}.){2}[0-9]{1,3})|(::1))`
-
-	localhostIPRegexp = regexp.MustCompile(ipLocalhost)
-	localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`)
-	nsIPv6Regexp      = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`)
-	nsRegexp          = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
-	searchRegexp      = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`)
-)
-
-var lastModified struct {
-	sync.Mutex
-	sha256   string
-	contents []byte
-}
-
-func Get() ([]byte, error) {
-	resolv, err := ioutil.ReadFile("/etc/resolv.conf")
-	if err != nil {
-		return nil, err
-	}
-	return resolv, nil
-}
-
-// Retrieves the host /etc/resolv.conf file, checks against the last hash
-// and, if modified since last check, returns the bytes and new hash.
-// This feature is used by the resolv.conf updater for containers
-func GetIfChanged() ([]byte, string, error) {
-	lastModified.Lock()
-	defer lastModified.Unlock()
-
-	resolv, err := ioutil.ReadFile("/etc/resolv.conf")
-	if err != nil {
-		return nil, "", err
-	}
-	newHash, err := utils.HashData(bytes.NewReader(resolv))
-	if err != nil {
-		return nil, "", err
-	}
-	if lastModified.sha256 != newHash {
-		lastModified.sha256 = newHash
-		lastModified.contents = resolv
-		return resolv, newHash, nil
-	}
-	// nothing changed, so return no data
-	return nil, "", nil
-}
-
-// retrieve the last used contents and hash of the host resolv.conf
-// Used by containers updating on restart
-func GetLastModified() ([]byte, string) {
-	lastModified.Lock()
-	defer lastModified.Unlock()
-
-	return lastModified.contents, lastModified.sha256
-}
-
-// FilterResolvDns has two main jobs:
-// 1. It looks for localhost (127.*|::1) entries in the provided
-//    resolv.conf, removing local nameserver entries, and, if the resulting
-//    cleaned config has no defined nameservers left, adds default DNS entries
-// 2. Given the caller provides the enable/disable state of IPv6, the filter
-//    code will remove all IPv6 nameservers if it is not enabled for containers
-//
-// It also returns a boolean to notify the caller if changes were made at all
-func FilterResolvDns(resolvConf []byte, ipv6Enabled bool) ([]byte, bool) {
-	changed := false
-	cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})
-	// if IPv6 is not enabled, also clean out any IPv6 address nameserver
-	if !ipv6Enabled {
-		cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{})
-	}
-	// if the resulting resolvConf has no more nameservers defined, add appropriate
-	// default DNS servers for IPv4 and (optionally) IPv6
-	if len(GetNameservers(cleanedResolvConf)) == 0 {
-		log.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers : %v", defaultIPv4Dns)
-		dns := defaultIPv4Dns
-		if ipv6Enabled {
-			log.Infof("IPv6 enabled; Adding default IPv6 external servers : %v", defaultIPv6Dns)
-			dns = append(dns, defaultIPv6Dns...)
-		}
-		cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...)
-	}
-	if !bytes.Equal(resolvConf, cleanedResolvConf) {
-		changed = true
-	}
-	return cleanedResolvConf, changed
-}
-
-// getLines parses input into lines and strips away comments.
-func getLines(input []byte, commentMarker []byte) [][]byte {
-	lines := bytes.Split(input, []byte("\n"))
-	var output [][]byte
-	for _, currentLine := range lines {
-		var commentIndex = bytes.Index(currentLine, commentMarker)
-		if commentIndex == -1 {
-			output = append(output, currentLine)
-		} else {
-			output = append(output, currentLine[:commentIndex])
-		}
-	}
-	return output
-}
-
-// returns true if the IP string matches the localhost IP regular expression.
-// Used for determining if nameserver settings are being passed which are
-// localhost addresses
-func IsLocalhost(ip string) bool {
-	return localhostIPRegexp.MatchString(ip)
-}
-
-// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
-func GetNameservers(resolvConf []byte) []string {
-	nameservers := []string{}
-	for _, line := range getLines(resolvConf, []byte("#")) {
-		var ns = nsRegexp.FindSubmatch(line)
-		if len(ns) > 0 {
-			nameservers = append(nameservers, string(ns[1]))
-		}
-	}
-	return nameservers
-}
-
-// GetNameserversAsCIDR returns nameservers (if any) listed in
-// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
-// This function's output is intended for net.ParseCIDR
-func GetNameserversAsCIDR(resolvConf []byte) []string {
-	nameservers := []string{}
-	for _, nameserver := range GetNameservers(resolvConf) {
-		nameservers = append(nameservers, nameserver+"/32")
-	}
-	return nameservers
-}
-
-// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf
-// If more than one search line is encountered, only the contents of the last
-// one is returned.
-func GetSearchDomains(resolvConf []byte) []string {
-	domains := []string{}
-	for _, line := range getLines(resolvConf, []byte("#")) {
-		match := searchRegexp.FindSubmatch(line)
-		if match == nil {
-			continue
-		}
-		domains = strings.Fields(string(match[1]))
-	}
-	return domains
-}
-
-func Build(path string, dns, dnsSearch []string) error {
-	content := bytes.NewBuffer(nil)
-	for _, dns := range dns {
-		if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil {
-			return err
-		}
-	}
-	if len(dnsSearch) > 0 {
-		if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." {
-			if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
-				return err
-			}
-		}
-	}
-
-	return ioutil.WriteFile(path, content.Bytes(), 0644)
-}
diff --git a/pkg/networkfs/resolvconf/resolvconf_test.go b/pkg/networkfs/resolvconf/resolvconf_test.go
deleted file mode 100644
index b0647e7..0000000
--- a/pkg/networkfs/resolvconf/resolvconf_test.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package resolvconf
-
-import (
-	"bytes"
-	"io/ioutil"
-	"os"
-	"testing"
-)
-
-func TestGet(t *testing.T) {
-	resolvConfUtils, err := Get()
-	if err != nil {
-		t.Fatal(err)
-	}
-	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if string(resolvConfUtils) != string(resolvConfSystem) {
-		t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.")
-	}
-}
-
-func TestGetNameservers(t *testing.T) {
-	for resolv, result := range map[string][]string{`
-nameserver 1.2.3.4
-nameserver 40.3.200.10
-search example.com`: {"1.2.3.4", "40.3.200.10"},
-		`search example.com`: {},
-		`nameserver 1.2.3.4
-search example.com
-nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"},
-		``: {},
-		`  nameserver 1.2.3.4   `: {"1.2.3.4"},
-		`search example.com
-nameserver 1.2.3.4
-#nameserver 4.3.2.1`: {"1.2.3.4"},
-		`search example.com
-nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"},
-	} {
-		test := GetNameservers([]byte(resolv))
-		if !strSlicesEqual(test, result) {
-			t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
-		}
-	}
-}
-
-func TestGetNameserversAsCIDR(t *testing.T) {
-	for resolv, result := range map[string][]string{`
-nameserver 1.2.3.4
-nameserver 40.3.200.10
-search example.com`: {"1.2.3.4/32", "40.3.200.10/32"},
-		`search example.com`: {},
-		`nameserver 1.2.3.4
-search example.com
-nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"},
-		``: {},
-		`  nameserver 1.2.3.4   `: {"1.2.3.4/32"},
-		`search example.com
-nameserver 1.2.3.4
-#nameserver 4.3.2.1`: {"1.2.3.4/32"},
-		`search example.com
-nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"},
-	} {
-		test := GetNameserversAsCIDR([]byte(resolv))
-		if !strSlicesEqual(test, result) {
-			t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
-		}
-	}
-}
-
-func TestGetSearchDomains(t *testing.T) {
-	for resolv, result := range map[string][]string{
-		`search example.com`:           {"example.com"},
-		`search example.com # ignored`: {"example.com"},
-		` 	  search 	 example.com 	  `: {"example.com"},
-		` 	  search 	 example.com 	  # ignored`: {"example.com"},
-		`search foo.example.com example.com`: {"foo.example.com", "example.com"},
-		`	   search   	   foo.example.com 	 example.com 	`: {"foo.example.com", "example.com"},
-		`	   search   	   foo.example.com 	 example.com 	# ignored`: {"foo.example.com", "example.com"},
-		``:          {},
-		`# ignored`: {},
-		`nameserver 1.2.3.4
-search foo.example.com example.com`: {"foo.example.com", "example.com"},
-		`nameserver 1.2.3.4
-search dup1.example.com dup2.example.com
-search foo.example.com example.com`: {"foo.example.com", "example.com"},
-		`nameserver 1.2.3.4
-search foo.example.com example.com
-nameserver 4.30.20.100`: {"foo.example.com", "example.com"},
-	} {
-		test := GetSearchDomains([]byte(resolv))
-		if !strSlicesEqual(test, result) {
-			t.Fatalf("Wrong search domain string {%s} should be %v. Input: %s", test, result, resolv)
-		}
-	}
-}
-
-func strSlicesEqual(a, b []string) bool {
-	if len(a) != len(b) {
-		return false
-	}
-
-	for i, v := range a {
-		if v != b[i] {
-			return false
-		}
-	}
-
-	return true
-}
-
-func TestBuild(t *testing.T) {
-	file, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(file.Name())
-
-	err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	content, err := ioutil.ReadFile(file.Name())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\nsearch search1\n"; !bytes.Contains(content, []byte(expected)) {
-		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-	}
-}
-
-func TestBuildWithZeroLengthDomainSearch(t *testing.T) {
-	file, err := ioutil.TempFile("", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.Remove(file.Name())
-
-	err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	content, err := ioutil.ReadFile(file.Name())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) {
-		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
-	}
-	if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) {
-		t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content)
-	}
-}
-
-func TestFilterResolvDns(t *testing.T) {
-	ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n"
-
-	if result, _ := FilterResolvDns([]byte(ns0), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n"
-	if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n"
-	if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n"
-	if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 = "nameserver ::1\nnameserver 10.16.60.14\nnameserver 127.0.2.1\nnameserver 10.16.60.21\n"
-	if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	ns1 = "nameserver 10.16.60.14\nnameserver ::1\nnameserver 10.16.60.21\nnameserver ::1"
-	if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	// with IPv6 disabled (false param), the IPv6 nameserver should be removed
-	ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1"
-	if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost+IPv6 off: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	// with IPv6 enabled, the IPv6 nameserver should be preserved
-	ns0 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\n"
-	ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1"
-	if result, _ := FilterResolvDns([]byte(ns1), true); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed Localhost+IPv6 on: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	// with IPv6 enabled, and no non-localhost servers, Google defaults (both IPv4+IPv6) should be added
-	ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\nnameserver 2001:4860:4860::8888\nnameserver 2001:4860:4860::8844"
-	ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1"
-	if result, _ := FilterResolvDns([]byte(ns1), true); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-
-	// with IPv6 disabled, and no non-localhost servers, Google defaults (only IPv4) should be added
-	ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4"
-	ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1"
-	if result, _ := FilterResolvDns([]byte(ns1), false); result != nil {
-		if ns0 != string(result) {
-			t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result))
-		}
-	}
-}
diff --git a/pkg/parsers/filters/parse.go b/pkg/parsers/filters/parse.go
index 9c056bb..df5486d 100644
--- a/pkg/parsers/filters/parse.go
+++ b/pkg/parsers/filters/parse.go
@@ -58,8 +58,7 @@
 	if len(p) == 0 {
 		return args, nil
 	}
-	err := json.Unmarshal([]byte(p), &args)
-	if err != nil {
+	if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil {
 		return nil, err
 	}
 	return args, nil
diff --git a/pkg/parsers/kernel/kernel.go b/pkg/parsers/kernel/kernel.go
index 70d0900..5f79306 100644
--- a/pkg/parsers/kernel/kernel.go
+++ b/pkg/parsers/kernel/kernel.go
@@ -1,3 +1,5 @@
+// +build !windows
+
 package kernel
 
 import (
diff --git a/pkg/parsers/kernel/kernel_windows.go b/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 0000000..399d63e
--- /dev/null
+++ b/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,65 @@
+package kernel
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+type KernelVersionInfo struct {
+	kvi   string
+	major int
+	minor int
+	build int
+}
+
+func (k *KernelVersionInfo) String() string {
+	return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+func GetKernelVersion() (*KernelVersionInfo, error) {
+
+	var (
+		h         syscall.Handle
+		dwVersion uint32
+		err       error
+	)
+
+	KVI := &KernelVersionInfo{"Unknown", 0, 0, 0}
+
+	if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+		syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
+		0,
+		syscall.KEY_READ,
+		&h); err != nil {
+		return KVI, err
+	}
+	defer syscall.RegCloseKey(h)
+
+	var buf [1 << 10]uint16
+	var typ uint32
+	n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+	if err = syscall.RegQueryValueEx(h,
+		syscall.StringToUTF16Ptr("BuildLabEx"),
+		nil,
+		&typ,
+		(*byte)(unsafe.Pointer(&buf[0])),
+		&n); err != nil {
+		return KVI, err
+	}
+
+	KVI.kvi = syscall.UTF16ToString(buf[:])
+
+	// Important - docker.exe MUST be manifested for this API to return
+	// the correct information.
+	if dwVersion, err = syscall.GetVersion(); err != nil {
+		return KVI, err
+	}
+
+	KVI.major = int(dwVersion & 0xFF)
+	KVI.minor = int((dwVersion & 0XFF00) >> 8)
+	KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
+
+	return KVI, nil
+}
diff --git a/pkg/parsers/operatingsystem/operatingsystem.go b/pkg/parsers/operatingsystem/operatingsystem_linux.go
similarity index 100%
rename from pkg/parsers/operatingsystem/operatingsystem.go
rename to pkg/parsers/operatingsystem/operatingsystem_linux.go
diff --git a/pkg/parsers/operatingsystem/operatingsystem_windows.go b/pkg/parsers/operatingsystem/operatingsystem_windows.go
new file mode 100644
index 0000000..c843c6f
--- /dev/null
+++ b/pkg/parsers/operatingsystem/operatingsystem_windows.go
@@ -0,0 +1,47 @@
+package operatingsystem
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c
+// for a similar sample
+
+func GetOperatingSystem() (string, error) {
+
+	var h syscall.Handle
+
+	// Default return value
+	ret := "Unknown Operating System"
+
+	if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+		syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
+		0,
+		syscall.KEY_READ,
+		&h); err != nil {
+		return ret, err
+	}
+	defer syscall.RegCloseKey(h)
+
+	var buf [1 << 10]uint16
+	var typ uint32
+	n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+	if err := syscall.RegQueryValueEx(h,
+		syscall.StringToUTF16Ptr("ProductName"),
+		nil,
+		&typ,
+		(*byte)(unsafe.Pointer(&buf[0])),
+		&n); err != nil {
+		return ret, err
+	}
+	ret = syscall.UTF16ToString(buf[:])
+
+	return ret, nil
+}
+
+// No-op on Windows
+func IsContainerized() (bool, error) {
+	return false, nil
+}
diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go
index 59e294d..32d8773 100644
--- a/pkg/parsers/parsers.go
+++ b/pkg/parsers/parsers.go
@@ -2,6 +2,7 @@
 
 import (
 	"fmt"
+	"runtime"
 	"strconv"
 	"strings"
 )
@@ -10,7 +11,12 @@
 func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
 	addr = strings.TrimSpace(addr)
 	if addr == "" {
-		addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
+		if runtime.GOOS != "windows" {
+			addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
+		} else {
+			// Note - defaultTCPAddr already includes tcp:// prefix
+			addr = fmt.Sprintf("%s", defaultTCPAddr)
+		}
 	}
 	addrParts := strings.Split(addr, "://")
 	if len(addrParts) == 1 {
@@ -135,3 +141,17 @@
 	}
 	return start, end, nil
 }
+
+func ParseLink(val string) (string, string, error) {
+	if val == "" {
+		return "", "", fmt.Errorf("empty string specified for links")
+	}
+	arr := strings.Split(val, ":")
+	if len(arr) > 2 {
+		return "", "", fmt.Errorf("bad format for links: %s", val)
+	}
+	if len(arr) == 1 {
+		return val, val, nil
+	}
+	return arr[0], arr[1], nil
+}
diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go
index bc9a1e9..89f4ae0 100644
--- a/pkg/parsers/parsers_test.go
+++ b/pkg/parsers/parsers_test.go
@@ -123,3 +123,35 @@
 		t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err)
 	}
 }
+
+func TestParseLink(t *testing.T) {
+	name, alias, err := ParseLink("name:alias")
+	if err != nil {
+		t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err)
+	}
+	if name != "name" {
+		t.Fatalf("Link name should have been name, got %s instead", name)
+	}
+	if alias != "alias" {
+		t.Fatalf("Link alias should have been alias, got %s instead", alias)
+	}
+	// short format definition
+	name, alias, err = ParseLink("name")
+	if err != nil {
+		t.Fatalf("Expected not to error out on a valid name only format but got: %v", err)
+	}
+	if name != "name" {
+		t.Fatalf("Link name should have been name, got %s instead", name)
+	}
+	if alias != "name" {
+		t.Fatalf("Link alias should have been name, got %s instead", alias)
+	}
+	// empty string link definition is not allowed
+	if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") {
+		t.Fatalf("Expected error 'empty string specified for links' but got: %v", err)
+	}
+	// more than two colons are not allowed
+	if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") {
+		t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err)
+	}
+}
diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go
new file mode 100644
index 0000000..3e57073
--- /dev/null
+++ b/pkg/pidfile/pidfile.go
@@ -0,0 +1,42 @@
+package pidfile
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+)
+
+type PidFile struct {
+	path string
+}
+
+func checkPidFileAlreadyExists(path string) error {
+	if pidString, err := ioutil.ReadFile(path); err == nil {
+		if pid, err := strconv.Atoi(string(pidString)); err == nil {
+			if _, err := os.Stat(filepath.Join("/proc", string(pid))); err == nil {
+				return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path)
+			}
+		}
+	}
+	return nil
+}
+
+func New(path string) (*PidFile, error) {
+	if err := checkPidFileAlreadyExists(path); err != nil {
+		return nil, err
+	}
+	if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil {
+		return nil, err
+	}
+
+	return &PidFile{path: path}, nil
+}
+
+func (file PidFile) Remove() error {
+	if err := os.Remove(file.path); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/pidfile/pidfile_test.go b/pkg/pidfile/pidfile_test.go
new file mode 100644
index 0000000..6ed9cfc
--- /dev/null
+++ b/pkg/pidfile/pidfile_test.go
@@ -0,0 +1,32 @@
+package pidfile
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestNewAndRemove(t *testing.T) {
+	dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile")
+	if err != nil {
+		t.Fatal("Could not create test directory")
+	}
+
+	file, err := New(filepath.Join(dir, "testfile"))
+	if err != nil {
+		t.Fatal("Could not create test file", err)
+	}
+
+	if err := file.Remove(); err != nil {
+		t.Fatal("Could not delete created test file")
+	}
+}
+
+func TestRemoveInvalidPath(t *testing.T) {
+	file := PidFile{path: filepath.Join("foo", "bar")}
+
+	if err := file.Remove(); err == nil {
+		t.Fatal("Non-existing file doesn't give an error on delete")
+	}
+}
diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go
new file mode 100644
index 0000000..d531fa4
--- /dev/null
+++ b/pkg/plugins/client.go
@@ -0,0 +1,112 @@
+package plugins
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+const (
+	versionMimetype = "application/vnd.docker.plugins.v1+json"
+	defaultTimeOut  = 30
+)
+
+func NewClient(addr string) *Client {
+	tr := &http.Transport{}
+	protoAndAddr := strings.Split(addr, "://")
+	configureTCPTransport(tr, protoAndAddr[0], protoAndAddr[1])
+	return &Client{&http.Client{Transport: tr}, protoAndAddr[1]}
+}
+
+type Client struct {
+	http *http.Client
+	addr string
+}
+
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
+	return c.callWithRetry(serviceMethod, args, ret, true)
+}
+
+func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret interface{}, retry bool) error {
+	var buf bytes.Buffer
+	if err := json.NewEncoder(&buf).Encode(args); err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("POST", "/"+serviceMethod, &buf)
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Accept", versionMimetype)
+	req.URL.Scheme = "http"
+	req.URL.Host = c.addr
+
+	var retries int
+	start := time.Now()
+
+	for {
+		resp, err := c.http.Do(req)
+		if err != nil {
+			if !retry {
+				return err
+			}
+
+			timeOff := backoff(retries)
+			if abort(start, timeOff) {
+				return err
+			}
+			retries++
+			logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
+			time.Sleep(timeOff)
+			continue
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			remoteErr, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				return nil
+			}
+			return fmt.Errorf("Plugin Error: %s", remoteErr)
+		}
+
+		return json.NewDecoder(resp.Body).Decode(&ret)
+	}
+}
+
+func backoff(retries int) time.Duration {
+	b, max := 1, defaultTimeOut
+	for b < max && retries > 0 {
+		b *= 2
+		retries--
+	}
+	if b > max {
+		b = max
+	}
+	return time.Duration(b) * time.Second
+}
+
+func abort(start time.Time, timeOff time.Duration) bool {
+	return timeOff+time.Since(start) > time.Duration(defaultTimeOut)*time.Second
+}
+
+func configureTCPTransport(tr *http.Transport, proto, addr string) {
+	// Why 32? See https://github.com/docker/docker/pull/8035.
+	timeout := 32 * time.Second
+	if proto == "unix" {
+		// No need for compression in local communications.
+		tr.DisableCompression = true
+		tr.Dial = func(_, _ string) (net.Conn, error) {
+			return net.DialTimeout(proto, addr, timeout)
+		}
+	} else {
+		tr.Proxy = http.ProxyFromEnvironment
+		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
+	}
+}
diff --git a/pkg/plugins/client_test.go b/pkg/plugins/client_test.go
new file mode 100644
index 0000000..0f7cd34
--- /dev/null
+++ b/pkg/plugins/client_test.go
@@ -0,0 +1,105 @@
+package plugins
+
+import (
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"testing"
+	"time"
+)
+
+var (
+	mux    *http.ServeMux
+	server *httptest.Server
+)
+
+func setupRemotePluginServer() string {
+	mux = http.NewServeMux()
+	server = httptest.NewServer(mux)
+	return server.URL
+}
+
+func teardownRemotePluginServer() {
+	if server != nil {
+		server.Close()
+	}
+}
+
+func TestFailedConnection(t *testing.T) {
+	c := NewClient("tcp://127.0.0.1:1")
+	err := c.callWithRetry("Service.Method", nil, nil, false)
+	if err == nil {
+		t.Fatal("Unexpected successful connection")
+	}
+}
+
+func TestEchoInputOutput(t *testing.T) {
+	addr := setupRemotePluginServer()
+	defer teardownRemotePluginServer()
+
+	m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}}
+
+	mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != "POST" {
+			t.Fatalf("Expected POST, got %s\n", r.Method)
+		}
+
+		header := w.Header()
+		header.Set("Content-Type", versionMimetype)
+
+		io.Copy(w, r.Body)
+	})
+
+	c := NewClient(addr)
+	var output Manifest
+	err := c.Call("Test.Echo", m, &output)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(output, m) {
+		t.Fatalf("Expected %v, was %v\n", m, output)
+	}
+}
+
+func TestBackoff(t *testing.T) {
+	cases := []struct {
+		retries    int
+		expTimeOff time.Duration
+	}{
+		{0, time.Duration(1)},
+		{1, time.Duration(2)},
+		{2, time.Duration(4)},
+		{4, time.Duration(16)},
+		{6, time.Duration(30)},
+		{10, time.Duration(30)},
+	}
+
+	for _, c := range cases {
+		s := c.expTimeOff * time.Second
+		if d := backoff(c.retries); d != s {
+			t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d)
+		}
+	}
+}
+
+func TestAbortRetry(t *testing.T) {
+	cases := []struct {
+		timeOff  time.Duration
+		expAbort bool
+	}{
+		{time.Duration(1), false},
+		{time.Duration(2), false},
+		{time.Duration(10), false},
+		{time.Duration(30), true},
+		{time.Duration(40), true},
+	}
+
+	for _, c := range cases {
+		s := c.timeOff * time.Second
+		if a := abort(time.Now(), s); a != c.expAbort {
+			t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a)
+		}
+	}
+}
diff --git a/pkg/plugins/discovery.go b/pkg/plugins/discovery.go
new file mode 100644
index 0000000..3a42ba6
--- /dev/null
+++ b/pkg/plugins/discovery.go
@@ -0,0 +1,78 @@
+package plugins
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+const defaultLocalRegistry = "/usr/share/docker/plugins"
+
+var (
+	ErrNotFound = errors.New("Plugin not found")
+)
+
+type Registry interface {
+	Plugins() ([]*Plugin, error)
+	Plugin(name string) (*Plugin, error)
+}
+
+type LocalRegistry struct {
+	path string
+}
+
+func newLocalRegistry(path string) *LocalRegistry {
+	if len(path) == 0 {
+		path = defaultLocalRegistry
+	}
+
+	return &LocalRegistry{path}
+}
+
+func (l *LocalRegistry) Plugin(name string) (*Plugin, error) {
+	filepath := filepath.Join(l.path, name)
+	specpath := filepath + ".spec"
+	if fi, err := os.Stat(specpath); err == nil {
+		return readPluginInfo(specpath, fi)
+	}
+	socketpath := filepath + ".sock"
+	if fi, err := os.Stat(socketpath); err == nil {
+		return readPluginInfo(socketpath, fi)
+	}
+	return nil, ErrNotFound
+}
+
+func readPluginInfo(path string, fi os.FileInfo) (*Plugin, error) {
+	name := strings.Split(fi.Name(), ".")[0]
+
+	if fi.Mode()&os.ModeSocket != 0 {
+		return &Plugin{
+			Name: name,
+			Addr: "unix://" + path,
+		}, nil
+	}
+
+	content, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	addr := strings.TrimSpace(string(content))
+
+	u, err := url.Parse(addr)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(u.Scheme) == 0 {
+		return nil, fmt.Errorf("Unknown protocol")
+	}
+
+	return &Plugin{
+		Name: name,
+		Addr: addr,
+	}, nil
+}
diff --git a/pkg/plugins/discovery_test.go b/pkg/plugins/discovery_test.go
new file mode 100644
index 0000000..b6e66e2
--- /dev/null
+++ b/pkg/plugins/discovery_test.go
@@ -0,0 +1,108 @@
+package plugins
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"testing"
+)
+
+func TestUnknownLocalPath(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+
+	l := newLocalRegistry(filepath.Join(tmpdir, "unknown"))
+	_, err = l.Plugin("foo")
+	if err == nil || err != ErrNotFound {
+		t.Fatalf("Expected error for unknown directory")
+	}
+}
+
+func TestLocalSocket(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpdir)
+	l, err := net.Listen("unix", filepath.Join(tmpdir, "echo.sock"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer l.Close()
+
+	r := newLocalRegistry(tmpdir)
+	p, err := r.Plugin("echo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	pp, err := r.Plugin("echo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(p, pp) {
+		t.Fatalf("Expected %v, was %v\n", p, pp)
+	}
+
+	if p.Name != "echo" {
+		t.Fatalf("Expected plugin `echo`, got %s\n", p.Name)
+	}
+
+	addr := fmt.Sprintf("unix://%s/echo.sock", tmpdir)
+	if p.Addr != addr {
+		t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr)
+	}
+}
+
+func TestFileSpecPlugin(t *testing.T) {
+	tmpdir, err := ioutil.TempDir("", "docker-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cases := []struct {
+		path string
+		name string
+		addr string
+		fail bool
+	}{
+		{filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false},
+		{filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false},
+		{filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport
+	}
+
+	for _, c := range cases {
+		if err = os.MkdirAll(path.Dir(c.path), 0755); err != nil {
+			t.Fatal(err)
+		}
+		if err = ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil {
+			t.Fatal(err)
+		}
+
+		r := newLocalRegistry(tmpdir)
+		p, err := r.Plugin(c.name)
+		if c.fail && err == nil {
+			continue
+		}
+
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if p.Name != c.name {
+			t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.Name)
+		}
+
+		if p.Addr != c.addr {
+			t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr)
+		}
+		os.Remove(c.path)
+	}
+}
diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go
new file mode 100644
index 0000000..4751948
--- /dev/null
+++ b/pkg/plugins/plugins.go
@@ -0,0 +1,100 @@
+package plugins
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+)
+
+var (
+	ErrNotImplements = errors.New("Plugin does not implement the requested driver")
+)
+
+type plugins struct {
+	sync.Mutex
+	plugins map[string]*Plugin
+}
+
+var (
+	storage          = plugins{plugins: make(map[string]*Plugin)}
+	extpointHandlers = make(map[string]func(string, *Client))
+)
+
+type Manifest struct {
+	Implements []string
+}
+
+type Plugin struct {
+	Name     string
+	Addr     string
+	Client   *Client
+	Manifest *Manifest
+}
+
+func (p *Plugin) activate() error {
+	m := new(Manifest)
+	p.Client = NewClient(p.Addr)
+	err := p.Client.Call("Plugin.Activate", nil, m)
+	if err != nil {
+		return err
+	}
+
+	logrus.Debugf("%s's manifest: %v", p.Name, m)
+	p.Manifest = m
+	for _, iface := range m.Implements {
+		handler, handled := extpointHandlers[iface]
+		if !handled {
+			continue
+		}
+		handler(p.Name, p.Client)
+	}
+	return nil
+}
+
+func load(name string) (*Plugin, error) {
+	registry := newLocalRegistry("")
+	pl, err := registry.Plugin(name)
+	if err != nil {
+		return nil, err
+	}
+	if err := pl.activate(); err != nil {
+		return nil, err
+	}
+	return pl, nil
+}
+
+func get(name string) (*Plugin, error) {
+	storage.Lock()
+	defer storage.Unlock()
+	pl, ok := storage.plugins[name]
+	if ok {
+		return pl, nil
+	}
+	pl, err := load(name)
+	if err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("Plugin: %v", pl)
+	storage.plugins[name] = pl
+	return pl, nil
+}
+
+func Get(name, imp string) (*Plugin, error) {
+	pl, err := get(name)
+	if err != nil {
+		return nil, err
+	}
+	for _, driver := range pl.Manifest.Implements {
+		logrus.Debugf("%s implements: %s", name, driver)
+		if driver == imp {
+			return pl, nil
+		}
+	}
+	return nil, ErrNotImplements
+}
+
+func Handle(iface string, fn func(string, *Client)) {
+	extpointHandlers[iface] = fn
+}
diff --git a/pkg/pools/pools.go b/pkg/pools/pools.go
index 5338a0c..f366fa6 100644
--- a/pkg/pools/pools.go
+++ b/pkg/pools/pools.go
@@ -1,5 +1,3 @@
-// +build go1.3
-
 // Package pools provides a collection of pools which provide various
 // data types with buffers. These can be used to lower the number of
 // memory allocations and reuse buffers.
diff --git a/pkg/pools/pools_nopool.go b/pkg/pools/pools_nopool.go
deleted file mode 100644
index 48903c2..0000000
--- a/pkg/pools/pools_nopool.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// +build !go1.3
-
-package pools
-
-import (
-	"bufio"
-	"io"
-
-	"github.com/docker/docker/pkg/ioutils"
-)
-
-var (
-	BufioReader32KPool *BufioReaderPool
-	BufioWriter32KPool *BufioWriterPool
-)
-
-const buffer32K = 32 * 1024
-
-type BufioReaderPool struct {
-	size int
-}
-
-func init() {
-	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
-	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
-}
-
-func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
-	return &BufioReaderPool{size: size}
-}
-
-func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
-	return bufio.NewReaderSize(r, bufPool.size)
-}
-
-func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
-	b.Reset(nil)
-}
-
-func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
-	return ioutils.NewReadCloserWrapper(r, func() error {
-		if readCloser, ok := r.(io.ReadCloser); ok {
-			return readCloser.Close()
-		}
-		return nil
-	})
-}
-
-type BufioWriterPool struct {
-	size int
-}
-
-func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
-	return &BufioWriterPool{size: size}
-}
-
-func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
-	return bufio.NewWriterSize(w, bufPool.size)
-}
-
-func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
-	b.Reset(nil)
-}
-
-func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
-	return ioutils.NewWriteCloserWrapper(w, func() error {
-		buf.Flush()
-		if writeCloser, ok := w.(io.WriteCloser); ok {
-			return writeCloser.Close()
-		}
-		return nil
-	})
-}
diff --git a/pkg/progressreader/progressreader.go b/pkg/progressreader/progressreader.go
index f6ad06a..652831b 100644
--- a/pkg/progressreader/progressreader.go
+++ b/pkg/progressreader/progressreader.go
@@ -2,36 +2,16 @@
 
 import (
 	"io"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/docker/docker/pkg/streamformatter"
 )
 
-type StreamFormatter interface {
-	FormatProg(string, string, interface{}) []byte
-	FormatStatus(string, string, ...interface{}) []byte
-	FormatError(error) []byte
-}
-
-type PR_JSONProgress interface {
-	GetCurrent() int
-	GetTotal() int
-}
-
-type JSONProg struct {
-	Current int
-	Total   int
-}
-
-func (j *JSONProg) GetCurrent() int {
-	return j.Current
-}
-func (j *JSONProg) GetTotal() int {
-	return j.Total
-}
-
 // Reader with progress bar
 type Config struct {
 	In         io.ReadCloser // Stream to read from
 	Out        io.Writer     // Where to send progress bar to
-	Formatter  StreamFormatter
+	Formatter  *streamformatter.StreamFormatter
 	Size       int
 	Current    int
 	LastUpdate int
@@ -54,7 +34,7 @@
 		}
 	}
 	if config.Current-config.LastUpdate > updateEvery || err != nil {
-		config.Out.Write(config.Formatter.FormatProg(config.ID, config.Action, &JSONProg{Current: config.Current, Total: config.Size}))
+		config.Out.Write(config.Formatter.FormatProgress(config.ID, config.Action, &jsonmessage.JSONProgress{Current: config.Current, Total: config.Size}))
 		config.LastUpdate = config.Current
 	}
 	// Send newline when complete
@@ -65,6 +45,6 @@
 }
 func (config *Config) Close() error {
 	config.Current = config.Size
-	config.Out.Write(config.Formatter.FormatProg(config.ID, config.Action, &JSONProg{Current: config.Current, Total: config.Size}))
+	config.Out.Write(config.Formatter.FormatProgress(config.ID, config.Action, &jsonmessage.JSONProgress{Current: config.Current, Total: config.Size}))
 	return config.In.Close()
 }
diff --git a/pkg/proxy/tcp_proxy.go b/pkg/proxy/tcp_proxy.go
index eacf142..9942e6d 100644
--- a/pkg/proxy/tcp_proxy.go
+++ b/pkg/proxy/tcp_proxy.go
@@ -5,7 +5,7 @@
 	"net"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 type TCPProxy struct {
@@ -31,7 +31,7 @@
 func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) {
 	backend, err := net.DialTCP("tcp", nil, proxy.backendAddr)
 	if err != nil {
-		log.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err)
+		logrus.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err)
 		client.Close()
 		return
 	}
@@ -78,7 +78,7 @@
 	for {
 		client, err := proxy.listener.Accept()
 		if err != nil {
-			log.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
+			logrus.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
 			return
 		}
 		go proxy.clientLoop(client.(*net.TCPConn), quit)
diff --git a/pkg/proxy/udp_proxy.go b/pkg/proxy/udp_proxy.go
index a3fcf11..2a073df 100644
--- a/pkg/proxy/udp_proxy.go
+++ b/pkg/proxy/udp_proxy.go
@@ -8,7 +8,7 @@
 	"syscall"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 const (
@@ -105,7 +105,7 @@
 			// ECONNREFUSED like Read do (see comment in
 			// UDPProxy.replyLoop)
 			if !isClosedError(err) {
-				log.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
+				logrus.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err)
 			}
 			break
 		}
@@ -116,7 +116,7 @@
 		if !hit {
 			proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr)
 			if err != nil {
-				log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
+				logrus.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
 				proxy.connTrackLock.Unlock()
 				continue
 			}
@@ -127,7 +127,7 @@
 		for i := 0; i != read; {
 			written, err := proxyConn.Write(readBuf[i:read])
 			if err != nil {
-				log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
+				logrus.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err)
 				break
 			}
 			i += written
diff --git a/pkg/random/random.go b/pkg/random/random.go
new file mode 100644
index 0000000..05b7f7f
--- /dev/null
+++ b/pkg/random/random.go
@@ -0,0 +1,34 @@
+package random
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// copypaste from standard math/rand
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// NewSource returns math/rand.Source safe for concurrent use and initialized
+// with current unix-nano timestamp
+func NewSource() rand.Source {
+	return &lockedSource{
+		src: rand.NewSource(time.Now().UnixNano()),
+	}
+}
diff --git a/pkg/random/random_test.go b/pkg/random/random_test.go
new file mode 100644
index 0000000..cf405f7
--- /dev/null
+++ b/pkg/random/random_test.go
@@ -0,0 +1,22 @@
+package random
+
+import (
+	"math/rand"
+	"sync"
+	"testing"
+)
+
+// for go test -v -race
+func TestConcurrency(t *testing.T) {
+	rnd := rand.New(NewSource())
+	var wg sync.WaitGroup
+
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			rnd.Int63()
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go
index a579318..4adcd8f 100644
--- a/pkg/reexec/command_unsupported.go
+++ b/pkg/reexec/command_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!windows
 
 package reexec
 
diff --git a/pkg/reexec/command_windows.go b/pkg/reexec/command_windows.go
new file mode 100644
index 0000000..124d42f
--- /dev/null
+++ b/pkg/reexec/command_windows.go
@@ -0,0 +1,14 @@
+// +build windows
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/pkg/signal/README.md b/pkg/signal/README.md
new file mode 100644
index 0000000..2b237a5
--- /dev/null
+++ b/pkg/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems
\ No newline at end of file
diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go
index 78a709b..3772db5 100644
--- a/pkg/signal/trap.go
+++ b/pkg/signal/trap.go
@@ -3,10 +3,11 @@
 import (
 	"os"
 	gosignal "os/signal"
+	"runtime"
 	"sync/atomic"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 // Trap sets up a simplified signal "trap", appropriate for common
@@ -14,41 +15,50 @@
 // (and the Docker engine in particular).
 //
 // * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
-// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is
-// skipped and the process terminated directly.
-// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup.
+// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
+//   skipped and the process is terminated immediately (allows force quit of stuck daemon)
+// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
 //
 func Trap(cleanup func()) {
 	c := make(chan os.Signal, 1)
-	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
-	if os.Getenv("DEBUG") == "" {
-		signals = append(signals, syscall.SIGQUIT)
-	}
+	// we will handle INT, TERM, QUIT here
+	signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT}
 	gosignal.Notify(c, signals...)
 	go func() {
 		interruptCount := uint32(0)
 		for sig := range c {
 			go func(sig os.Signal) {
-				log.Infof("Received signal '%v', starting shutdown of docker...", sig)
+				logrus.Infof("Processing signal '%v'", sig)
 				switch sig {
 				case os.Interrupt, syscall.SIGTERM:
-					// If the user really wants to interrupt, let him do so.
 					if atomic.LoadUint32(&interruptCount) < 3 {
 						// Initiate the cleanup only once
 						if atomic.AddUint32(&interruptCount, 1) == 1 {
-							// Call cleanup handler
+							// Call the provided cleanup handler
 							cleanup()
 							os.Exit(0)
 						} else {
 							return
 						}
 					} else {
-						log.Infof("Force shutdown of docker, interrupting cleanup")
+						// 3 SIGTERM/INT signals received; force exit without cleanup
+						logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
 					}
 				case syscall.SIGQUIT:
+					DumpStacks()
+					logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
 				}
+				//for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
 				os.Exit(128 + int(sig.(syscall.Signal)))
 			}(sig)
 		}
 	}()
 }
+
+func DumpStacks() {
+	buf := make([]byte, 16384)
+	buf = buf[:runtime.Stack(buf, true)]
+	// Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine
+	// traces won't show up in the log.
+	logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
+}
diff --git a/pkg/sockets/README.md b/pkg/sockets/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg/sockets/README.md
diff --git a/pkg/sockets/tcp_socket.go b/pkg/sockets/tcp_socket.go
new file mode 100644
index 0000000..ac9edae
--- /dev/null
+++ b/pkg/sockets/tcp_socket.go
@@ -0,0 +1,69 @@
+package sockets
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+
+	"github.com/docker/docker/pkg/listenbuffer"
+)
+
+type TlsConfig struct {
+	CA          string
+	Certificate string
+	Key         string
+	Verify      bool
+}
+
+func NewTlsConfig(tlsCert, tlsKey, tlsCA string, verify bool) *TlsConfig {
+	return &TlsConfig{
+		Verify:      verify,
+		Certificate: tlsCert,
+		Key:         tlsKey,
+		CA:          tlsCA,
+	}
+}
+
+func NewTcpSocket(addr string, config *TlsConfig, activate <-chan struct{}) (net.Listener, error) {
+	l, err := listenbuffer.NewListenBuffer("tcp", addr, activate)
+	if err != nil {
+		return nil, err
+	}
+	if config != nil {
+		if l, err = setupTls(l, config); err != nil {
+			return nil, err
+		}
+	}
+	return l, nil
+}
+
+func setupTls(l net.Listener, config *TlsConfig) (net.Listener, error) {
+	tlsCert, err := tls.LoadX509KeyPair(config.Certificate, config.Key)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", config.Certificate, config.Key, err)
+		}
+		return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %q. Make sure the key is encrypted.",
+			config.Certificate, config.Key, err)
+	}
+	tlsConfig := &tls.Config{
+		NextProtos:   []string{"http/1.1"},
+		Certificates: []tls.Certificate{tlsCert},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+	if config.CA != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(config.CA)
+		if err != nil {
+			return nil, fmt.Errorf("Could not read CA certificate: %v", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+	return tls.NewListener(l, tlsConfig), nil
+}
diff --git a/pkg/sockets/unix_socket.go b/pkg/sockets/unix_socket.go
new file mode 100644
index 0000000..0536382
--- /dev/null
+++ b/pkg/sockets/unix_socket.go
@@ -0,0 +1,80 @@
+// +build linux
+
+package sockets
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/listenbuffer"
+	"github.com/docker/libcontainer/user"
+)
+
+func NewUnixSocket(path, group string, activate <-chan struct{}) (net.Listener, error) {
+	if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	mask := syscall.Umask(0777)
+	defer syscall.Umask(mask)
+	l, err := listenbuffer.NewListenBuffer("unix", path, activate)
+	if err != nil {
+		return nil, err
+	}
+	if err := setSocketGroup(path, group); err != nil {
+		l.Close()
+		return nil, err
+	}
+	if err := os.Chmod(path, 0660); err != nil {
+		l.Close()
+		return nil, err
+	}
+	return l, nil
+}
+
+func setSocketGroup(path, group string) error {
+	if group == "" {
+		return nil
+	}
+	if err := changeGroup(path, group); err != nil {
+		if group != "docker" {
+			return err
+		}
+		logrus.Debugf("Warning: could not change group %s to docker: %v", path, err)
+	}
+	return nil
+}
+
+func changeGroup(path string, nameOrGid string) error {
+	gid, err := lookupGidByName(nameOrGid)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("%s group found. gid: %d", nameOrGid, gid)
+	return os.Chown(path, 0, gid)
+}
+
+func lookupGidByName(nameOrGid string) (int, error) {
+	groupFile, err := user.GetGroupPath()
+	if err != nil {
+		return -1, err
+	}
+	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
+		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
+	})
+	if err != nil {
+		return -1, err
+	}
+	if groups != nil && len(groups) > 0 {
+		return groups[0].Gid, nil
+	}
+	gid, err := strconv.Atoi(nameOrGid)
+	if err == nil {
+		logrus.Warnf("Could not find GID %d", gid)
+		return gid, nil
+	}
+	return -1, fmt.Errorf("Group %s not found", nameOrGid)
+}
diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go
index a61779c..dbb74e5 100644
--- a/pkg/stdcopy/stdcopy.go
+++ b/pkg/stdcopy/stdcopy.go
@@ -5,7 +5,7 @@
 	"errors"
 	"io"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 const (
@@ -52,12 +52,8 @@
 // and written to the underlying `w` stream.
 // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
 // `t` indicates the id of the stream to encapsulate.
-// It can be utils.Stdin, utils.Stdout, utils.Stderr.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
 func NewStdWriter(w io.Writer, t StdType) *StdWriter {
-	if len(t) != StdWriterPrefixLen {
-		return nil
-	}
-
 	return &StdWriter{
 		Writer:  w,
 		prefix:  t,
@@ -95,13 +91,13 @@
 			nr += nr2
 			if er == io.EOF {
 				if nr < StdWriterPrefixLen {
-					log.Debugf("Corrupted prefix: %v", buf[:nr])
+					logrus.Debugf("Corrupted prefix: %v", buf[:nr])
 					return written, nil
 				}
 				break
 			}
 			if er != nil {
-				log.Debugf("Error reading header: %s", er)
+				logrus.Debugf("Error reading header: %s", er)
 				return 0, er
 			}
 		}
@@ -117,18 +113,18 @@
 			// Write on stderr
 			out = dsterr
 		default:
-			log.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
+			logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
 			return 0, ErrInvalidStdHeader
 		}
 
 		// Retrieve the size of the frame
 		frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
-		log.Debugf("framesize: %d", frameSize)
+		logrus.Debugf("framesize: %d", frameSize)
 
 		// Check if the buffer is big enough to read the frame.
 		// Extend it if necessary.
 		if frameSize+StdWriterPrefixLen > bufLen {
-			log.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
+			logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
 			buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
 			bufLen = len(buf)
 		}
@@ -140,13 +136,13 @@
 			nr += nr2
 			if er == io.EOF {
 				if nr < frameSize+StdWriterPrefixLen {
-					log.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+					logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
 					return written, nil
 				}
 				break
 			}
 			if er != nil {
-				log.Debugf("Error reading frame: %s", er)
+				logrus.Debugf("Error reading frame: %s", er)
 				return 0, er
 			}
 		}
@@ -154,12 +150,12 @@
 		// Write the retrieved frame (without header)
 		nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
 		if ew != nil {
-			log.Debugf("Error writing frame: %s", ew)
+			logrus.Debugf("Error writing frame: %s", ew)
 			return 0, ew
 		}
 		// If the frame has not been fully written: error
 		if nw != frameSize {
-			log.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+			logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
 			return 0, io.ErrShortWrite
 		}
 		written += int64(nw)
diff --git a/pkg/stdcopy/stdcopy_test.go b/pkg/stdcopy/stdcopy_test.go
index 14e6ed3..a9fd73a 100644
--- a/pkg/stdcopy/stdcopy_test.go
+++ b/pkg/stdcopy/stdcopy_test.go
@@ -3,9 +3,74 @@
 import (
 	"bytes"
 	"io/ioutil"
+	"strings"
 	"testing"
 )
 
+func TestNewStdWriter(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	if writer == nil {
+		t.Fatalf("NewStdWriter with an invalid StdType should not return nil.")
+	}
+}
+
+func TestWriteWithUnitializedStdWriter(t *testing.T) {
+	writer := StdWriter{
+		Writer:  nil,
+		prefix:  Stdout,
+		sizeBuf: make([]byte, 4),
+	}
+	n, err := writer.Write([]byte("Something here"))
+	if n != 0 || err == nil {
+		t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter")
+	}
+}
+
+func TestWriteWithNilBytes(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	n, err := writer.Write(nil)
+	if err != nil {
+		t.Fatalf("Shouldn't have fail when given no data")
+	}
+	if n > 0 {
+		t.Fatalf("Write should have written 0 byte, but has written %d", n)
+	}
+}
+
+func TestWrite(t *testing.T) {
+	writer := NewStdWriter(ioutil.Discard, Stdout)
+	data := []byte("Test StdWrite.Write")
+	n, err := writer.Write(data)
+	if err != nil {
+		t.Fatalf("Error while writing with StdWrite")
+	}
+	if n != len(data) {
+		t.Fatalf("Write should have writen %d byte but wrote %d.", len(data), n)
+	}
+}
+
+func TestStdCopyWithInvalidInputHeader(t *testing.T) {
+	dstOut := NewStdWriter(ioutil.Discard, Stdout)
+	dstErr := NewStdWriter(ioutil.Discard, Stderr)
+	src := strings.NewReader("Invalid input")
+	_, err := StdCopy(dstOut, dstErr, src)
+	if err == nil {
+		t.Fatal("StdCopy with invalid input header should fail.")
+	}
+}
+
+func TestStdCopyWithCorruptedPrefix(t *testing.T) {
+	data := []byte{0x01, 0x02, 0x03}
+	src := bytes.NewReader(data)
+	written, err := StdCopy(nil, nil, src)
+	if err != nil {
+		t.Fatalf("StdCopy should not return an error with corrupted prefix.")
+	}
+	if written != 0 {
+		t.Fatalf("StdCopy should have written 0, but has written %d", written)
+	}
+}
+
 func BenchmarkWrite(b *testing.B) {
 	w := NewStdWriter(ioutil.Discard, Stdout)
 	data := []byte("Test line for testing stdwriter performance\n")
diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go
new file mode 100644
index 0000000..792ce00
--- /dev/null
+++ b/pkg/streamformatter/streamformatter.go
@@ -0,0 +1,115 @@
+package streamformatter
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+)
+
+type StreamFormatter struct {
+	json bool
+}
+
+// NewStreamFormatter returns a simple StreamFormatter
+func NewStreamFormatter() *StreamFormatter {
+	return &StreamFormatter{}
+}
+
+// NewJSONStreamFormatter returns a StreamFormatter configured to stream json
+func NewJSONStreamFormatter() *StreamFormatter {
+	return &StreamFormatter{true}
+}
+
+const streamNewline = "\r\n"
+
+var streamNewlineBytes = []byte(streamNewline)
+
+func (sf *StreamFormatter) FormatStream(str string) []byte {
+	if sf.json {
+		b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str})
+		if err != nil {
+			return sf.FormatError(err)
+		}
+		return append(b, streamNewlineBytes...)
+	}
+	return []byte(str + "\r")
+}
+
+func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte {
+	str := fmt.Sprintf(format, a...)
+	if sf.json {
+		b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str})
+		if err != nil {
+			return sf.FormatError(err)
+		}
+		return append(b, streamNewlineBytes...)
+	}
+	return []byte(str + streamNewline)
+}
+
+func (sf *StreamFormatter) FormatError(err error) []byte {
+	if sf.json {
+		jsonError, ok := err.(*jsonmessage.JSONError)
+		if !ok {
+			jsonError = &jsonmessage.JSONError{Message: err.Error()}
+		}
+		if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
+			return append(b, streamNewlineBytes...)
+		}
+		return []byte("{\"error\":\"format error\"}" + streamNewline)
+	}
+	return []byte("Error: " + err.Error() + streamNewline)
+}
+
+func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress) []byte {
+	if progress == nil {
+		progress = &jsonmessage.JSONProgress{}
+	}
+	if sf.json {
+		b, err := json.Marshal(&jsonmessage.JSONMessage{
+			Status:          action,
+			ProgressMessage: progress.String(),
+			Progress:        progress,
+			ID:              id,
+		})
+		if err != nil {
+			return nil
+		}
+		return b
+	}
+	endl := "\r"
+	if progress.String() == "" {
+		endl += "\n"
+	}
+	return []byte(action + " " + progress.String() + endl)
+}
+
+type StdoutFormater struct {
+	io.Writer
+	*StreamFormatter
+}
+
+func (sf *StdoutFormater) Write(buf []byte) (int, error) {
+	formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
+	n, err := sf.Writer.Write(formattedBuf)
+	if n != len(formattedBuf) {
+		return n, io.ErrShortWrite
+	}
+	return len(buf), err
+}
+
+type StderrFormater struct {
+	io.Writer
+	*StreamFormatter
+}
+
+func (sf *StderrFormater) Write(buf []byte) (int, error) {
+	formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
+	n, err := sf.Writer.Write(formattedBuf)
+	if n != len(formattedBuf) {
+		return n, io.ErrShortWrite
+	}
+	return len(buf), err
+}
diff --git a/pkg/streamformatter/streamformatter_test.go b/pkg/streamformatter/streamformatter_test.go
new file mode 100644
index 0000000..acf81be
--- /dev/null
+++ b/pkg/streamformatter/streamformatter_test.go
@@ -0,0 +1,93 @@
+package streamformatter
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+	"testing"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+)
+
+func TestFormatStream(t *testing.T) {
+	sf := NewStreamFormatter()
+	res := sf.FormatStream("stream")
+	if string(res) != "stream"+"\r" {
+		t.Fatalf("%q", res)
+	}
+}
+
+func TestFormatJSONStatus(t *testing.T) {
+	sf := NewStreamFormatter()
+	res := sf.FormatStatus("ID", "%s%d", "a", 1)
+	if string(res) != "a1\r\n" {
+		t.Fatalf("%q", res)
+	}
+}
+
+func TestFormatSimpleError(t *testing.T) {
+	sf := NewStreamFormatter()
+	res := sf.FormatError(errors.New("Error for formatter"))
+	if string(res) != "Error: Error for formatter\r\n" {
+		t.Fatalf("%q", res)
+	}
+}
+
+func TestJSONFormatStream(t *testing.T) {
+	sf := NewJSONStreamFormatter()
+	res := sf.FormatStream("stream")
+	if string(res) != `{"stream":"stream"}`+"\r\n" {
+		t.Fatalf("%q", res)
+	}
+}
+
+func TestJSONFormatStatus(t *testing.T) {
+	sf := NewJSONStreamFormatter()
+	res := sf.FormatStatus("ID", "%s%d", "a", 1)
+	if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" {
+		t.Fatalf("%q", res)
+	}
+}
+
+func TestJSONFormatSimpleError(t *testing.T) {
+	sf := NewJSONStreamFormatter()
+	res := sf.FormatError(errors.New("Error for formatter"))
+	if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" {
+		t.Fatalf("%q", res)
+	}
+}
+
+func TestJSONFormatJSONError(t *testing.T) {
+	sf := NewJSONStreamFormatter()
+	err := &jsonmessage.JSONError{Code: 50, Message: "Json error"}
+	res := sf.FormatError(err)
+	if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" {
+		t.Fatalf("%q", res)
+	}
+}
+
+func TestJSONFormatProgress(t *testing.T) {
+	sf := NewJSONStreamFormatter()
+	progress := &jsonmessage.JSONProgress{
+		Current: 15,
+		Total:   30,
+		Start:   1,
+	}
+	res := sf.FormatProgress("id", "action", progress)
+	msg := &jsonmessage.JSONMessage{}
+	if err := json.Unmarshal(res, msg); err != nil {
+		t.Fatal(err)
+	}
+	if msg.ID != "id" {
+		t.Fatalf("ID must be 'id', got: %s", msg.ID)
+	}
+	if msg.Status != "action" {
+		t.Fatalf("Status must be 'action', got: %s", msg.Status)
+	}
+	if msg.ProgressMessage != progress.String() {
+		t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage)
+	}
+	if !reflect.DeepEqual(msg.Progress, progress) {
+		t.Fatal("Original progress not equals progress from FormatProgress")
+	}
+}
diff --git a/pkg/stringid/README.md b/pkg/stringid/README.md
new file mode 100644
index 0000000..37a5098
--- /dev/null
+++ b/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers
diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go
new file mode 100644
index 0000000..6a683b6
--- /dev/null
+++ b/pkg/stringid/stringid.go
@@ -0,0 +1,48 @@
+package stringid
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"io"
+	"regexp"
+	"strconv"
+)
+
+const shortLen = 12
+
+var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
+
+// Determine if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+	return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a langer prefix, or the full-length Id.
+func TruncateID(id string) string {
+	trimTo := shortLen
+	if len(id) < shortLen {
+		trimTo = len(id)
+	}
+	return id[:trimTo]
+}
+
+// GenerateRandomID returns an unique id
+func GenerateRandomID() string {
+	for {
+		id := make([]byte, 32)
+		if _, err := io.ReadFull(rand.Reader, id); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		value := hex.EncodeToString(id)
+		// if we try to parse the truncated for as an int and we don't have
+		// an error then the value is all numberic and causes issues when
+		// used as a hostname. ref #3869
+		if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil {
+			continue
+		}
+		return value
+	}
+}
diff --git a/pkg/stringid/stringid_test.go b/pkg/stringid/stringid_test.go
new file mode 100644
index 0000000..bcb1365
--- /dev/null
+++ b/pkg/stringid/stringid_test.go
@@ -0,0 +1,56 @@
+package stringid
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestGenerateRandomID(t *testing.T) {
+	id := GenerateRandomID()
+
+	if len(id) != 64 {
+		t.Fatalf("Id returned is incorrect: %s", id)
+	}
+}
+
+func TestShortenId(t *testing.T) {
+	id := GenerateRandomID()
+	truncID := TruncateID(id)
+	if len(truncID) != 12 {
+		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+	}
+}
+
+func TestShortenIdEmpty(t *testing.T) {
+	id := ""
+	truncID := TruncateID(id)
+	if len(truncID) > len(id) {
+		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+	}
+}
+
+func TestShortenIdInvalid(t *testing.T) {
+	id := "1234"
+	truncID := TruncateID(id)
+	if len(truncID) != len(id) {
+		t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+	}
+}
+
+func TestIsShortIDNonHex(t *testing.T) {
+	id := "some non-hex value"
+	if IsShortID(id) {
+		t.Fatalf("%s is not a short ID", id)
+	}
+}
+
+func TestIsShortIDNotCorrectSize(t *testing.T) {
+	id := strings.Repeat("a", shortLen+1)
+	if IsShortID(id) {
+		t.Fatalf("%s is not a short ID", id)
+	}
+	id = strings.Repeat("a", shortLen-1)
+	if IsShortID(id) {
+		t.Fatalf("%s is not a short ID", id)
+	}
+}
diff --git a/pkg/stringutils/README.md b/pkg/stringutils/README.md
new file mode 100644
index 0000000..b3e4545
--- /dev/null
+++ b/pkg/stringutils/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings
diff --git a/pkg/stringutils/stringutils.go b/pkg/stringutils/stringutils.go
new file mode 100644
index 0000000..aee2648
--- /dev/null
+++ b/pkg/stringutils/stringutils.go
@@ -0,0 +1,87 @@
+package stringutils
+
+import (
+	"bytes"
+	"math/rand"
+	"strings"
+
+	"github.com/docker/docker/pkg/random"
+)
+
+// Generate alpha only random stirng with length n
+func GenerateRandomAlphaOnlyString(n int) string {
+	// make a really long string
+	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	b := make([]byte, n)
+	r := rand.New(random.NewSource())
+	for i := range b {
+		b[i] = letters[r.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+// Generate Ascii random stirng with length n
+func GenerateRandomAsciiString(n int) string {
+	chars := "abcdefghijklmnopqrstuvwxyz" +
+		"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+	res := make([]byte, n)
+	for i := 0; i < n; i++ {
+		res[i] = chars[rand.Intn(len(chars))]
+	}
+	return string(res)
+}
+
+// Truncate a string to maxlen
+func Truncate(s string, maxlen int) string {
+	if len(s) <= maxlen {
+		return s
+	}
+	return s[:maxlen]
+}
+
+// Test wheather a string is contained in a slice of strings or not.
+// Comparison is case insensitive
+func InSlice(slice []string, s string) bool {
+	for _, ss := range slice {
+		if strings.ToLower(s) == strings.ToLower(ss) {
+			return true
+		}
+	}
+	return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+	// Bail out early for "simple" strings
+	if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+		buf.WriteString(word)
+		return
+	}
+
+	buf.WriteString("'")
+
+	for i := 0; i < len(word); i++ {
+		b := word[i]
+		if b == '\'' {
+			// Replace literal ' with a close ', a \', and a open '
+			buf.WriteString("'\\''")
+		} else {
+			buf.WriteByte(b)
+		}
+	}
+
+	buf.WriteString("'")
+}
+
+// Take a list of strings and escape them so they will be handled right
+// when passed as arguments to an program via a shell
+func ShellQuoteArguments(args []string) string {
+	var buf bytes.Buffer
+	for i, arg := range args {
+		if i != 0 {
+			buf.WriteByte(' ')
+		}
+		quote(arg, &buf)
+	}
+	return buf.String()
+}
diff --git a/pkg/stringutils/stringutils_test.go b/pkg/stringutils/stringutils_test.go
new file mode 100644
index 0000000..8dcb469
--- /dev/null
+++ b/pkg/stringutils/stringutils_test.go
@@ -0,0 +1,87 @@
+package stringutils
+
+import "testing"
+
+func testLengthHelper(generator func(int) string, t *testing.T) {
+	expectedLength := 20
+	s := generator(expectedLength)
+	if len(s) != expectedLength {
+		t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength)
+	}
+}
+
+func testUniquenessHelper(generator func(int) string, t *testing.T) {
+	repeats := 25
+	set := make(map[string]struct{}, repeats)
+	for i := 0; i < repeats; i = i + 1 {
+		str := generator(64)
+		if len(str) != 64 {
+			t.Fatalf("Id returned is incorrect: %s", str)
+		}
+		if _, ok := set[str]; ok {
+			t.Fatalf("Random number is repeated")
+		}
+		set[str] = struct{}{}
+	}
+}
+
+func isASCII(s string) bool {
+	for _, c := range s {
+		if c > 127 {
+			return false
+		}
+	}
+	return true
+}
+
+func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) {
+	testLengthHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) {
+	testUniquenessHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAsciiStringLength(t *testing.T) {
+	testLengthHelper(GenerateRandomAsciiString, t)
+}
+
+func TestGenerateRandomAsciiStringUniqueness(t *testing.T) {
+	testUniquenessHelper(GenerateRandomAsciiString, t)
+}
+
+func TestGenerateRandomAsciiStringIsAscii(t *testing.T) {
+	str := GenerateRandomAsciiString(64)
+	if !isASCII(str) {
+		t.Fatalf("%s contained non-ascii characters", str)
+	}
+}
+
+func TestTruncate(t *testing.T) {
+	str := "teststring"
+	newstr := Truncate(str, 4)
+	if newstr != "test" {
+		t.Fatalf("Expected test, got %s", newstr)
+	}
+	newstr = Truncate(str, 20)
+	if newstr != "teststring" {
+		t.Fatalf("Expected teststring, got %s", newstr)
+	}
+}
+
+func TestInSlice(t *testing.T) {
+	slice := []string{"test", "in", "slice"}
+
+	test := InSlice(slice, "test")
+	if !test {
+		t.Fatalf("Expected string test to be in slice")
+	}
+	test = InSlice(slice, "SLICE")
+	if !test {
+		t.Fatalf("Expected string SLICE to be in slice")
+	}
+	test = InSlice(slice, "notinslice")
+	if test {
+		t.Fatalf("Expected string notinslice not to be in slice")
+	}
+}
diff --git a/pkg/sysinfo/README.md b/pkg/sysinfo/README.md
new file mode 100644
index 0000000..c1530ce
--- /dev/null
+++ b/pkg/sysinfo/README.md
@@ -0,0 +1 @@
+SysInfo stores information about which features a kernel supports.
diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go
index 1d540d2..5b7eca2 100644
--- a/pkg/sysinfo/sysinfo.go
+++ b/pkg/sysinfo/sysinfo.go
@@ -1,47 +1,13 @@
 package sysinfo
 
-import (
-	"io/ioutil"
-	"os"
-	"path"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/libcontainer/cgroups"
-)
-
+// SysInfo stores information about which features a kernel supports.
+// TODO Windows: Factor out platform specific capabilities.
 type SysInfo struct {
 	MemoryLimit            bool
 	SwapLimit              bool
+	CpuCfsPeriod           bool
+	CpuCfsQuota            bool
 	IPv4ForwardingDisabled bool
 	AppArmor               bool
-}
-
-func New(quiet bool) *SysInfo {
-	sysInfo := &SysInfo{}
-	if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
-		if !quiet {
-			log.Warnf("%s", err)
-		}
-	} else {
-		_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes"))
-		_, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes"))
-		sysInfo.MemoryLimit = err1 == nil && err2 == nil
-		if !sysInfo.MemoryLimit && !quiet {
-			log.Warnf("Your kernel does not support cgroup memory limit.")
-		}
-
-		_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
-		sysInfo.SwapLimit = err == nil
-		if !sysInfo.SwapLimit && !quiet {
-			log.Warnf("Your kernel does not support cgroup swap limit.")
-		}
-	}
-
-	// Check if AppArmor seems to be enabled on this system.
-	if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
-		sysInfo.AppArmor = false
-	} else {
-		sysInfo.AppArmor = true
-	}
-	return sysInfo
+	OomKillDisable         bool
 }
diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go
new file mode 100644
index 0000000..396ea3b
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_linux.go
@@ -0,0 +1,79 @@
+package sysinfo
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/libcontainer/cgroups"
+)
+
+// New returns a new SysInfo, using the filesystem to detect which features the kernel supports.
+func New(quiet bool) *SysInfo {
+	sysInfo := &SysInfo{}
+	if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
+		if !quiet {
+			logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err)
+		}
+	} else {
+		// If memory cgroup is mounted, MemoryLimit is always enabled.
+		sysInfo.MemoryLimit = true
+
+		_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
+		sysInfo.SwapLimit = err1 == nil
+		if !sysInfo.SwapLimit && !quiet {
+			logrus.Warn("Your kernel does not support swap memory limit.")
+		}
+
+		_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.oom_control"))
+		sysInfo.OomKillDisable = err == nil
+		if !sysInfo.OomKillDisable && !quiet {
+			logrus.Warnf("Your kernel does not support oom control.")
+		}
+	}
+
+	if cgroupCpuMountpoint, err := cgroups.FindCgroupMountpoint("cpu"); err != nil {
+		if !quiet {
+			logrus.Warnf("%v", err)
+		}
+	} else {
+		_, err := ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_period_us"))
+		sysInfo.CpuCfsPeriod = err == nil
+		if !sysInfo.CpuCfsPeriod && !quiet {
+			logrus.Warn("Your kernel does not support cgroup cfs period")
+		}
+		_, err = ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_quota_us"))
+		sysInfo.CpuCfsQuota = err == nil
+		if !sysInfo.CpuCfsQuota && !quiet {
+			logrus.Warn("Your kernel does not support cgroup cfs quotas")
+		}
+	}
+
+	// Checek if ipv4_forward is disabled.
+	if data, err := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward"); os.IsNotExist(err) {
+		sysInfo.IPv4ForwardingDisabled = true
+	} else {
+		if enabled, _ := strconv.Atoi(strings.TrimSpace(string(data))); enabled == 0 {
+			sysInfo.IPv4ForwardingDisabled = true
+		} else {
+			sysInfo.IPv4ForwardingDisabled = false
+		}
+	}
+
+	// Check if AppArmor is supported.
+	if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
+		sysInfo.AppArmor = false
+	} else {
+		sysInfo.AppArmor = true
+	}
+
+	// Check if Devices cgroup is mounted, it is hard requirement for container security.
+	if _, err := cgroups.FindCgroupMountpoint("devices"); err != nil {
+		logrus.Fatalf("Error mounting devices cgroup: %v", err)
+	}
+
+	return sysInfo
+}
diff --git a/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
new file mode 100644
index 0000000..b4d3151
--- /dev/null
+++ b/pkg/sysinfo/sysinfo_windows.go
@@ -0,0 +1,7 @@
+package sysinfo
+
+// TODO Windows
+func New(quiet bool) *SysInfo {
+	sysInfo := &SysInfo{}
+	return sysInfo
+}
diff --git a/pkg/system/filesys.go b/pkg/system/filesys.go
new file mode 100644
index 0000000..e1f70e8
--- /dev/null
+++ b/pkg/system/filesys.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package system
+
+import (
+	"os"
+)
+
+func MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
diff --git a/pkg/system/filesys_windows.go b/pkg/system/filesys_windows.go
new file mode 100644
index 0000000..90b5006
--- /dev/null
+++ b/pkg/system/filesys_windows.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"regexp"
+	"syscall"
+)
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, perm os.FileMode) error {
+	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+		return nil
+	}
+
+	// The rest of this method is copied from os.MkdirAll and should be kept
+	// as-is to ensure compatibility.
+
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
+	dir, err := os.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkdir",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = MkdirAll(path[0:j-1], perm)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke Mkdir and use its result.
+	err = os.Mkdir(path, perm)
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
diff --git a/pkg/system/lstat.go b/pkg/system/lstat.go
index 6c1ed2e..d0e43b3 100644
--- a/pkg/system/lstat.go
+++ b/pkg/system/lstat.go
@@ -6,10 +6,13 @@
 	"syscall"
 )
 
+// Lstat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Throws an error if the file does not exist
 func Lstat(path string) (*Stat_t, error) {
 	s := &syscall.Stat_t{}
-	err := syscall.Lstat(path, s)
-	if err != nil {
+	if err := syscall.Lstat(path, s); err != nil {
 		return nil, err
 	}
 	return fromStatT(s)
diff --git a/pkg/system/lstat_test.go b/pkg/system/lstat_test.go
index 9bab4d7..6bac492 100644
--- a/pkg/system/lstat_test.go
+++ b/pkg/system/lstat_test.go
@@ -5,6 +5,7 @@
 	"testing"
 )
 
+// TestLstat tests Lstat for existing and non existing files
 func TestLstat(t *testing.T) {
 	file, invalid, _, dir := prepareFiles(t)
 	defer os.RemoveAll(dir)
diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go
index 801e756..eee1be2 100644
--- a/pkg/system/lstat_windows.go
+++ b/pkg/system/lstat_windows.go
@@ -2,7 +2,28 @@
 
 package system
 
+import (
+	"os"
+)
+
+// Some explanation for my own sanity, and hopefully maintainers in the
+// future.
+//
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that not strictly necessary when already in an OS specific module.
+
 func Lstat(path string) (*Stat_t, error) {
-	// should not be called on cli code path
-	return nil, ErrNotSupportedPlatform
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Stat_t{
+		name:    fi.Name(),
+		size:    fi.Size(),
+		mode:    fi.Mode(),
+		modTime: fi.ModTime(),
+		isDir:   fi.IsDir()}, nil
 }
diff --git a/pkg/system/meminfo_linux.go b/pkg/system/meminfo_linux.go
index b7de3ff..e2ca140 100644
--- a/pkg/system/meminfo_linux.go
+++ b/pkg/system/meminfo_linux.go
@@ -15,8 +15,8 @@
 	ErrMalformed = errors.New("malformed file")
 )
 
-// Retrieve memory statistics of the host system and parse them into a MemInfo
-// type.
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+//  MemInfo type.
 func ReadMemInfo() (*MemInfo, error) {
 	file, err := os.Open("/proc/meminfo")
 	if err != nil {
@@ -26,6 +26,10 @@
 	return parseMemInfo(file)
 }
 
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given a io.Reader to the file.
+//
+// Throws error if there are problems reading from the file
 func parseMemInfo(reader io.Reader) (*MemInfo, error) {
 	meminfo := &MemInfo{}
 	scanner := bufio.NewScanner(reader)
diff --git a/pkg/system/meminfo_linux_test.go b/pkg/system/meminfo_linux_test.go
index 377405e..10ddf79 100644
--- a/pkg/system/meminfo_linux_test.go
+++ b/pkg/system/meminfo_linux_test.go
@@ -7,6 +7,7 @@
 	"github.com/docker/docker/pkg/units"
 )
 
+// TestMemInfo tests parseMemInfo with a static meminfo string
 func TestMemInfo(t *testing.T) {
 	const input = `
 	MemTotal:      1 kB
diff --git a/pkg/system/mknod.go b/pkg/system/mknod.go
index 06f9c6a..26617eb 100644
--- a/pkg/system/mknod.go
+++ b/pkg/system/mknod.go
@@ -6,6 +6,8 @@
 	"syscall"
 )
 
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev
 func Mknod(path string, mode uint32, dev int) error {
 	return syscall.Mknod(path, mode, dev)
 }
diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go
index b4020c1..1811542 100644
--- a/pkg/system/mknod_windows.go
+++ b/pkg/system/mknod_windows.go
@@ -3,10 +3,9 @@
 package system
 
 func Mknod(path string, mode uint32, dev int) error {
-	// should not be called on cli code path
 	return ErrNotSupportedPlatform
 }
 
 func Mkdev(major int64, minor int64) uint32 {
-	panic("Mkdev not implemented on windows, should not be called on cli code")
+	panic("Mkdev not implemented on Windows.")
 }
diff --git a/pkg/system/stat.go b/pkg/system/stat.go
index 186e852..e2ecfe5 100644
--- a/pkg/system/stat.go
+++ b/pkg/system/stat.go
@@ -1,9 +1,13 @@
+// +build !windows
+
 package system
 
 import (
 	"syscall"
 )
 
+// Stat_t type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file
 type Stat_t struct {
 	mode uint32
 	uid  uint32
diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go
index 072728d..3899b3e 100644
--- a/pkg/system/stat_linux.go
+++ b/pkg/system/stat_linux.go
@@ -4,6 +4,7 @@
 	"syscall"
 )
 
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
 func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
 	return &Stat_t{size: s.Size,
 		mode: s.Mode,
@@ -13,10 +14,13 @@
 		mtim: s.Mtim}, nil
 }
 
+// Stat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Throws an error if the file does not exist
 func Stat(path string) (*Stat_t, error) {
 	s := &syscall.Stat_t{}
-	err := syscall.Stat(path, s)
-	if err != nil {
+	if err := syscall.Stat(path, s); err != nil {
 		return nil, err
 	}
 	return fromStatT(s)
diff --git a/pkg/system/stat_test.go b/pkg/system/stat_test.go
index abcc8ea..4534129 100644
--- a/pkg/system/stat_test.go
+++ b/pkg/system/stat_test.go
@@ -6,6 +6,7 @@
 	"testing"
 )
 
+// TestFromStatT tests fromStatT for a tempfile
 func TestFromStatT(t *testing.T) {
 	file, _, _, dir := prepareFiles(t)
 	defer os.RemoveAll(dir)
diff --git a/pkg/system/stat_unsupported.go b/pkg/system/stat_unsupported.go
index 66323ee..7e0d034 100644
--- a/pkg/system/stat_unsupported.go
+++ b/pkg/system/stat_unsupported.go
@@ -6,6 +6,7 @@
 	"syscall"
 )
 
+// fromStatT creates a system.Stat_t type from a syscall.Stat_t type
 func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
 	return &Stat_t{size: s.Size,
 		mode: uint32(s.Mode),
diff --git a/pkg/system/stat_windows.go b/pkg/system/stat_windows.go
index 42d29d6..b1fd39e 100644
--- a/pkg/system/stat_windows.go
+++ b/pkg/system/stat_windows.go
@@ -3,15 +3,34 @@
 package system
 
 import (
-	"errors"
-	"syscall"
+	"os"
+	"time"
 )
 
-func fromStatT(s *syscall.Win32FileAttributeData) (*Stat_t, error) {
-	return nil, errors.New("fromStatT should not be called on windows path")
+type Stat_t struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+	isDir   bool
 }
 
-func Stat(path string) (*Stat_t, error) {
-	// should not be called on cli code path
-	return nil, ErrNotSupportedPlatform
+func (s Stat_t) Name() string {
+	return s.name
+}
+
+func (s Stat_t) Size() int64 {
+	return s.size
+}
+
+func (s Stat_t) Mode() os.FileMode {
+	return s.mode
+}
+
+func (s Stat_t) ModTime() time.Time {
+	return s.modTime
+}
+
+func (s Stat_t) IsDir() bool {
+	return s.isDir
 }
diff --git a/pkg/system/utimes_test.go b/pkg/system/utimes_test.go
index 1dea47c..350cce1 100644
--- a/pkg/system/utimes_test.go
+++ b/pkg/system/utimes_test.go
@@ -8,6 +8,7 @@
 	"testing"
 )
 
+// prepareFiles creates files for testing in the temp directory
 func prepareFiles(t *testing.T) (string, string, string, string) {
 	dir, err := ioutil.TempDir("", "docker-system-test")
 	if err != nil {
diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go
index 88fcbe4..a778bb0 100644
--- a/pkg/tarsum/tarsum.go
+++ b/pkg/tarsum/tarsum.go
@@ -1,6 +1,7 @@
 package tarsum
 
 import (
+	"archive/tar"
 	"bytes"
 	"compress/gzip"
 	"crypto"
@@ -11,8 +12,6 @@
 	"hash"
 	"io"
 	"strings"
-
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 const (
diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go
index 26f12cc..968d7c7 100644
--- a/pkg/tarsum/tarsum_test.go
+++ b/pkg/tarsum/tarsum_test.go
@@ -1,6 +1,7 @@
 package tarsum
 
 import (
+	"archive/tar"
 	"bytes"
 	"compress/gzip"
 	"crypto/md5"
@@ -14,8 +15,6 @@
 	"io/ioutil"
 	"os"
 	"testing"
-
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 type testLayer struct {
diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go
index 0ceb529..3cdc6dd 100644
--- a/pkg/tarsum/versioning.go
+++ b/pkg/tarsum/versioning.go
@@ -1,12 +1,11 @@
 package tarsum
 
 import (
+	"archive/tar"
 	"errors"
 	"sort"
 	"strconv"
 	"strings"
-
-	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
 )
 
 // versioning of the TarSum algorithm
diff --git a/pkg/term/tc_linux_cgo.go b/pkg/term/tc_linux_cgo.go
index ae9516c..d47cf59 100644
--- a/pkg/term/tc_linux_cgo.go
+++ b/pkg/term/tc_linux_cgo.go
@@ -24,6 +24,7 @@
 	newState := oldState.termios
 
 	C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
+	newState.Oflag = newState.Oflag | C.OPOST
 	if err := tcset(fd, &newState); err != 0 {
 		return nil, err
 	}
diff --git a/pkg/term/winconsole/console_windows_test.go b/pkg/term/winconsole/console_windows_test.go
index ee9d968..edb5d6f 100644
--- a/pkg/term/winconsole/console_windows_test.go
+++ b/pkg/term/winconsole/console_windows_test.go
@@ -18,7 +18,7 @@
 		t.Errorf(format, args)
 	}
 	if expectedValue != value {
-		t.Errorf("The value returned does not macth expected\n\tExpected:%v\n\t:Actual%v", expectedValue, value)
+		t.Errorf("The value returned does not match expected\n\tExpected:%v\n\t:Actual%v", expectedValue, value)
 		t.Errorf(format, args)
 	}
 }
diff --git a/pkg/term/winconsole/term_emulator_test.go b/pkg/term/winconsole/term_emulator_test.go
index 65de5a7..94104ff 100644
--- a/pkg/term/winconsole/term_emulator_test.go
+++ b/pkg/term/winconsole/term_emulator_test.go
@@ -138,7 +138,7 @@
 	AssertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 1, 1}, "content mismatch")
 }*/
 
-// Checks that the calls recieved
+// Checks that the calls received
 func assertHandlerOutput(t *testing.T, mock *mockTerminal, plainText string, commands ...string) {
 	text := make([]byte, 0, 3*len(plainText))
 	cmdIndex := 0
diff --git a/pkg/testutils/README.md b/pkg/testutils/README.md
deleted file mode 100644
index a208a90..0000000
--- a/pkg/testutils/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-`testutils` is a collection of utility functions to facilitate the writing
-of tests. It is used in various places by the Docker test suite.
diff --git a/pkg/testutils/utils.go b/pkg/testutils/utils.go
deleted file mode 100644
index 9c664ff..0000000
--- a/pkg/testutils/utils.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package testutils
-
-import (
-	"math/rand"
-	"testing"
-	"time"
-)
-
-const chars = "abcdefghijklmnopqrstuvwxyz" +
-	"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
-	"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
-
-// Timeout calls f and waits for 100ms for it to complete.
-// If it doesn't, it causes the tests to fail.
-// t must be a valid testing context.
-func Timeout(t *testing.T, f func()) {
-	onTimeout := time.After(100 * time.Millisecond)
-	onDone := make(chan bool)
-	go func() {
-		f()
-		close(onDone)
-	}()
-	select {
-	case <-onTimeout:
-		t.Fatalf("timeout")
-	case <-onDone:
-	}
-}
-
-// RandomString returns random string of specified length
-func RandomString(length int) string {
-	res := make([]byte, length)
-	for i := 0; i < length; i++ {
-		res[i] = chars[rand.Intn(len(chars))]
-	}
-	return string(res)
-}
diff --git a/pkg/timeoutconn/timeoutconn.go b/pkg/timeoutconn/timeoutconn.go
index 3a55455..d9534b5 100644
--- a/pkg/timeoutconn/timeoutconn.go
+++ b/pkg/timeoutconn/timeoutconn.go
@@ -17,8 +17,7 @@
 
 func (c *conn) Read(b []byte) (int, error) {
 	if c.timeout > 0 {
-		err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout))
-		if err != nil {
+		if err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)); err != nil {
 			return 0, err
 		}
 	}
diff --git a/pkg/timeutils/utils.go b/pkg/timeutils/utils.go
new file mode 100644
index 0000000..6af16a1
--- /dev/null
+++ b/pkg/timeutils/utils.go
@@ -0,0 +1,29 @@
+package timeutils
+
+import (
+	"strconv"
+	"strings"
+	"time"
+)
+
+// GetTimestamp tries to parse given string as RFC3339 time
+// or Unix timestamp (with seconds precision), if successful
+//returns a Unix timestamp as string otherwise returns value back.
+func GetTimestamp(value string) string {
+	var format string
+	if strings.Contains(value, ".") {
+		format = time.RFC3339Nano
+	} else {
+		format = time.RFC3339
+	}
+
+	loc := time.FixedZone(time.Now().Zone())
+	if len(value) < len(format) {
+		format = format[:len(value)]
+	}
+	t, err := time.ParseInLocation(format, value, loc)
+	if err != nil {
+		return value
+	}
+	return strconv.FormatInt(t.Unix(), 10)
+}
diff --git a/pkg/timeutils/utils_test.go b/pkg/timeutils/utils_test.go
new file mode 100644
index 0000000..1d724fb
--- /dev/null
+++ b/pkg/timeutils/utils_test.go
@@ -0,0 +1,36 @@
+package timeutils
+
+import (
+	"testing"
+)
+
+func TestGetTimestamp(t *testing.T) {
+	cases := []struct{ in, expected string }{
+		{"0", "-62167305600"}, // 0 gets parsed year 0
+
+		// Partial RFC3339 strings get parsed with second precision
+		{"2006-01-02T15:04:05.999999999+07:00", "1136189045"},
+		{"2006-01-02T15:04:05.999999999Z", "1136214245"},
+		{"2006-01-02T15:04:05.999999999", "1136214245"},
+		{"2006-01-02T15:04:05", "1136214245"},
+		{"2006-01-02T15:04", "1136214240"},
+		{"2006-01-02T15", "1136214000"},
+		{"2006-01-02T", "1136160000"},
+		{"2006-01-02", "1136160000"},
+		{"2006", "1136073600"},
+		{"2015-05-13T20:39:09Z", "1431549549"},
+
+		// unix timestamps returned as is
+		{"1136073600", "1136073600"},
+
+		// String fallback
+		{"invalid", "invalid"},
+	}
+
+	for _, c := range cases {
+		o := GetTimestamp(c.in)
+		if o != c.expected {
+			t.Fatalf("wrong value for '%s'. expected:'%s' got:'%s'", c.in, c.expected, o)
+		}
+	}
+}
diff --git a/pkg/transport/LICENSE b/pkg/transport/LICENSE
new file mode 100644
index 0000000..d02f24f
--- /dev/null
+++ b/pkg/transport/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/transport/transport.go b/pkg/transport/transport.go
new file mode 100644
index 0000000..510d8b4
--- /dev/null
+++ b/pkg/transport/transport.go
@@ -0,0 +1,148 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+	"sync"
+)
+
+type RequestModifier interface {
+	ModifyRequest(*http.Request) error
+}
+
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a RequestModifier that merges the HTTP headers
+// passed as an argument, with the HTTP headers of a request.
+//
+// If the same key is present in both, the modifying header values for that key,
+// are appended to the values for that same key in the request header.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+	return headerModifier(header)
+}
+
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+	for k, s := range http.Header(h) {
+		req.Header[k] = append(req.Header[k], s...)
+	}
+
+	return nil
+}
+
+// NewTransport returns an http.RoundTripper that modifies requests according to
+// the RequestModifiers passed in the arguments, before sending the requests to
+// the base http.RoundTripper (which, if nil, defaults to http.DefaultTransport).
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+	return &transport{
+		Modifiers: modifiers,
+		Base:      base,
+	}
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request
+type transport struct {
+	Modifiers []RequestModifier
+	Base      http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req2 := CloneRequest(req)
+	for _, modifier := range t.Modifiers {
+		if err := modifier.ModifyRequest(req2); err != nil {
+			return nil, err
+		}
+	}
+
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &OnEOFReader{
+		Rc: res.Body,
+		Fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// CloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func CloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+
+	return r2
+}
+
+// OnEOFReader ensures a callback function is called
+// on Close() and when the underlying Reader returns an io.EOF error
+type OnEOFReader struct {
+	Rc io.ReadCloser
+	Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.Rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *OnEOFReader) Close() error {
+	err := r.Rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *OnEOFReader) runFunc() {
+	if fn := r.Fn; fn != nil {
+		fn()
+		r.Fn = nil
+	}
+}
diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go
index 73c7e24..9aae5c0 100644
--- a/pkg/truncindex/truncindex.go
+++ b/pkg/truncindex/truncindex.go
@@ -14,12 +14,6 @@
 	ErrAmbiguousPrefix = errors.New("Multiple IDs found with provided prefix")
 )
 
-func init() {
-	// Change patricia max prefix per node length,
-	// because our len(ID) always 64
-	patricia.MaxPrefixPerNode = 64
-}
-
 // TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
 // This is used to retrieve image and container IDs by more convenient shorthand prefixes.
 type TruncIndex struct {
@@ -31,8 +25,11 @@
 // NewTruncIndex creates a new TruncIndex and initializes with a list of IDs
 func NewTruncIndex(ids []string) (idx *TruncIndex) {
 	idx = &TruncIndex{
-		ids:  make(map[string]struct{}),
-		trie: patricia.NewTrie(),
+		ids: make(map[string]struct{}),
+
+		// Change patricia max prefix per node length,
+		// because our len(ID) always 64
+		trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
 	}
 	for _, id := range ids {
 		idx.addID(id)
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
index 9286534..7253f6a 100644
--- a/pkg/truncindex/truncindex_test.go
+++ b/pkg/truncindex/truncindex_test.go
@@ -4,7 +4,7 @@
 	"math/rand"
 	"testing"
 
-	"github.com/docker/docker/pkg/common"
+	"github.com/docker/docker/pkg/stringid"
 )
 
 // Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
@@ -60,7 +60,7 @@
 	assertIndexGet(t, index, id[:1], "", true)
 
 	// An ambiguous id prefix should return an error
-	if _, err := index.Get(id[:4]); err == nil || err == nil {
+	if _, err := index.Get(id[:4]); err == nil {
 		t.Fatal("An ambiguous id prefix should return an error")
 	}
 
@@ -111,7 +111,7 @@
 func BenchmarkTruncIndexAdd100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -127,7 +127,7 @@
 func BenchmarkTruncIndexAdd250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -143,7 +143,7 @@
 func BenchmarkTruncIndexAdd500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -160,7 +160,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -184,7 +184,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -208,7 +208,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -231,7 +231,7 @@
 func BenchmarkTruncIndexDelete100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -254,7 +254,7 @@
 func BenchmarkTruncIndexDelete250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -277,7 +277,7 @@
 func BenchmarkTruncIndexDelete500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -300,7 +300,7 @@
 func BenchmarkTruncIndexNew100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -311,7 +311,7 @@
 func BenchmarkTruncIndexNew250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -322,7 +322,7 @@
 func BenchmarkTruncIndexNew500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, common.GenerateRandomID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -334,7 +334,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := common.GenerateRandomID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
@@ -359,7 +359,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := common.GenerateRandomID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
@@ -384,7 +384,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := common.GenerateRandomID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
diff --git a/pkg/ulimit/ulimit.go b/pkg/ulimit/ulimit.go
index 2375315..eb2ae4e 100644
--- a/pkg/ulimit/ulimit.go
+++ b/pkg/ulimit/ulimit.go
@@ -102,5 +102,5 @@
 }
 
 func (u *Ulimit) String() string {
-	return fmt.Sprintf("%s=%s:%s", u.Name, u.Soft, u.Hard)
+	return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
 }
diff --git a/pkg/ulimit/ulimit_test.go b/pkg/ulimit/ulimit_test.go
index 419b5e0..1e8c881 100644
--- a/pkg/ulimit/ulimit_test.go
+++ b/pkg/ulimit/ulimit_test.go
@@ -2,6 +2,13 @@
 
 import "testing"
 
+func TestParseValid(t *testing.T) {
+	u1 := &Ulimit{"nofile", 1024, 512}
+	if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 {
+		t.Fatalf("expected %q, but got %q", u1, u2)
+	}
+}
+
 func TestParseInvalidLimitType(t *testing.T) {
 	if _, err := Parse("notarealtype=1024:1024"); err == nil {
 		t.Fatalf("expected error on invalid ulimit type")
@@ -39,3 +46,10 @@
 		t.Fatal("expected error on bad value type")
 	}
 }
+
+func TestStringOutput(t *testing.T) {
+	u := &Ulimit{"nofile", 1024, 512}
+	if s := u.String(); s != "nofile=512:1024" {
+		t.Fatal("expected String to return nofile=512:1024, but got", s)
+	}
+}
diff --git a/pkg/units/duration.go b/pkg/units/duration.go
index cd33121..44012aa 100644
--- a/pkg/units/duration.go
+++ b/pkg/units/duration.go
@@ -27,5 +27,5 @@
 	} else if hours < 24*365*2 {
 		return fmt.Sprintf("%d months", hours/24/30)
 	}
-	return fmt.Sprintf("%f years", d.Hours()/24/365)
+	return fmt.Sprintf("%d years", int(d.Hours())/24/365)
 }
diff --git a/pkg/units/duration_test.go b/pkg/units/duration_test.go
index a229474..fcfb6b7 100644
--- a/pkg/units/duration_test.go
+++ b/pkg/units/duration_test.go
@@ -41,6 +41,6 @@
 	assertEquals(t, "13 months", HumanDuration(13*month))
 	assertEquals(t, "23 months", HumanDuration(23*month))
 	assertEquals(t, "24 months", HumanDuration(24*month))
-	assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week))
-	assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month))
+	assertEquals(t, "2 years", HumanDuration(24*month+2*week))
+	assertEquals(t, "3 years", HumanDuration(3*year+2*month))
 }
diff --git a/pkg/units/size.go b/pkg/units/size.go
index 7cfb57b..d7850ad 100644
--- a/pkg/units/size.go
+++ b/pkg/units/size.go
@@ -37,23 +37,25 @@
 var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
 var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
 
+// CustomSize returns a human-readable approximation of a size
+// using custom format
+func CustomSize(format string, size float64, base float64, _map []string) string {
+	i := 0
+	for size >= base {
+		size = size / base
+		i++
+	}
+	return fmt.Sprintf(format, size, _map[i])
+}
+
 // HumanSize returns a human-readable approximation of a size
 // using SI standard (eg. "44kB", "17MB")
 func HumanSize(size float64) string {
-	return intToString(float64(size), 1000.0, decimapAbbrs)
+	return CustomSize("%.4g %s", float64(size), 1000.0, decimapAbbrs)
 }
 
 func BytesSize(size float64) string {
-	return intToString(size, 1024.0, binaryAbbrs)
-}
-
-func intToString(size, unit float64, _map []string) string {
-	i := 0
-	for size >= unit {
-		size = size / unit
-		i++
-	}
-	return fmt.Sprintf("%.4g %s", size, _map[i])
+	return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
 }
 
 // FromHumanSize returns an integer from a human-readable specification of a
diff --git a/pkg/urlutil/git.go b/pkg/urlutil/git.go
index ba88ddf..dc4d666 100644
--- a/pkg/urlutil/git.go
+++ b/pkg/urlutil/git.go
@@ -1,6 +1,9 @@
 package urlutil
 
-import "strings"
+import (
+	"regexp"
+	"strings"
+)
 
 var (
 	validPrefixes = []string{
@@ -8,11 +11,13 @@
 		"github.com/",
 		"git@",
 	}
+
+	urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
 )
 
 // IsGitURL returns true if the provided str is a git repository URL.
 func IsGitURL(str string) bool {
-	if IsURL(str) && strings.HasSuffix(str, ".git") {
+	if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
 		return true
 	}
 	for _, prefix := range validPrefixes {
diff --git a/pkg/urlutil/git_test.go b/pkg/urlutil/git_test.go
index 01dcea7..bb89d8b 100644
--- a/pkg/urlutil/git_test.go
+++ b/pkg/urlutil/git_test.go
@@ -9,10 +9,15 @@
 		"git@bitbucket.org:atlassianlabs/atlassian-docker.git",
 		"https://github.com/docker/docker.git",
 		"http://github.com/docker/docker.git",
+		"http://github.com/docker/docker.git#branch",
+		"http://github.com/docker/docker.git#:dir",
 	}
 	incompleteGitUrls = []string{
 		"github.com/docker/docker",
 	}
+	invalidGitUrls = []string{
+		"http://github.com/docker/docker.git:#branch",
+	}
 )
 
 func TestValidGitTransport(t *testing.T) {
@@ -35,9 +40,16 @@
 			t.Fatalf("%q should be detected as valid Git url", url)
 		}
 	}
+
 	for _, url := range incompleteGitUrls {
 		if IsGitURL(url) == false {
 			t.Fatalf("%q should be detected as valid Git url", url)
 		}
 	}
+
+	for _, url := range invalidGitUrls {
+		if IsGitURL(url) == true {
+			t.Fatalf("%q should not be detected as valid Git prefix", url)
+		}
+	}
 }
diff --git a/pkg/useragent/README.md b/pkg/useragent/README.md
new file mode 100644
index 0000000..d9cb367
--- /dev/null
+++ b/pkg/useragent/README.md
@@ -0,0 +1 @@
+This package provides helper functions to pack version information into a single User-Agent header.
diff --git a/pkg/useragent/useragent.go b/pkg/useragent/useragent.go
new file mode 100644
index 0000000..9e35d1c
--- /dev/null
+++ b/pkg/useragent/useragent.go
@@ -0,0 +1,60 @@
+// Package useragent provides helper functions to pack
+// version information into a single User-Agent header.
+package useragent
+
+import (
+	"errors"
+	"strings"
+)
+
+var (
+	ErrNilRequest = errors.New("request cannot be nil")
+)
+
+// VersionInfo is used to model UserAgent versions.
+type VersionInfo struct {
+	Name    string
+	Version string
+}
+
+func (vi *VersionInfo) isValid() bool {
+	const stopChars = " \t\r\n/"
+	name := vi.Name
+	vers := vi.Version
+	if len(name) == 0 || strings.ContainsAny(name, stopChars) {
+		return false
+	}
+	if len(vers) == 0 || strings.ContainsAny(vers, stopChars) {
+		return false
+	}
+	return true
+}
+
+// Convert versions to a string and append the string to the string base.
+//
+// Each VersionInfo will be converted to a string in the format of
+// "product/version", where the "product" is get from the name field, while
+// version is get from the version field. Several pieces of verson information
+// will be concatinated and separated by space.
+//
+// Example:
+// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"})
+// results in "base foo/1.0 bar/2.0".
+func AppendVersions(base string, versions ...VersionInfo) string {
+	if len(versions) == 0 {
+		return base
+	}
+
+	verstrs := make([]string, 0, 1+len(versions))
+	if len(base) > 0 {
+		verstrs = append(verstrs, base)
+	}
+
+	for _, v := range versions {
+		if !v.isValid() {
+			continue
+		}
+		verstrs = append(verstrs, v.Name+"/"+v.Version)
+	}
+	return strings.Join(verstrs, " ")
+}
diff --git a/pkg/useragent/useragent_test.go b/pkg/useragent/useragent_test.go
new file mode 100644
index 0000000..0ad7243
--- /dev/null
+++ b/pkg/useragent/useragent_test.go
@@ -0,0 +1,31 @@
+package useragent
+
+import "testing"
+
+func TestVersionInfo(t *testing.T) {
+	vi := VersionInfo{"foo", "bar"}
+	if !vi.isValid() {
+		t.Fatalf("VersionInfo should be valid")
+	}
+	vi = VersionInfo{"", "bar"}
+	if vi.isValid() {
+		t.Fatalf("Expected VersionInfo to be invalid")
+	}
+	vi = VersionInfo{"foo", ""}
+	if vi.isValid() {
+		t.Fatalf("Expected VersionInfo to be invalid")
+	}
+}
+
+func TestAppendVersions(t *testing.T) {
+	vis := []VersionInfo{
+		{"foo", "1.0"},
+		{"bar", "0.1"},
+		{"pi", "3.1.4"},
+	}
+	v := AppendVersions("base", vis...)
+	expect := "base foo/1.0 bar/0.1 pi/3.1.4"
+	if v != expect {
+		t.Fatalf("expected %q, got %q", expect, v)
+	}
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
index cc802a6..bd5ec7a 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -37,7 +37,7 @@
 	return 0
 }
 
-// LessThan checks if a version is less than another version
+// LessThan checks if a version is less than another
 func (v Version) LessThan(other Version) bool {
 	return v.compareTo(other) == -1
 }
@@ -47,12 +47,12 @@
 	return v.compareTo(other) <= 0
 }
 
-// GreaterThan checks if a version is greater than another one
+// GreaterThan checks if a version is greater than another
 func (v Version) GreaterThan(other Version) bool {
 	return v.compareTo(other) == 1
 }
 
-// GreaterThanOrEqualTo checks ia version is greater than or equal to another
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
 func (v Version) GreaterThanOrEqualTo(other Version) bool {
 	return v.compareTo(other) >= 0
 }
diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md
index 52a8bf0..6ae7baf 100644
--- a/project/GOVERNANCE.md
+++ b/project/GOVERNANCE.md
@@ -4,7 +4,7 @@
 All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership.
 
 The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at 
-[Google Docs Folder](http://goo.gl/Alfj8r)
+[Google Docs Folder](https://goo.gl/Alfj8r)
 
 These include:
 
diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md
index 701e552..fd2156c 100644
--- a/project/PACKAGERS.md
+++ b/project/PACKAGERS.md
@@ -45,9 +45,9 @@
 To build Docker, you will need the following:
 
 * A recent version of Git and Mercurial
-* Go version 1.3 or later
+* Go version 1.4 or later
 * A clean checkout of the source added to a valid [Go
-  workspace](http://golang.org/doc/code.html#Workspaces) under the path
+  workspace](https://golang.org/doc/code.html#Workspaces) under the path
   *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
   explained in more detail below)
 
@@ -58,8 +58,7 @@
 * libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
   2.02.89 or later
 * btrfs-progs version 3.16.1 or later (unless using an older version is
-  absolutely necessary, in which case 3.8 is the minimum and the note below
-  regarding `btrfs_noversion` applies)
+  absolutely necessary, in which case 3.8 is the minimum)
 
 Be sure to also check out Docker's Dockerfile for the most up-to-date list of
 these build-time dependencies.
@@ -163,12 +162,6 @@
 export DOCKER_BUILDTAGS='selinux'
 ```
 
-If your version of btrfs-progs (also called btrfs-tools) is < 3.16.1, then you
-will need the following tag to not check for btrfs version headers:
-```bash
-export DOCKER_BUILDTAGS='btrfs_noversion'
-```
-
 There are build tags for disabling graphdrivers as well. By default, support
 for all graphdrivers are built in.
 
@@ -244,9 +237,9 @@
   installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first
   place this file is searched for)
 * "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit"
-  ([FHS 3.0 Draft](http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec))
+  ([FHS 3.0 Draft](https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec))
 * "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS
-  2.3](http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA))
+  2.3](https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA))
 
 If (and please, only if) one of the paths above is insufficient due to distro
 policy or similar issues, you may use the `DOCKER_INITPATH` environment variable
@@ -310,6 +303,7 @@
 * AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
   least the "auplink" utility from aufs-tools)
 * BTRFS graph driver (requires BTRFS support enabled in the kernel)
+* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
 
 ## Daemon Init Script
 
diff --git a/project/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md
index d9382b9..d2b9650 100644
--- a/project/RELEASE-CHECKLIST.md
+++ b/project/RELEASE-CHECKLIST.md
@@ -49,7 +49,17 @@
 ...
 ```
 
-### 2. Update CHANGELOG.md
+### 2. Bump the API version on master
+
+We don't want to stop contributions to master just because we are releasing. At
+the same time, now that the release branch exists, we don't want API changes to
+go to the now frozen API version.
+
+Create a new entry in `docs/sources/reference/api/` by copying the latest and
+bumping the version number (in both the file's name and content), and submit
+this in a PR against master.
+
+### 3. Update CHANGELOG.md
 
 You can run this command for reference with git 2.0:
 
@@ -124,7 +134,7 @@
 Obviously, you'll need to adjust version numbers as necessary.  If you just need
 a count, add a simple `| wc -l`.
 
-### 3. Change the contents of the VERSION file
+### 4. Change the contents of the VERSION file
 
 Before the big thing, you'll want to make successive release candidates and get
 people to test. The release candidate number `N` should be part of the version:
@@ -134,7 +144,7 @@
 echo ${RC_VERSION#v} > VERSION
 ```
 
-### 4. Test the docs
+### 5. Test the docs
 
 Make sure that your tree includes documentation for any modified or
 new features, syntax or semantic changes.
@@ -145,7 +155,7 @@
 make docs
 ```
 
-To make a shared test at http://beta-docs.docker.io:
+To make a shared test at https://beta-docs.docker.io:
 
 (You will need the `awsconfig` file added to the `docs/` dir)
 
@@ -153,7 +163,7 @@
 make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
 ```
 
-### 5. Commit and create a pull request to the "release" branch
+### 6. Commit and create a pull request to the "release" branch
 
 ```bash
 git add VERSION CHANGELOG.md
@@ -166,7 +176,7 @@
 open the PR against the "release" branch instead of accidentally against
 "master" (like so many brave souls before you already have).
 
-### 6. Publish release candidate binaries
+### 7. Publish release candidate binaries
 
 To run this you will need access to the release credentials. Get them from the
 Core maintainers.
@@ -219,7 +229,7 @@
 - The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group
 - Any social media that can bring some attention to the release candidate
 
-### 7. Iterate on successive release candidates
+### 8. Iterate on successive release candidates
 
 Spend several days along with the community explicitly investing time and
 resources to try and break Docker in every possible way, documenting any
@@ -269,7 +279,7 @@
 Repeat step 6 to tag the code, publish new binaries, announce availability, and
 get help testing.
 
-### 8. Finalize the bump branch
+### 9. Finalize the bump branch
 
 When you're happy with the quality of a release candidate, you can move on and
 create the real thing.
@@ -285,9 +295,9 @@
 
 You will then repeat step 6 to publish the binaries to test
 
-### 9. Get 2 other maintainers to validate the pull request
+### 10. Get 2 other maintainers to validate the pull request
 
-### 10. Publish final binaries
+### 11. Publish final binaries
 
 Once they're tested and reasonably believed to be working, run against
 get.docker.com:
@@ -303,7 +313,7 @@
        hack/release.sh
 ```
 
-### 9. Apply tag
+### 12. Apply tag
 
 It's very important that we don't make the tag until after the official
 release is uploaded to get.docker.com!
@@ -313,12 +323,12 @@
 git push origin $VERSION
 ```
 
-### 10. Go to github to merge the `bump_$VERSION` branch into release
+### 13. Go to github to merge the `bump_$VERSION` branch into release
 
 Don't forget to push that pretty blue button to delete the leftover
 branch afterwards!
 
-### 11. Update the docs branch
+### 14. Update the docs branch
 
 If this is a MAJOR.MINOR.0 release, you need to make an branch for the previous release's
 documentation:
@@ -341,7 +351,7 @@
 make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
 ```
 
-The docs will appear on http://docs.docker.com/ (though there may be cached
+The docs will appear on https://docs.docker.com/ (though there may be cached
 versions, so its worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
 For more information about documentation releases, see `docs/README.md`.
 
@@ -350,7 +360,7 @@
 _if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run
 and you can check its progress with the CDN Cloudfront Chrome addin.
 
-### 12. Create a new pull request to merge your bump commit back into master
+### 15. Create a new pull request to merge your bump commit back into master
 
 ```bash
 git checkout master
@@ -364,7 +374,14 @@
 Again, get two maintainers to validate, then merge, then push that pretty
 blue button to delete your branch.
 
-### 13. Rejoice and Evangelize!
+### 16. Update the VERSION files
+
+Now that version X.Y.Z is out, time to start working on the next! Update the
+content of the `VERSION` file to be the next minor (incrementing Y) and add the
+`-dev` suffix. For example, after 1.5.0 release, the `VERSION` file gets
+updated to `1.6.0-dev` (as in "1.6.0 in the making").
+
+### 17. Rejoice and Evangelize!
 
 Congratulations! You're done.
 
diff --git a/project/TOOLS.md b/project/TOOLS.md
index f057ccd..79bd283 100644
--- a/project/TOOLS.md
+++ b/project/TOOLS.md
@@ -14,11 +14,11 @@
 
 Leeroy is a Go application which integrates Jenkins with 
 GitHub pull requests. Leeroy uses 
-[GitHub hooks](http://developer.github.com/v3/repos/hooks/) 
+[GitHub hooks](https://developer.github.com/v3/repos/hooks/) 
 to listen for pull request notifications and starts jobs on your Jenkins 
 server.  Using the Jenkins [notification plugin][jnp], Leeroy updates the 
 pull request using GitHub's 
-[status API](http://developer.github.com/v3/repos/statuses/)
+[status API](https://developer.github.com/v3/repos/statuses/)
 with pending, success, failure, or error statuses.
 
 The leeroy repository is maintained at
diff --git a/registry/auth.go b/registry/auth.go
index 4baf114..33f8fa0 100644
--- a/registry/auth.go
+++ b/registry/auth.go
@@ -1,46 +1,20 @@
 package registry
 
 import (
-	"encoding/base64"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/http"
-	"os"
-	"path"
 	"strings"
 	"sync"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/utils"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/cliconfig"
 )
 
-const (
-	// Where we store the config file
-	CONFIGFILE = ".dockercfg"
-)
-
-var (
-	ErrConfigFileMissing = errors.New("The Auth config file is missing")
-)
-
-type AuthConfig struct {
-	Username      string `json:"username,omitempty"`
-	Password      string `json:"password,omitempty"`
-	Auth          string `json:"auth"`
-	Email         string `json:"email"`
-	ServerAddress string `json:"serveraddress,omitempty"`
-}
-
-type ConfigFile struct {
-	Configs  map[string]AuthConfig `json:"configs,omitempty"`
-	rootPath string
-}
-
 type RequestAuthorization struct {
-	authConfig       *AuthConfig
+	authConfig       *cliconfig.AuthConfig
 	registryEndpoint *Endpoint
 	resource         string
 	scope            string
@@ -51,7 +25,7 @@
 	tokenExpiration time.Time
 }
 
-func NewRequestAuthorization(authConfig *AuthConfig, registryEndpoint *Endpoint, resource, scope string, actions []string) *RequestAuthorization {
+func NewRequestAuthorization(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, resource, scope string, actions []string) *RequestAuthorization {
 	return &RequestAuthorization{
 		authConfig:       authConfig,
 		registryEndpoint: registryEndpoint,
@@ -66,25 +40,22 @@
 	defer auth.tokenLock.Unlock()
 	now := time.Now()
 	if now.Before(auth.tokenExpiration) {
-		log.Debugf("Using cached token for %s", auth.authConfig.Username)
+		logrus.Debugf("Using cached token for %s", auth.authConfig.Username)
 		return auth.tokenCache, nil
 	}
 
-	client := auth.registryEndpoint.HTTPClient()
-	factory := HTTPRequestFactory(nil)
-
 	for _, challenge := range auth.registryEndpoint.AuthChallenges {
 		switch strings.ToLower(challenge.Scheme) {
 		case "basic":
 			// no token necessary
 		case "bearer":
-			log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, auth.authConfig.Username)
+			logrus.Debugf("Getting bearer token with %s for %s", challenge.Parameters, auth.authConfig.Username)
 			params := map[string]string{}
 			for k, v := range challenge.Parameters {
 				params[k] = v
 			}
 			params["scope"] = fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ","))
-			token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint, client, factory)
+			token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint)
 			if err != nil {
 				return "", err
 			}
@@ -93,7 +64,7 @@
 
 			return token, nil
 		default:
-			log.Infof("Unsupported auth scheme: %q", challenge.Scheme)
+			logrus.Infof("Unsupported auth scheme: %q", challenge.Scheme)
 		}
 	}
 
@@ -116,136 +87,26 @@
 	return nil
 }
 
-// create a base64 encoded auth string to store in config
-func encodeAuth(authConfig *AuthConfig) string {
-	authStr := authConfig.Username + ":" + authConfig.Password
-	msg := []byte(authStr)
-	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
-	base64.StdEncoding.Encode(encoded, msg)
-	return string(encoded)
-}
-
-// decode the auth string
-func decodeAuth(authStr string) (string, string, error) {
-	decLen := base64.StdEncoding.DecodedLen(len(authStr))
-	decoded := make([]byte, decLen)
-	authByte := []byte(authStr)
-	n, err := base64.StdEncoding.Decode(decoded, authByte)
-	if err != nil {
-		return "", "", err
-	}
-	if n > decLen {
-		return "", "", fmt.Errorf("Something went wrong decoding auth config")
-	}
-	arr := strings.SplitN(string(decoded), ":", 2)
-	if len(arr) != 2 {
-		return "", "", fmt.Errorf("Invalid auth configuration file")
-	}
-	password := strings.Trim(arr[1], "\x00")
-	return arr[0], password, nil
-}
-
-// load up the auth config information and return values
-// FIXME: use the internal golang config parser
-func LoadConfig(rootPath string) (*ConfigFile, error) {
-	configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath}
-	confFile := path.Join(rootPath, CONFIGFILE)
-	if _, err := os.Stat(confFile); err != nil {
-		return &configFile, nil //missing file is not an error
-	}
-	b, err := ioutil.ReadFile(confFile)
-	if err != nil {
-		return &configFile, err
-	}
-
-	if err := json.Unmarshal(b, &configFile.Configs); err != nil {
-		arr := strings.Split(string(b), "\n")
-		if len(arr) < 2 {
-			return &configFile, fmt.Errorf("The Auth config file is empty")
-		}
-		authConfig := AuthConfig{}
-		origAuth := strings.Split(arr[0], " = ")
-		if len(origAuth) != 2 {
-			return &configFile, fmt.Errorf("Invalid Auth config file")
-		}
-		authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
-		if err != nil {
-			return &configFile, err
-		}
-		origEmail := strings.Split(arr[1], " = ")
-		if len(origEmail) != 2 {
-			return &configFile, fmt.Errorf("Invalid Auth config file")
-		}
-		authConfig.Email = origEmail[1]
-		authConfig.ServerAddress = IndexServerAddress()
-		// *TODO: Switch to using IndexServerName() instead?
-		configFile.Configs[IndexServerAddress()] = authConfig
-	} else {
-		for k, authConfig := range configFile.Configs {
-			authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
-			if err != nil {
-				return &configFile, err
-			}
-			authConfig.Auth = ""
-			authConfig.ServerAddress = k
-			configFile.Configs[k] = authConfig
-		}
-	}
-	return &configFile, nil
-}
-
-// save the auth config
-func SaveConfig(configFile *ConfigFile) error {
-	confFile := path.Join(configFile.rootPath, CONFIGFILE)
-	if len(configFile.Configs) == 0 {
-		os.Remove(confFile)
-		return nil
-	}
-
-	configs := make(map[string]AuthConfig, len(configFile.Configs))
-	for k, authConfig := range configFile.Configs {
-		authCopy := authConfig
-
-		authCopy.Auth = encodeAuth(&authCopy)
-		authCopy.Username = ""
-		authCopy.Password = ""
-		authCopy.ServerAddress = ""
-		configs[k] = authCopy
-	}
-
-	b, err := json.MarshalIndent(configs, "", "\t")
-	if err != nil {
-		return err
-	}
-	err = ioutil.WriteFile(confFile, b, 0600)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
 // Login tries to register/login to the registry server.
-func Login(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *utils.HTTPRequestFactory) (string, error) {
+func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
 	// Separates the v2 registry login logic from the v1 logic.
 	if registryEndpoint.Version == APIVersion2 {
-		return loginV2(authConfig, registryEndpoint, factory)
+		return loginV2(authConfig, registryEndpoint)
 	}
-
-	return loginV1(authConfig, registryEndpoint, factory)
+	return loginV1(authConfig, registryEndpoint)
 }
 
 // loginV1 tries to register/login to the v1 registry server.
-func loginV1(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *utils.HTTPRequestFactory) (string, error) {
+func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
 	var (
 		status        string
 		reqBody       []byte
 		err           error
-		client        = registryEndpoint.HTTPClient()
 		reqStatusCode = 0
 		serverAddress = authConfig.ServerAddress
 	)
 
-	log.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint)
+	logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint)
 
 	if serverAddress == "" {
 		return "", fmt.Errorf("Server Error: Server Address not set.")
@@ -264,7 +125,7 @@
 
 	// using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status.
 	b := strings.NewReader(string(jsonBody))
-	req1, err := client.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
+	req1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
 	if err != nil {
 		return "", fmt.Errorf("Server Error: %s", err)
 	}
@@ -285,9 +146,9 @@
 		}
 	} else if reqStatusCode == 400 {
 		if string(reqBody) == "\"Username or email already exists\"" {
-			req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+			req, err := http.NewRequest("GET", serverAddress+"users/", nil)
 			req.SetBasicAuth(authConfig.Username, authConfig.Password)
-			resp, err := client.Do(req)
+			resp, err := registryEndpoint.client.Do(req)
 			if err != nil {
 				return "", err
 			}
@@ -314,9 +175,9 @@
 	} else if reqStatusCode == 401 {
 		// This case would happen with private registries where /v1/users is
 		// protected, so people can use `docker login` as an auth check.
-		req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
+		req, err := http.NewRequest("GET", serverAddress+"users/", nil)
 		req.SetBasicAuth(authConfig.Username, authConfig.Password)
-		resp, err := client.Do(req)
+		resp, err := registryEndpoint.client.Do(req)
 		if err != nil {
 			return "", err
 		}
@@ -348,22 +209,21 @@
 // now, users should create their account through other means like directly from a web page
 // served by the v2 registry service provider. Whether this will be supported in the future
 // is to be determined.
-func loginV2(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *utils.HTTPRequestFactory) (string, error) {
-	log.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint)
+func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint) (string, error) {
+	logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint)
 	var (
 		err       error
 		allErrors []error
-		client    = registryEndpoint.HTTPClient()
 	)
 
 	for _, challenge := range registryEndpoint.AuthChallenges {
-		log.Debugf("trying %q auth challenge with params %s", challenge.Scheme, challenge.Parameters)
+		logrus.Debugf("trying %q auth challenge with params %s", challenge.Scheme, challenge.Parameters)
 
 		switch strings.ToLower(challenge.Scheme) {
 		case "basic":
-			err = tryV2BasicAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory)
+			err = tryV2BasicAuthLogin(authConfig, challenge.Parameters, registryEndpoint)
 		case "bearer":
-			err = tryV2TokenAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory)
+			err = tryV2TokenAuthLogin(authConfig, challenge.Parameters, registryEndpoint)
 		default:
 			// Unsupported challenge types are explicitly skipped.
 			err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme)
@@ -373,7 +233,7 @@
 			return "Login Succeeded", nil
 		}
 
-		log.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err)
+		logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err)
 
 		allErrors = append(allErrors, err)
 	}
@@ -381,15 +241,15 @@
 	return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors)
 }
 
-func tryV2BasicAuthLogin(authConfig *AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *utils.HTTPRequestFactory) error {
-	req, err := factory.NewRequest("GET", registryEndpoint.Path(""), nil)
+func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
+	req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
 	if err != nil {
 		return err
 	}
 
 	req.SetBasicAuth(authConfig.Username, authConfig.Password)
 
-	resp, err := client.Do(req)
+	resp, err := registryEndpoint.client.Do(req)
 	if err != nil {
 		return err
 	}
@@ -402,20 +262,20 @@
 	return nil
 }
 
-func tryV2TokenAuthLogin(authConfig *AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *utils.HTTPRequestFactory) error {
-	token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint, client, factory)
+func tryV2TokenAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error {
+	token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint)
 	if err != nil {
 		return err
 	}
 
-	req, err := factory.NewRequest("GET", registryEndpoint.Path(""), nil)
+	req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil)
 	if err != nil {
 		return err
 	}
 
 	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
 
-	resp, err := client.Do(req)
+	resp, err := registryEndpoint.client.Do(req)
 	if err != nil {
 		return err
 	}
@@ -429,10 +289,10 @@
 }
 
 // this method matches a auth configuration to a server address or a url
-func (config *ConfigFile) ResolveAuthConfig(index *IndexInfo) AuthConfig {
+func ResolveAuthConfig(config *cliconfig.ConfigFile, index *IndexInfo) cliconfig.AuthConfig {
 	configKey := index.GetAuthConfigKey()
 	// First try the happy case
-	if c, found := config.Configs[configKey]; found || index.Official {
+	if c, found := config.AuthConfigs[configKey]; found || index.Official {
 		return c
 	}
 
@@ -451,12 +311,12 @@
 
 	// Maybe they have a legacy config file, we will iterate the keys converting
 	// them to the new format and testing
-	for registry, config := range config.Configs {
+	for registry, ac := range config.AuthConfigs {
 		if configKey == convertToHostname(registry) {
-			return config
+			return ac
 		}
 	}
 
 	// When all else fails, return an empty auth config
-	return AuthConfig{}
+	return cliconfig.AuthConfig{}
 }
diff --git a/registry/auth_test.go b/registry/auth_test.go
index 9cc299a..71b963a 100644
--- a/registry/auth_test.go
+++ b/registry/auth_test.go
@@ -3,15 +3,18 @@
 import (
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"testing"
+
+	"github.com/docker/docker/cliconfig"
 )
 
 func TestEncodeAuth(t *testing.T) {
-	newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
-	authStr := encodeAuth(newAuthConfig)
-	decAuthConfig := &AuthConfig{}
+	newAuthConfig := &cliconfig.AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
+	authStr := cliconfig.EncodeAuth(newAuthConfig)
+	decAuthConfig := &cliconfig.AuthConfig{}
 	var err error
-	decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr)
+	decAuthConfig.Username, decAuthConfig.Password, err = cliconfig.DecodeAuth(authStr)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -26,18 +29,16 @@
 	}
 }
 
-func setupTempConfigFile() (*ConfigFile, error) {
+func setupTempConfigFile() (*cliconfig.ConfigFile, error) {
 	root, err := ioutil.TempDir("", "docker-test-auth")
 	if err != nil {
 		return nil, err
 	}
-	configFile := &ConfigFile{
-		rootPath: root,
-		Configs:  make(map[string]AuthConfig),
-	}
+	root = filepath.Join(root, cliconfig.CONFIGFILE)
+	configFile := cliconfig.NewConfigFile(root)
 
 	for _, registry := range []string{"testIndex", IndexServerAddress()} {
-		configFile.Configs[registry] = AuthConfig{
+		configFile.AuthConfigs[registry] = cliconfig.AuthConfig{
 			Username: "docker-user",
 			Password: "docker-pass",
 			Email:    "docker@docker.io",
@@ -52,14 +53,14 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer os.RemoveAll(configFile.rootPath)
+	defer os.RemoveAll(configFile.Filename())
 
-	err = SaveConfig(configFile)
+	err = configFile.Save()
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	authConfig := configFile.Configs["testIndex"]
+	authConfig := configFile.AuthConfigs["testIndex"]
 	if authConfig.Username != "docker-user" {
 		t.Fail()
 	}
@@ -79,9 +80,9 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer os.RemoveAll(configFile.rootPath)
+	defer os.RemoveAll(configFile.Filename())
 
-	indexConfig := configFile.Configs[IndexServerAddress()]
+	indexConfig := configFile.AuthConfigs[IndexServerAddress()]
 
 	officialIndex := &IndexInfo{
 		Official: true,
@@ -90,10 +91,10 @@
 		Official: false,
 	}
 
-	resolved := configFile.ResolveAuthConfig(officialIndex)
+	resolved := ResolveAuthConfig(configFile, officialIndex)
 	assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServerAddress()")
 
-	resolved = configFile.ResolveAuthConfig(privateIndex)
+	resolved = ResolveAuthConfig(configFile, privateIndex)
 	assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServerAddress()")
 }
 
@@ -102,26 +103,26 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer os.RemoveAll(configFile.rootPath)
+	defer os.RemoveAll(configFile.Filename())
 
-	registryAuth := AuthConfig{
+	registryAuth := cliconfig.AuthConfig{
 		Username: "foo-user",
 		Password: "foo-pass",
 		Email:    "foo@example.com",
 	}
-	localAuth := AuthConfig{
+	localAuth := cliconfig.AuthConfig{
 		Username: "bar-user",
 		Password: "bar-pass",
 		Email:    "bar@example.com",
 	}
-	officialAuth := AuthConfig{
+	officialAuth := cliconfig.AuthConfig{
 		Username: "baz-user",
 		Password: "baz-pass",
 		Email:    "baz@example.com",
 	}
-	configFile.Configs[IndexServerAddress()] = officialAuth
+	configFile.AuthConfigs[IndexServerAddress()] = officialAuth
 
-	expectedAuths := map[string]AuthConfig{
+	expectedAuths := map[string]cliconfig.AuthConfig{
 		"registry.example.com": registryAuth,
 		"localhost:8000":       localAuth,
 		"registry.com":         localAuth,
@@ -157,13 +158,13 @@
 			Name: configKey,
 		}
 		for _, registry := range registries {
-			configFile.Configs[registry] = configured
-			resolved := configFile.ResolveAuthConfig(index)
+			configFile.AuthConfigs[registry] = configured
+			resolved := ResolveAuthConfig(configFile, index)
 			if resolved.Email != configured.Email {
 				t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email)
 			}
-			delete(configFile.Configs, registry)
-			resolved = configFile.ResolveAuthConfig(index)
+			delete(configFile.AuthConfigs, registry)
+			resolved = ResolveAuthConfig(configFile, index)
 			if resolved.Email == configured.Email {
 				t.Errorf("%s -> %q == %q\n", registry, resolved.Email, configured.Email)
 			}
diff --git a/registry/config.go b/registry/config.go
index a706f17..92ef4d9 100644
--- a/registry/config.go
+++ b/registry/config.go
@@ -9,9 +9,9 @@
 	"regexp"
 	"strings"
 
+	"github.com/docker/docker/image"
 	"github.com/docker/docker/opts"
 	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/docker/utils"
 )
 
 // Options holds command line options.
@@ -60,10 +60,10 @@
 }
 
 func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) {
-	var ipnet_str string
-	if err = json.Unmarshal(b, &ipnet_str); err == nil {
+	var ipnetStr string
+	if err = json.Unmarshal(b, &ipnetStr); err == nil {
 		var cidr *net.IPNet
-		if _, cidr, err = net.ParseCIDR(ipnet_str); err == nil {
+		if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
 			*ipnet = netIPNet(*cidr)
 		}
 	}
@@ -189,7 +189,7 @@
 		return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI")
 	}
 
-	return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil
+	return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil
 }
 
 // ValidateIndexName validates an index name.
@@ -198,6 +198,9 @@
 	if val == "index."+IndexServerName() {
 		val = IndexServerName()
 	}
+	if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
+		return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val)
+	}
 	// *TODO: Check if valid hostname[:port]/ip[:port]?
 	return val, nil
 }
@@ -213,7 +216,7 @@
 		name = nameParts[0]
 
 		// the repository name must not be a valid image ID
-		if err := utils.ValidateID(name); err == nil {
+		if err := image.ValidateID(name); err == nil {
 			return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
 		}
 	} else {
@@ -235,6 +238,9 @@
 	if !validRepo.MatchString(name) {
 		return fmt.Errorf("Invalid repository name (%s), only [a-z0-9-_.] are allowed", name)
 	}
+	if strings.HasPrefix(name, "-") || strings.HasSuffix(name, "-") {
+		return fmt.Errorf("Invalid repository name (%s). Cannot begin or end with a hyphen.", name)
+	}
 	return nil
 }
 
@@ -352,7 +358,9 @@
 		// *TODO: Decouple index name from hostname (via registry configuration?)
 		repoInfo.LocalName = repoInfo.Index.Name + "/" + repoInfo.RemoteName
 		repoInfo.CanonicalName = repoInfo.LocalName
+
 	}
+
 	return repoInfo, nil
 }
 
diff --git a/registry/endpoint.go b/registry/endpoint.go
index 59ae4dd..ce92668 100644
--- a/registry/endpoint.go
+++ b/registry/endpoint.go
@@ -1,7 +1,6 @@
 package registry
 
 import (
-	"crypto/tls"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -10,9 +9,9 @@
 	"net/url"
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/registry/v2"
-	"github.com/docker/docker/utils"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/docker/pkg/transport"
 )
 
 // for mocking in unit tests
@@ -43,9 +42,9 @@
 }
 
 // NewEndpoint parses the given address to return a registry endpoint.
-func NewEndpoint(index *IndexInfo) (*Endpoint, error) {
+func NewEndpoint(index *IndexInfo, metaHeaders http.Header) (*Endpoint, error) {
 	// *TODO: Allow per-registry configuration of endpoints.
-	endpoint, err := newEndpoint(index.GetAuthConfigKey(), index.Secure)
+	endpoint, err := newEndpoint(index.GetAuthConfigKey(), index.Secure, metaHeaders)
 	if err != nil {
 		return nil, err
 	}
@@ -57,7 +56,7 @@
 }
 
 func validateEndpoint(endpoint *Endpoint) error {
-	log.Debugf("pinging registry endpoint %s", endpoint)
+	logrus.Debugf("pinging registry endpoint %s", endpoint)
 
 	// Try HTTPS ping to registry
 	endpoint.URL.Scheme = "https"
@@ -69,7 +68,7 @@
 		}
 
 		// If registry is insecure and HTTPS failed, fallback to HTTP.
-		log.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
+		logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
 		endpoint.URL.Scheme = "http"
 
 		var err2 error
@@ -83,7 +82,7 @@
 	return nil
 }
 
-func newEndpoint(address string, secure bool) (*Endpoint, error) {
+func newEndpoint(address string, secure bool, metaHeaders http.Header) (*Endpoint, error) {
 	var (
 		endpoint       = new(Endpoint)
 		trimmedAddress string
@@ -100,15 +99,18 @@
 		return nil, err
 	}
 	endpoint.IsSecure = secure
+	tr := NewTransport(ConnectTimeout, endpoint.IsSecure)
+	endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(metaHeaders)...))
 	return endpoint, nil
 }
 
-func (repoInfo *RepositoryInfo) GetEndpoint() (*Endpoint, error) {
-	return NewEndpoint(repoInfo.Index)
+func (repoInfo *RepositoryInfo) GetEndpoint(metaHeaders http.Header) (*Endpoint, error) {
+	return NewEndpoint(repoInfo.Index, metaHeaders)
 }
 
 // Endpoint stores basic information about a registry endpoint.
 type Endpoint struct {
+	client         *http.Client
 	URL            *url.URL
 	Version        APIVersion
 	IsSecure       bool
@@ -135,25 +137,24 @@
 
 func (e *Endpoint) Ping() (RegistryInfo, error) {
 	// The ping logic to use is determined by the registry endpoint version.
-	factory := HTTPRequestFactory(nil)
 	switch e.Version {
 	case APIVersion1:
-		return e.pingV1(factory)
+		return e.pingV1()
 	case APIVersion2:
-		return e.pingV2(factory)
+		return e.pingV2()
 	}
 
 	// APIVersionUnknown
 	// We should try v2 first...
 	e.Version = APIVersion2
-	regInfo, errV2 := e.pingV2(factory)
+	regInfo, errV2 := e.pingV2()
 	if errV2 == nil {
 		return regInfo, nil
 	}
 
 	// ... then fallback to v1.
 	e.Version = APIVersion1
-	regInfo, errV1 := e.pingV1(factory)
+	regInfo, errV1 := e.pingV1()
 	if errV1 == nil {
 		return regInfo, nil
 	}
@@ -162,8 +163,8 @@
 	return RegistryInfo{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1)
 }
 
-func (e *Endpoint) pingV1(factory *utils.HTTPRequestFactory) (RegistryInfo, error) {
-	log.Debugf("attempting v1 ping for registry endpoint %s", e)
+func (e *Endpoint) pingV1() (RegistryInfo, error) {
+	logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
 
 	if e.String() == IndexServerAddress() {
 		// Skip the check, we know this one is valid
@@ -171,12 +172,12 @@
 		return RegistryInfo{Standalone: false}, nil
 	}
 
-	req, err := factory.NewRequest("GET", e.Path("_ping"), nil)
+	req, err := http.NewRequest("GET", e.Path("_ping"), nil)
 	if err != nil {
 		return RegistryInfo{Standalone: false}, err
 	}
 
-	resp, _, err := doRequest(req, nil, ConnectTimeout, e.IsSecure)
+	resp, err := e.client.Do(req)
 	if err != nil {
 		return RegistryInfo{Standalone: false}, err
 	}
@@ -194,17 +195,17 @@
 		Standalone: true,
 	}
 	if err := json.Unmarshal(jsonString, &info); err != nil {
-		log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
+		logrus.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err)
 		// don't stop here. Just assume sane defaults
 	}
 	if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
-		log.Debugf("Registry version header: '%s'", hdr)
+		logrus.Debugf("Registry version header: '%s'", hdr)
 		info.Version = hdr
 	}
-	log.Debugf("RegistryInfo.Version: %q", info.Version)
+	logrus.Debugf("RegistryInfo.Version: %q", info.Version)
 
 	standalone := resp.Header.Get("X-Docker-Registry-Standalone")
-	log.Debugf("Registry standalone header: '%s'", standalone)
+	logrus.Debugf("Registry standalone header: '%s'", standalone)
 	// Accepted values are "true" (case-insensitive) and "1".
 	if strings.EqualFold(standalone, "true") || standalone == "1" {
 		info.Standalone = true
@@ -212,19 +213,19 @@
 		// there is a header set, and it is not "true" or "1", so assume fails
 		info.Standalone = false
 	}
-	log.Debugf("RegistryInfo.Standalone: %t", info.Standalone)
+	logrus.Debugf("RegistryInfo.Standalone: %t", info.Standalone)
 	return info, nil
 }
 
-func (e *Endpoint) pingV2(factory *utils.HTTPRequestFactory) (RegistryInfo, error) {
-	log.Debugf("attempting v2 ping for registry endpoint %s", e)
+func (e *Endpoint) pingV2() (RegistryInfo, error) {
+	logrus.Debugf("attempting v2 ping for registry endpoint %s", e)
 
-	req, err := factory.NewRequest("GET", e.Path(""), nil)
+	req, err := http.NewRequest("GET", e.Path(""), nil)
 	if err != nil {
 		return RegistryInfo{}, err
 	}
 
-	resp, _, err := doRequest(req, nil, ConnectTimeout, e.IsSecure)
+	resp, err := e.client.Do(req)
 	if err != nil {
 		return RegistryInfo{}, err
 	}
@@ -263,20 +264,3 @@
 
 	return RegistryInfo{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode))
 }
-
-func (e *Endpoint) HTTPClient() *http.Client {
-	tlsConfig := tls.Config{
-		MinVersion: tls.VersionTLS10,
-	}
-	if !e.IsSecure {
-		tlsConfig.InsecureSkipVerify = true
-	}
-	return &http.Client{
-		Transport: &http.Transport{
-			DisableKeepAlives: true,
-			Proxy:             http.ProxyFromEnvironment,
-			TLSClientConfig:   &tlsConfig,
-		},
-		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
-	}
-}
diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go
index 9567ba2..6f67867 100644
--- a/registry/endpoint_test.go
+++ b/registry/endpoint_test.go
@@ -19,7 +19,7 @@
 		{"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"},
 	}
 	for _, td := range testData {
-		e, err := newEndpoint(td.str, false)
+		e, err := newEndpoint(td.str, false, nil)
 		if err != nil {
 			t.Errorf("%q: %s", td.str, err)
 		}
@@ -60,6 +60,7 @@
 	testEndpoint := Endpoint{
 		URL:     testServerURL,
 		Version: APIVersionUnknown,
+		client:  HTTPClient(NewTransport(ConnectTimeout, false)),
 	}
 
 	if err = validateEndpoint(&testEndpoint); err != nil {
diff --git a/registry/httpfactory.go b/registry/httpfactory.go
deleted file mode 100644
index a4fea38..0000000
--- a/registry/httpfactory.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package registry
-
-import (
-	"runtime"
-
-	"github.com/docker/docker/autogen/dockerversion"
-	"github.com/docker/docker/pkg/parsers/kernel"
-	"github.com/docker/docker/utils"
-)
-
-func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
-	// FIXME: this replicates the 'info' job.
-	httpVersion := make([]utils.VersionInfo, 0, 4)
-	httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION})
-	httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()})
-	httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT})
-	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
-		httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()})
-	}
-	httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS})
-	httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH})
-	ud := utils.NewHTTPUserAgentDecorator(httpVersion...)
-	md := &utils.HTTPMetaHeadersDecorator{
-		Headers: metaHeaders,
-	}
-	factory := utils.NewHTTPRequestFactory(ud, md)
-	return factory
-}
-
-// simpleVersionInfo is a simple implementation of
-// the interface VersionInfo, which is used
-// to provide version information for some product,
-// component, etc. It stores the product name and the version
-// in string and returns them on calls to Name() and Version().
-type simpleVersionInfo struct {
-	name    string
-	version string
-}
-
-func (v *simpleVersionInfo) Name() string {
-	return v.name
-}
-
-func (v *simpleVersionInfo) Version() string {
-	return v.version
-}
diff --git a/registry/registry.go b/registry/registry.go
index a8bb833..4436f13 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -8,13 +8,20 @@
 	"io/ioutil"
 	"net"
 	"net/http"
+	"net/http/httputil"
 	"os"
 	"path"
+	"path/filepath"
+	"runtime"
 	"strings"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/autogen/dockerversion"
+	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/timeoutconn"
+	"github.com/docker/docker/pkg/transport"
+	"github.com/docker/docker/pkg/useragent"
 )
 
 var (
@@ -31,29 +38,118 @@
 	ConnectTimeout
 )
 
-func newClient(jar http.CookieJar, roots *x509.CertPool, certs []tls.Certificate, timeout TimeoutType, secure bool) *http.Client {
-	tlsConfig := tls.Config{
-		RootCAs: roots,
+// dockerUserAgent is the User-Agent the Docker client uses to identify itself.
+// It is populated on init(), comprising version information of different components.
+var dockerUserAgent string
+
+func init() {
+	httpVersion := make([]useragent.VersionInfo, 0, 6)
+	httpVersion = append(httpVersion, useragent.VersionInfo{"docker", dockerversion.VERSION})
+	httpVersion = append(httpVersion, useragent.VersionInfo{"go", runtime.Version()})
+	httpVersion = append(httpVersion, useragent.VersionInfo{"git-commit", dockerversion.GITCOMMIT})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, useragent.VersionInfo{"kernel", kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, useragent.VersionInfo{"os", runtime.GOOS})
+	httpVersion = append(httpVersion, useragent.VersionInfo{"arch", runtime.GOARCH})
+
+	dockerUserAgent = useragent.AppendVersions("", httpVersion...)
+}
+
+type httpsRequestModifier struct{ tlsConfig *tls.Config }
+
+// DRAGONS(tiborvass): If someone wonders why do we set tlsconfig in a roundtrip,
+// it's because it's so as to match the current behavior in master: we generate the
+// certpool on every-goddam-request. It's not great, but it allows people to just put
+// the certs in /etc/docker/certs.d/.../ and let docker "pick it up" immediately. Would
+// prefer an fsnotify implementation, but that was out of scope of my refactoring.
+func (m *httpsRequestModifier) ModifyRequest(req *http.Request) error {
+	var (
+		roots   *x509.CertPool
+		certs   []tls.Certificate
+		hostDir string
+	)
+
+	if req.URL.Scheme == "https" {
+		hasFile := func(files []os.FileInfo, name string) bool {
+			for _, f := range files {
+				if f.Name() == name {
+					return true
+				}
+			}
+			return false
+		}
+
+		if runtime.GOOS == "windows" {
+			hostDir = path.Join(os.TempDir(), "/docker/certs.d", req.URL.Host)
+		} else {
+			hostDir = path.Join("/etc/docker/certs.d", req.URL.Host)
+		}
+		logrus.Debugf("hostDir: %s", hostDir)
+		fs, err := ioutil.ReadDir(hostDir)
+		if err != nil && !os.IsNotExist(err) {
+			return nil
+		}
+
+		for _, f := range fs {
+			if strings.HasSuffix(f.Name(), ".crt") {
+				if roots == nil {
+					roots = x509.NewCertPool()
+				}
+				logrus.Debugf("crt: %s", hostDir+"/"+f.Name())
+				data, err := ioutil.ReadFile(filepath.Join(hostDir, f.Name()))
+				if err != nil {
+					return err
+				}
+				roots.AppendCertsFromPEM(data)
+			}
+			if strings.HasSuffix(f.Name(), ".cert") {
+				certName := f.Name()
+				keyName := certName[:len(certName)-5] + ".key"
+				logrus.Debugf("cert: %s", hostDir+"/"+f.Name())
+				if !hasFile(fs, keyName) {
+					return fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
+				}
+				cert, err := tls.LoadX509KeyPair(filepath.Join(hostDir, certName), path.Join(hostDir, keyName))
+				if err != nil {
+					return err
+				}
+				certs = append(certs, cert)
+			}
+			if strings.HasSuffix(f.Name(), ".key") {
+				keyName := f.Name()
+				certName := keyName[:len(keyName)-4] + ".cert"
+				logrus.Debugf("key: %s", hostDir+"/"+f.Name())
+				if !hasFile(fs, certName) {
+					return fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
+				}
+			}
+		}
+		m.tlsConfig.RootCAs = roots
+		m.tlsConfig.Certificates = certs
+	}
+	return nil
+}
+
+func NewTransport(timeout TimeoutType, secure bool) http.RoundTripper {
+	tlsConfig := &tls.Config{
 		// Avoid fallback to SSL protocols < TLS1.0
-		MinVersion:   tls.VersionTLS10,
-		Certificates: certs,
+		MinVersion:         tls.VersionTLS10,
+		InsecureSkipVerify: !secure,
 	}
 
-	if !secure {
-		tlsConfig.InsecureSkipVerify = true
-	}
-
-	httpTransport := &http.Transport{
+	tr := &http.Transport{
 		DisableKeepAlives: true,
 		Proxy:             http.ProxyFromEnvironment,
-		TLSClientConfig:   &tlsConfig,
+		TLSClientConfig:   tlsConfig,
 	}
 
 	switch timeout {
 	case ConnectTimeout:
-		httpTransport.Dial = func(proto string, addr string) (net.Conn, error) {
-			// Set the connect timeout to 5 seconds
-			d := net.Dialer{Timeout: 5 * time.Second, DualStack: true}
+		tr.Dial = func(proto string, addr string) (net.Conn, error) {
+			// Set the connect timeout to 30 seconds to allow for slower connection
+			// times...
+			d := net.Dialer{Timeout: 30 * time.Second, DualStack: true}
 
 			conn, err := d.Dial(proto, addr)
 			if err != nil {
@@ -64,7 +160,7 @@
 			return conn, nil
 		}
 	case ReceiveTimeout:
-		httpTransport.Dial = func(proto string, addr string) (net.Conn, error) {
+		tr.Dial = func(proto string, addr string) (net.Conn, error) {
 			d := net.Dialer{DualStack: true}
 
 			conn, err := d.Dial(proto, addr)
@@ -76,84 +172,57 @@
 		}
 	}
 
-	return &http.Client{
-		Transport:     httpTransport,
-		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
-		Jar:           jar,
+	if secure {
+		// note: httpsTransport also handles http transport
+		// but for HTTPS, it sets up the certs
+		return transport.NewTransport(tr, &httpsRequestModifier{tlsConfig})
 	}
+
+	return tr
 }
 
-func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) {
-	var (
-		pool  *x509.CertPool
-		certs []tls.Certificate
-	)
+// DockerHeaders returns request modifiers that ensure requests have
+// the User-Agent header set to dockerUserAgent and that metaHeaders
+// are added.
+func DockerHeaders(metaHeaders http.Header) []transport.RequestModifier {
+	modifiers := []transport.RequestModifier{
+		transport.NewHeaderRequestModifier(http.Header{"User-Agent": []string{dockerUserAgent}}),
+	}
+	if metaHeaders != nil {
+		modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
+	}
+	return modifiers
+}
 
-	if secure && req.URL.Scheme == "https" {
-		hasFile := func(files []os.FileInfo, name string) bool {
-			for _, f := range files {
-				if f.Name() == name {
-					return true
-				}
-			}
-			return false
-		}
+type debugTransport struct{ http.RoundTripper }
 
-		hostDir := path.Join("/etc/docker/certs.d", req.URL.Host)
-		log.Debugf("hostDir: %s", hostDir)
-		fs, err := ioutil.ReadDir(hostDir)
-		if err != nil && !os.IsNotExist(err) {
-			return nil, nil, err
-		}
+func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	dump, err := httputil.DumpRequestOut(req, false)
+	if err != nil {
+		fmt.Println("could not dump request")
+	}
+	fmt.Println(string(dump))
+	resp, err := tr.RoundTripper.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	dump, err = httputil.DumpResponse(resp, false)
+	if err != nil {
+		fmt.Println("could not dump response")
+	}
+	fmt.Println(string(dump))
+	return resp, err
+}
 
-		for _, f := range fs {
-			if strings.HasSuffix(f.Name(), ".crt") {
-				if pool == nil {
-					pool = x509.NewCertPool()
-				}
-				log.Debugf("crt: %s", hostDir+"/"+f.Name())
-				data, err := ioutil.ReadFile(path.Join(hostDir, f.Name()))
-				if err != nil {
-					return nil, nil, err
-				}
-				pool.AppendCertsFromPEM(data)
-			}
-			if strings.HasSuffix(f.Name(), ".cert") {
-				certName := f.Name()
-				keyName := certName[:len(certName)-5] + ".key"
-				log.Debugf("cert: %s", hostDir+"/"+f.Name())
-				if !hasFile(fs, keyName) {
-					return nil, nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
-				}
-				cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName))
-				if err != nil {
-					return nil, nil, err
-				}
-				certs = append(certs, cert)
-			}
-			if strings.HasSuffix(f.Name(), ".key") {
-				keyName := f.Name()
-				certName := keyName[:len(keyName)-4] + ".cert"
-				log.Debugf("key: %s", hostDir+"/"+f.Name())
-				if !hasFile(fs, certName) {
-					return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
-				}
-			}
-		}
+func HTTPClient(transport http.RoundTripper) *http.Client {
+	if transport == nil {
+		transport = NewTransport(ConnectTimeout, true)
 	}
 
-	if len(certs) == 0 {
-		client := newClient(jar, pool, nil, timeout, secure)
-		res, err := client.Do(req)
-		if err != nil {
-			return nil, nil, err
-		}
-		return res, client, nil
+	return &http.Client{
+		Transport:     transport,
+		CheckRedirect: AddRequiredHeadersToRedirectedRequests,
 	}
-
-	client := newClient(jar, pool, certs, timeout, secure)
-	res, err := client.Do(req)
-	return res, client, err
 }
 
 func trustedLocation(req *http.Request) bool {
diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go
index 57233d7..d58904d 100644
--- a/registry/registry_mock_test.go
+++ b/registry/registry_mock_test.go
@@ -18,7 +18,7 @@
 	"github.com/docker/docker/opts"
 	"github.com/gorilla/mux"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 )
 
 var (
@@ -134,7 +134,7 @@
 
 func handlerAccessLog(handler http.Handler) http.Handler {
 	logHandler := func(w http.ResponseWriter, r *http.Request) {
-		log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
+		logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL)
 		handler.ServeHTTP(w, r)
 	}
 	return http.HandlerFunc(logHandler)
@@ -171,7 +171,7 @@
 	return index
 }
 
-func makeServiceConfig(mirrors []string, insecure_registries []string) *ServiceConfig {
+func makeServiceConfig(mirrors []string, insecureRegistries []string) *ServiceConfig {
 	options := &Options{
 		Mirrors:            opts.NewListOpts(nil),
 		InsecureRegistries: opts.NewListOpts(nil),
@@ -181,9 +181,9 @@
 			options.Mirrors.Set(mirror)
 		}
 	}
-	if insecure_registries != nil {
-		for _, insecure_registries := range insecure_registries {
-			options.InsecureRegistries.Set(insecure_registries)
+	if insecureRegistries != nil {
+		for _, insecureRegistries := range insecureRegistries {
+			options.InsecureRegistries.Set(insecureRegistries)
 		}
 	}
 
@@ -467,7 +467,7 @@
  * WARNING: Don't push on the repos uncommented, it'll block the tests
  *
 func TestWait(t *testing.T) {
-	log.Println("Test HTTP server ready and waiting:", testHttpServer.URL)
+	logrus.Println("Test HTTP server ready and waiting:", testHttpServer.URL)
 	c := make(chan int)
 	<-c
 }
diff --git a/registry/registry_test.go b/registry/registry_test.go
index d96630d..33e86ff 100644
--- a/registry/registry_test.go
+++ b/registry/registry_test.go
@@ -7,7 +7,8 @@
 	"strings"
 	"testing"
 
-	"github.com/docker/docker/utils"
+	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/pkg/transport"
 )
 
 var (
@@ -20,46 +21,35 @@
 )
 
 func spawnTestRegistrySession(t *testing.T) *Session {
-	authConfig := &AuthConfig{}
-	endpoint, err := NewEndpoint(makeIndex("/v1/"))
+	authConfig := &cliconfig.AuthConfig{}
+	endpoint, err := NewEndpoint(makeIndex("/v1/"), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
-	r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), endpoint, true)
+	var tr http.RoundTripper = debugTransport{NewTransport(ReceiveTimeout, endpoint.IsSecure)}
+	tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(nil)...)
+	client := HTTPClient(tr)
+	r, err := NewSession(client, authConfig, endpoint)
 	if err != nil {
 		t.Fatal(err)
 	}
+	// In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true`
+	// header while authenticating, in order to retrieve a token that can be later used to
+	// perform authenticated actions.
+	//
+	// The mock v1 registry does not support that, (TODO(tiborvass): support it), instead,
+	// it will consider authenticated any request with the header `X-Docker-Token: fake-token`.
+	//
+	// Because we know that the client's transport is an `*authTransport` we simply cast it,
+	// in order to set the internal cached token to the fake token, and thus send that fake token
+	// upon every subsequent requests.
+	r.client.Transport.(*authTransport).token = token
 	return r
 }
 
-func TestPublicSession(t *testing.T) {
-	authConfig := &AuthConfig{}
-
-	getSessionDecorators := func(index *IndexInfo) int {
-		endpoint, err := NewEndpoint(index)
-		if err != nil {
-			t.Fatal(err)
-		}
-		r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), endpoint, true)
-		if err != nil {
-			t.Fatal(err)
-		}
-		return len(r.reqFactory.GetDecorators())
-	}
-
-	decorators := getSessionDecorators(makeIndex("/v1/"))
-	assertEqual(t, decorators, 0, "Expected no decorator on http session")
-
-	decorators = getSessionDecorators(makeHttpsIndex("/v1/"))
-	assertNotEqual(t, decorators, 0, "Expected decorator on https session")
-
-	decorators = getSessionDecorators(makePublicIndex())
-	assertEqual(t, decorators, 0, "Expected no decorator on public session")
-}
-
 func TestPingRegistryEndpoint(t *testing.T) {
 	testPing := func(index *IndexInfo, expectedStandalone bool, assertMessage string) {
-		ep, err := NewEndpoint(index)
+		ep, err := NewEndpoint(index, nil)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -79,7 +69,7 @@
 func TestEndpoint(t *testing.T) {
 	// Simple wrapper to fail test if err != nil
 	expandEndpoint := func(index *IndexInfo) *Endpoint {
-		endpoint, err := NewEndpoint(index)
+		endpoint, err := NewEndpoint(index, nil)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -88,7 +78,7 @@
 
 	assertInsecureIndex := func(index *IndexInfo) {
 		index.Secure = true
-		_, err := NewEndpoint(index)
+		_, err := NewEndpoint(index, nil)
 		assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index")
 		assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry  error for insecure index")
 		index.Secure = false
@@ -96,7 +86,7 @@
 
 	assertSecureIndex := func(index *IndexInfo) {
 		index.Secure = true
-		_, err := NewEndpoint(index)
+		_, err := NewEndpoint(index, nil)
 		assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index")
 		assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index")
 		index.Secure = false
@@ -162,14 +152,14 @@
 	}
 	for _, address := range badEndpoints {
 		index.Name = address
-		_, err := NewEndpoint(index)
+		_, err := NewEndpoint(index, nil)
 		checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint")
 	}
 }
 
 func TestGetRemoteHistory(t *testing.T) {
 	r := spawnTestRegistrySession(t)
-	hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"), token)
+	hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -181,16 +171,16 @@
 
 func TestLookupRemoteImage(t *testing.T) {
 	r := spawnTestRegistrySession(t)
-	err := r.LookupRemoteImage(imageID, makeURL("/v1/"), token)
+	err := r.LookupRemoteImage(imageID, makeURL("/v1/"))
 	assertEqual(t, err, nil, "Expected error of remote lookup to nil")
-	if err := r.LookupRemoteImage("abcdef", makeURL("/v1/"), token); err == nil {
+	if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil {
 		t.Fatal("Expected error of remote lookup to not nil")
 	}
 }
 
 func TestGetRemoteImageJSON(t *testing.T) {
 	r := spawnTestRegistrySession(t)
-	json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"), token)
+	json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -199,7 +189,7 @@
 		t.Fatal("Expected non-empty json")
 	}
 
-	_, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), token)
+	_, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"))
 	if err == nil {
 		t.Fatal("Expected image not found error")
 	}
@@ -207,7 +197,7 @@
 
 func TestGetRemoteImageLayer(t *testing.T) {
 	r := spawnTestRegistrySession(t)
-	data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), token, 0)
+	data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -215,7 +205,7 @@
 		t.Fatal("Expected non-nil data result")
 	}
 
-	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), token, 0)
+	_, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0)
 	if err == nil {
 		t.Fatal("Expected image not found error")
 	}
@@ -223,14 +213,14 @@
 
 func TestGetRemoteTags(t *testing.T) {
 	r := spawnTestRegistrySession(t)
-	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, token)
+	tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO)
 	if err != nil {
 		t.Fatal(err)
 	}
 	assertEqual(t, len(tags), 1, "Expected one tag")
 	assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID)
 
-	_, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", token)
+	_, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz")
 	if err == nil {
 		t.Fatal("Expected error when fetching tags for bogus repo")
 	}
@@ -264,7 +254,7 @@
 		Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
 	}
 
-	err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), token)
+	err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -273,7 +263,7 @@
 func TestPushImageLayerRegistry(t *testing.T) {
 	r := spawnTestRegistrySession(t)
 	layer := strings.NewReader("")
-	_, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), token, []byte{})
+	_, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -298,6 +288,9 @@
 	invalidRepoNames := []string{
 		"https://github.com/docker/docker",
 		"docker/Docker",
+		"-docker",
+		"-docker/docker",
+		"-docker.io/docker/docker",
 		"docker///docker",
 		"docker.io/docker/Docker",
 		"docker.io/docker///docker",
@@ -690,7 +683,7 @@
 
 func TestPushRegistryTag(t *testing.T) {
 	r := spawnTestRegistrySession(t)
-	err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"), token)
+	err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -735,7 +728,7 @@
 	}
 	assertEqual(t, results.NumResults, 1, "Expected 1 search results")
 	assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query")
-	assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' a ot hae 42 stars")
+	assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars")
 }
 
 func TestValidRemoteName(t *testing.T) {
diff --git a/registry/service.go b/registry/service.go
index 0483402..6811749 100644
--- a/registry/service.go
+++ b/registry/service.go
@@ -1,18 +1,11 @@
 package registry
 
 import (
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"net/http"
+
+	"github.com/docker/docker/cliconfig"
 )
 
-// Service exposes registry capabilities in the standard Engine
-// interface. Once installed, it extends the engine with the
-// following calls:
-//
-//  'auth': Authenticate against the public registry
-//  'search': Search for images on the public registry
-//  'pull': Download images from any registry (TODO)
-//  'push': Upload images to any registry (TODO)
 type Service struct {
 	Config *ServiceConfig
 }
@@ -25,201 +18,54 @@
 	}
 }
 
-// Install installs registry capabilities to eng.
-func (s *Service) Install(eng *engine.Engine) error {
-	eng.Register("auth", s.Auth)
-	eng.Register("search", s.Search)
-	eng.Register("resolve_repository", s.ResolveRepository)
-	eng.Register("resolve_index", s.ResolveIndex)
-	eng.Register("registry_config", s.GetRegistryConfig)
-	return nil
-}
-
 // Auth contacts the public registry with the provided credentials,
 // and returns OK if authentication was sucessful.
 // It can be used to verify the validity of a client's credentials.
-func (s *Service) Auth(job *engine.Job) engine.Status {
-	var (
-		authConfig = new(AuthConfig)
-		endpoint   *Endpoint
-		index      *IndexInfo
-		status     string
-		err        error
-	)
-
-	job.GetenvJson("authConfig", authConfig)
-
+func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) {
 	addr := authConfig.ServerAddress
 	if addr == "" {
 		// Use the official registry address if not specified.
 		addr = IndexServerAddress()
 	}
-
-	if index, err = ResolveIndexInfo(job, addr); err != nil {
-		return job.Error(err)
+	index, err := s.ResolveIndex(addr)
+	if err != nil {
+		return "", err
 	}
-
-	if endpoint, err = NewEndpoint(index); err != nil {
-		log.Errorf("unable to get new registry endpoint: %s", err)
-		return job.Error(err)
+	endpoint, err := NewEndpoint(index, nil)
+	if err != nil {
+		return "", err
 	}
-
 	authConfig.ServerAddress = endpoint.String()
-
-	if status, err = Login(authConfig, endpoint, HTTPRequestFactory(nil)); err != nil {
-		log.Errorf("unable to login against registry endpoint %s: %s", endpoint, err)
-		return job.Error(err)
-	}
-
-	log.Infof("successful registry login for endpoint %s: %s", endpoint, status)
-	job.Printf("%s\n", status)
-
-	return engine.StatusOK
+	return Login(authConfig, endpoint)
 }
 
 // Search queries the public registry for images matching the specified
 // search terms, and returns the results.
-//
-// Argument syntax: search TERM
-//
-// Option environment:
-//	'authConfig': json-encoded credentials to authenticate against the registry.
-//		The search extends to images only accessible via the credentials.
-//
-//	'metaHeaders': extra HTTP headers to include in the request to the registry.
-//		The headers should be passed as a json-encoded dictionary.
-//
-// Output:
-//	Results are sent as a collection of structured messages (using engine.Table).
-//	Each result is sent as a separate message.
-//	Results are ordered by number of stars on the public registry.
-func (s *Service) Search(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s TERM", job.Name)
+func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers map[string][]string) (*SearchResults, error) {
+	repoInfo, err := s.ResolveRepository(term)
+	if err != nil {
+		return nil, err
 	}
-	var (
-		term        = job.Args[0]
-		metaHeaders = map[string][]string{}
-		authConfig  = &AuthConfig{}
-	)
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("metaHeaders", metaHeaders)
 
-	repoInfo, err := ResolveRepositoryInfo(job, term)
-	if err != nil {
-		return job.Error(err)
-	}
 	// *TODO: Search multiple indexes.
-	endpoint, err := repoInfo.GetEndpoint()
+	endpoint, err := repoInfo.GetEndpoint(http.Header(headers))
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
-	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true)
+	r, err := NewSession(endpoint.client, authConfig, endpoint)
 	if err != nil {
-		return job.Error(err)
+		return nil, err
 	}
-	results, err := r.SearchRepositories(repoInfo.GetSearchTerm())
-	if err != nil {
-		return job.Error(err)
-	}
-	outs := engine.NewTable("star_count", 0)
-	for _, result := range results.Results {
-		out := &engine.Env{}
-		out.Import(result)
-		outs.Add(out)
-	}
-	outs.ReverseSort()
-	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+	return r.SearchRepositories(repoInfo.GetSearchTerm())
 }
 
 // ResolveRepository splits a repository name into its components
 // and configuration of the associated registry.
-func (s *Service) ResolveRepository(job *engine.Job) engine.Status {
-	var (
-		reposName = job.Args[0]
-	)
-
-	repoInfo, err := s.Config.NewRepositoryInfo(reposName)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	out := engine.Env{}
-	err = out.SetJson("repository", repoInfo)
-	if err != nil {
-		return job.Error(err)
-	}
-	out.WriteTo(job.Stdout)
-
-	return engine.StatusOK
-}
-
-// Convenience wrapper for calling resolve_repository Job from a running job.
-func ResolveRepositoryInfo(jobContext *engine.Job, reposName string) (*RepositoryInfo, error) {
-	job := jobContext.Eng.Job("resolve_repository", reposName)
-	env, err := job.Stdout.AddEnv()
-	if err != nil {
-		return nil, err
-	}
-	if err := job.Run(); err != nil {
-		return nil, err
-	}
-	info := RepositoryInfo{}
-	if err := env.GetJson("repository", &info); err != nil {
-		return nil, err
-	}
-	return &info, nil
+func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) {
+	return s.Config.NewRepositoryInfo(name)
 }
 
 // ResolveIndex takes indexName and returns index info
-func (s *Service) ResolveIndex(job *engine.Job) engine.Status {
-	var (
-		indexName = job.Args[0]
-	)
-
-	index, err := s.Config.NewIndexInfo(indexName)
-	if err != nil {
-		return job.Error(err)
-	}
-
-	out := engine.Env{}
-	err = out.SetJson("index", index)
-	if err != nil {
-		return job.Error(err)
-	}
-	out.WriteTo(job.Stdout)
-
-	return engine.StatusOK
-}
-
-// Convenience wrapper for calling resolve_index Job from a running job.
-func ResolveIndexInfo(jobContext *engine.Job, indexName string) (*IndexInfo, error) {
-	job := jobContext.Eng.Job("resolve_index", indexName)
-	env, err := job.Stdout.AddEnv()
-	if err != nil {
-		return nil, err
-	}
-	if err := job.Run(); err != nil {
-		return nil, err
-	}
-	info := IndexInfo{}
-	if err := env.GetJson("index", &info); err != nil {
-		return nil, err
-	}
-	return &info, nil
-}
-
-// GetRegistryConfig returns current registry configuration.
-func (s *Service) GetRegistryConfig(job *engine.Job) engine.Status {
-	out := engine.Env{}
-	err := out.SetJson("config", s.Config)
-	if err != nil {
-		return job.Error(err)
-	}
-	out.WriteTo(job.Stdout)
-
-	return engine.StatusOK
+func (s *Service) ResolveIndex(name string) (*IndexInfo, error) {
+	return s.Config.NewIndexInfo(name)
 }
diff --git a/registry/session.go b/registry/session.go
index bf04b58..71b27be 100644
--- a/registry/session.go
+++ b/registry/session.go
@@ -3,6 +3,8 @@
 import (
 	"bytes"
 	"crypto/sha256"
+	"errors"
+	"sync"
 	// this is required for some certificates
 	_ "crypto/sha512"
 	"encoding/hex"
@@ -17,66 +19,143 @@
 	"strings"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/tarsum"
-	"github.com/docker/docker/utils"
+	"github.com/docker/docker/pkg/transport"
 )
 
 type Session struct {
-	authConfig    *AuthConfig
-	reqFactory    *utils.HTTPRequestFactory
 	indexEndpoint *Endpoint
-	jar           *cookiejar.Jar
-	timeout       TimeoutType
+	client        *http.Client
+	// TODO(tiborvass): remove authConfig
+	authConfig *cliconfig.AuthConfig
 }
 
-func NewSession(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, endpoint *Endpoint, timeout bool) (r *Session, err error) {
+type authTransport struct {
+	http.RoundTripper
+	*cliconfig.AuthConfig
+
+	alwaysSetBasicAuth bool
+	token              []string
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// AuthTransport handles the auth layer when communicating with a v1 registry (private or official)
+//
+// For private v1 registries, set alwaysSetBasicAuth to true.
+//
+// For the official v1 registry, if there isn't already an Authorization header in the request,
+// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header.
+// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing
+// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent
+// requests.
+//
+// If the server sends a token without the client having requested it, it is ignored.
+//
+// This RoundTripper also has a CancelRequest method important for correct timeout handling.
+func AuthTransport(base http.RoundTripper, authConfig *cliconfig.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper {
+	if base == nil {
+		base = http.DefaultTransport
+	}
+	return &authTransport{
+		RoundTripper:       base,
+		AuthConfig:         authConfig,
+		alwaysSetBasicAuth: alwaysSetBasicAuth,
+		modReq:             make(map[*http.Request]*http.Request),
+	}
+}
+
+func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
+	req := transport.CloneRequest(orig)
+	tr.mu.Lock()
+	tr.modReq[orig] = req
+	tr.mu.Unlock()
+
+	if tr.alwaysSetBasicAuth {
+		req.SetBasicAuth(tr.Username, tr.Password)
+		return tr.RoundTripper.RoundTrip(req)
+	}
+
+	// Don't override
+	if req.Header.Get("Authorization") == "" {
+		if req.Header.Get("X-Docker-Token") == "true" && len(tr.Username) > 0 {
+			req.SetBasicAuth(tr.Username, tr.Password)
+		} else if len(tr.token) > 0 {
+			req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ","))
+		}
+	}
+	resp, err := tr.RoundTripper.RoundTrip(req)
+	if err != nil {
+		delete(tr.modReq, orig)
+		return nil, err
+	}
+	if len(resp.Header["X-Docker-Token"]) > 0 {
+		tr.token = resp.Header["X-Docker-Token"]
+	}
+	resp.Body = &transport.OnEOFReader{
+		Rc: resp.Body,
+		Fn: func() { delete(tr.modReq, orig) },
+	}
+	return resp, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (tr *authTransport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := tr.RoundTripper.(canceler); ok {
+		tr.mu.Lock()
+		modReq := tr.modReq[req]
+		delete(tr.modReq, req)
+		tr.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+// TODO(tiborvass): remove authConfig param once registry client v2 is vendored
+func NewSession(client *http.Client, authConfig *cliconfig.AuthConfig, endpoint *Endpoint) (r *Session, err error) {
 	r = &Session{
 		authConfig:    authConfig,
+		client:        client,
 		indexEndpoint: endpoint,
 	}
 
-	if timeout {
-		r.timeout = ReceiveTimeout
-	}
-
-	r.jar, err = cookiejar.New(nil)
-	if err != nil {
-		return nil, err
-	}
+	var alwaysSetBasicAuth bool
 
 	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
-	// alongside our requests.
-	if r.indexEndpoint.VersionString(1) != IndexServerAddress() && r.indexEndpoint.URL.Scheme == "https" {
-		info, err := r.indexEndpoint.Ping()
+	// alongside all our requests.
+	if endpoint.VersionString(1) != IndexServerAddress() && endpoint.URL.Scheme == "https" {
+		info, err := endpoint.Ping()
 		if err != nil {
 			return nil, err
 		}
-		if info.Standalone {
-			log.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", r.indexEndpoint.String())
-			dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
-			factory.AddDecorator(dec)
+
+		if info.Standalone && authConfig != nil {
+			logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String())
+			alwaysSetBasicAuth = true
 		}
 	}
 
-	r.reqFactory = factory
-	return r, nil
-}
+	client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth)
 
-func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) {
-	return doRequest(req, r.jar, r.timeout, r.indexEndpoint.IsSecure)
+	jar, err := cookiejar.New(nil)
+	if err != nil {
+		return nil, errors.New("cookiejar.New is not supposed to return an error")
+	}
+	client.Jar = jar
+
+	return r, nil
 }
 
 // Retrieve the history of a given image from the Registry.
 // Return a list of the parent's json (requested image included)
-func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) {
-	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil)
-	if err != nil {
-		return nil, err
-	}
-	setTokenAuth(req, token)
-	res, _, err := r.doRequest(req)
+func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) {
+	res, err := r.client.Get(registry + "images/" + imgID + "/ancestry")
 	if err != nil {
 		return nil, err
 	}
@@ -85,55 +164,40 @@
 		if res.StatusCode == 401 {
 			return nil, errLoginRequired
 		}
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
 	}
 
-	jsonString, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, fmt.Errorf("Error while reading the http response: %s", err)
+	var history []string
+	if err := json.NewDecoder(res.Body).Decode(&history); err != nil {
+		return nil, fmt.Errorf("Error while reading the http response: %v", err)
 	}
 
-	log.Debugf("Ancestry: %s", jsonString)
-	history := new([]string)
-	if err := json.Unmarshal(jsonString, history); err != nil {
-		return nil, err
-	}
-	return *history, nil
+	logrus.Debugf("Ancestry: %v", history)
+	return history, nil
 }
 
 // Check if an image exists in the Registry
-func (r *Session) LookupRemoteImage(imgID, registry string, token []string) error {
-	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
-	if err != nil {
-		return err
-	}
-	setTokenAuth(req, token)
-	res, _, err := r.doRequest(req)
+func (r *Session) LookupRemoteImage(imgID, registry string) error {
+	res, err := r.client.Get(registry + "images/" + imgID + "/json")
 	if err != nil {
 		return err
 	}
 	res.Body.Close()
 	if res.StatusCode != 200 {
-		return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
+		return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
 	}
 	return nil
 }
 
 // Retrieve an image from the Registry.
-func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) {
-	// Get the JSON
-	req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil)
-	if err != nil {
-		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
-	}
-	setTokenAuth(req, token)
-	res, _, err := r.doRequest(req)
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error) {
+	res, err := r.client.Get(registry + "images/" + imgID + "/json")
 	if err != nil {
 		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
 	}
 	defer res.Body.Close()
 	if res.StatusCode != 200 {
-		return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
+		return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
 	}
 	// if the size header is not present, then set it to '-1'
 	imageSize := -1
@@ -146,44 +210,44 @@
 
 	jsonString, err := ioutil.ReadAll(res.Body)
 	if err != nil {
-		return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString)
+		return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString)
 	}
 	return jsonString, imageSize, nil
 }
 
-func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) {
+func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) {
 	var (
 		retries    = 5
 		statusCode = 0
-		client     *http.Client
 		res        *http.Response
+		err        error
 		imageURL   = fmt.Sprintf("%simages/%s/layer", registry, imgID)
 	)
 
-	req, err := r.reqFactory.NewRequest("GET", imageURL, nil)
+	req, err := http.NewRequest("GET", imageURL, nil)
 	if err != nil {
-		return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
+		return nil, fmt.Errorf("Error while getting from the server: %v", err)
 	}
-	setTokenAuth(req, token)
+	// TODO: why are we doing retries at this level?
+	// These retries should be generic to both v1 and v2
 	for i := 1; i <= retries; i++ {
 		statusCode = 0
-		res, client, err = r.doRequest(req)
-		if err != nil {
-			log.Debugf("Error contacting registry: %s", err)
-			if res != nil {
-				if res.Body != nil {
-					res.Body.Close()
-				}
-				statusCode = res.StatusCode
-			}
-			if i == retries {
-				return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
-					statusCode, imgID)
-			}
-			time.Sleep(time.Duration(i) * 5 * time.Second)
-			continue
+		res, err = r.client.Do(req)
+		if err == nil {
+			break
 		}
-		break
+		logrus.Debugf("Error contacting registry %s: %v", registry, err)
+		if res != nil {
+			if res.Body != nil {
+				res.Body.Close()
+			}
+			statusCode = res.StatusCode
+		}
+		if i == retries {
+			return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
+				statusCode, imgID)
+		}
+		time.Sleep(time.Duration(i) * 5 * time.Second)
 	}
 
 	if res.StatusCode != 200 {
@@ -193,14 +257,14 @@
 	}
 
 	if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
-		log.Debugf("server supports resume")
-		return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil
+		logrus.Debugf("server supports resume")
+		return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
 	}
-	log.Debugf("server doesn't support resume")
+	logrus.Debugf("server doesn't support resume")
 	return res.Body, nil
 }
 
-func (r *Session) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) {
+func (r *Session) GetRemoteTags(registries []string, repository string) (map[string]string, error) {
 	if strings.Count(repository, "/") == 0 {
 		// This will be removed once the Registry supports auto-resolution on
 		// the "library" namespace
@@ -208,25 +272,20 @@
 	}
 	for _, host := range registries {
 		endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
-		req, err := r.reqFactory.NewRequest("GET", endpoint, nil)
-
-		if err != nil {
-			return nil, err
-		}
-		setTokenAuth(req, token)
-		res, _, err := r.doRequest(req)
+		res, err := r.client.Get(endpoint)
 		if err != nil {
 			return nil, err
 		}
 
-		log.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
+		logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
 		defer res.Body.Close()
 
-		if res.StatusCode != 200 && res.StatusCode != 404 {
-			continue
-		} else if res.StatusCode == 404 {
+		if res.StatusCode == 404 {
 			return nil, fmt.Errorf("Repository not found")
 		}
+		if res.StatusCode != 200 {
+			continue
+		}
 
 		result := make(map[string]string)
 		if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
@@ -259,18 +318,15 @@
 func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) {
 	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote)
 
-	log.Debugf("[registry] Calling GET %s", repositoryTarget)
+	logrus.Debugf("[registry] Calling GET %s", repositoryTarget)
 
-	req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil)
+	req, err := http.NewRequest("GET", repositoryTarget, nil)
 	if err != nil {
 		return nil, err
 	}
-	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
-		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
-	}
+	// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
 	req.Header.Set("X-Docker-Token", "true")
-
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -281,18 +337,13 @@
 	// TODO: Right now we're ignoring checksums in the response body.
 	// In the future, we need to use them to check image validity.
 	if res.StatusCode == 404 {
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
 	} else if res.StatusCode != 200 {
 		errBody, err := ioutil.ReadAll(res.Body)
 		if err != nil {
-			log.Debugf("Error reading response body: %s", err)
+			logrus.Debugf("Error reading response body: %s", err)
 		}
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res)
-	}
-
-	var tokens []string
-	if res.Header.Get("X-Docker-Token") != "" {
-		tokens = res.Header["X-Docker-Token"]
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res)
 	}
 
 	var endpoints []string
@@ -320,29 +371,29 @@
 	return &RepositoryData{
 		ImgList:   imgsData,
 		Endpoints: endpoints,
-		Tokens:    tokens,
 	}, nil
 }
 
-func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error {
+func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error {
 
-	log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum")
+	u := registry + "images/" + imgData.ID + "/checksum"
 
-	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil)
+	logrus.Debugf("[registry] Calling PUT %s", u)
+
+	req, err := http.NewRequest("PUT", u, nil)
 	if err != nil {
 		return err
 	}
-	setTokenAuth(req, token)
 	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
 	req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
 
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
-		return fmt.Errorf("Failed to upload metadata: %s", err)
+		return fmt.Errorf("Failed to upload metadata: %v", err)
 	}
 	defer res.Body.Close()
 	if len(res.Cookies()) > 0 {
-		r.jar.SetCookies(req.URL, res.Cookies())
+		r.client.Jar.SetCookies(req.URL, res.Cookies())
 	}
 	if res.StatusCode != 200 {
 		errBody, err := ioutil.ReadAll(res.Body)
@@ -361,29 +412,30 @@
 }
 
 // Push a local image to the registry
-func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error {
+func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error {
 
-	log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json")
+	u := registry + "images/" + imgData.ID + "/json"
 
-	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw))
+	logrus.Debugf("[registry] Calling PUT %s", u)
+
+	req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw))
 	if err != nil {
 		return err
 	}
 	req.Header.Add("Content-type", "application/json")
-	setTokenAuth(req, token)
 
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return fmt.Errorf("Failed to upload metadata: %s", err)
 	}
 	defer res.Body.Close()
 	if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") {
-		return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
+		return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
 	}
 	if res.StatusCode != 200 {
 		errBody, err := ioutil.ReadAll(res.Body)
 		if err != nil {
-			return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
+			return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
 		}
 		var jsonBody map[string]string
 		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
@@ -391,14 +443,16 @@
 		} else if jsonBody["error"] == "Image already exists" {
 			return ErrAlreadyExists
 		}
-		return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res)
+		return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res)
 	}
 	return nil
 }
 
-func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
+func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
 
-	log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer")
+	u := registry + "images/" + imgID + "/layer"
+
+	logrus.Debugf("[registry] Calling PUT %s", u)
 
 	tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0)
 	if err != nil {
@@ -409,17 +463,16 @@
 	h.Write([]byte{'\n'})
 	checksumLayer := io.TeeReader(tarsumLayer, h)
 
-	req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer)
+	req, err := http.NewRequest("PUT", u, checksumLayer)
 	if err != nil {
 		return "", "", err
 	}
 	req.Header.Add("Content-Type", "application/octet-stream")
 	req.ContentLength = -1
 	req.TransferEncoding = []string{"chunked"}
-	setTokenAuth(req, token)
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
-		return "", "", fmt.Errorf("Failed to upload layer: %s", err)
+		return "", "", fmt.Errorf("Failed to upload layer: %v", err)
 	}
 	if rc, ok := layer.(io.Closer); ok {
 		if err := rc.Close(); err != nil {
@@ -431,9 +484,9 @@
 	if res.StatusCode != 200 {
 		errBody, err := ioutil.ReadAll(res.Body)
 		if err != nil {
-			return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
+			return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
 		}
-		return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res)
+		return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res)
 	}
 
 	checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil))
@@ -442,25 +495,24 @@
 
 // push a tag on the registry.
 // Remote has the format '<user>/<repo>
-func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token []string) error {
+func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error {
 	// "jsonify" the string
 	revision = "\"" + revision + "\""
 	path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag)
 
-	req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision))
+	req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
 	if err != nil {
 		return err
 	}
 	req.Header.Add("Content-type", "application/json")
-	setTokenAuth(req, token)
 	req.ContentLength = int64(len(revision))
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return err
 	}
 	res.Body.Close()
 	if res.StatusCode != 200 && res.StatusCode != 201 {
-		return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
+		return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res)
 	}
 	return nil
 }
@@ -486,10 +538,11 @@
 		suffix = "images"
 	}
 	u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix)
-	log.Debugf("[registry] PUT %s", u)
-	log.Debugf("Image list pushed to index:\n%s", imgListJSON)
+	logrus.Debugf("[registry] PUT %s", u)
+	logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
 	headers := map[string][]string{
-		"Content-type":   {"application/json"},
+		"Content-type": {"application/json"},
+		// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
 		"X-Docker-Token": {"true"},
 	}
 	if validate {
@@ -507,7 +560,7 @@
 		}
 		res.Body.Close()
 		u = res.Header.Get("Location")
-		log.Debugf("Redirected to %s", u)
+		logrus.Debugf("Redirected to %s", u)
 	}
 	defer res.Body.Close()
 
@@ -520,53 +573,45 @@
 		if res.StatusCode != 200 && res.StatusCode != 201 {
 			errBody, err := ioutil.ReadAll(res.Body)
 			if err != nil {
-				log.Debugf("Error reading response body: %s", err)
+				logrus.Debugf("Error reading response body: %s", err)
 			}
-			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res)
+			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res)
 		}
-		if res.Header.Get("X-Docker-Token") != "" {
-			tokens = res.Header["X-Docker-Token"]
-			log.Debugf("Auth token: %v", tokens)
-		} else {
-			return nil, fmt.Errorf("Index response didn't contain an access token")
-		}
+		tokens = res.Header["X-Docker-Token"]
+		logrus.Debugf("Auth token: %v", tokens)
 
-		if res.Header.Get("X-Docker-Endpoints") != "" {
-			endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
-			if err != nil {
-				return nil, err
-			}
-		} else {
+		if res.Header.Get("X-Docker-Endpoints") == "" {
 			return nil, fmt.Errorf("Index response didn't contain any endpoints")
 		}
-	}
-	if validate {
+		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
+		if err != nil {
+			return nil, err
+		}
+	} else {
 		if res.StatusCode != 204 {
 			errBody, err := ioutil.ReadAll(res.Body)
 			if err != nil {
-				log.Debugf("Error reading response body: %s", err)
+				logrus.Debugf("Error reading response body: %s", err)
 			}
-			return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res)
+			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res)
 		}
 	}
 
 	return &RepositoryData{
-		Tokens:    tokens,
 		Endpoints: endpoints,
 	}, nil
 }
 
 func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
-	req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(body))
+	req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
 	if err != nil {
 		return nil, err
 	}
-	req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
 	req.ContentLength = int64(len(body))
 	for k, v := range headers {
 		req.Header[k] = v
 	}
-	response, _, err := r.doRequest(req)
+	response, err := r.client.Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -578,43 +623,29 @@
 }
 
 func (r *Session) SearchRepositories(term string) (*SearchResults, error) {
-	log.Debugf("Index server: %s", r.indexEndpoint)
+	logrus.Debugf("Index server: %s", r.indexEndpoint)
 	u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term)
-	req, err := r.reqFactory.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, err
-	}
-	if r.authConfig != nil && len(r.authConfig.Username) > 0 {
-		req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password)
-	}
-	req.Header.Set("X-Docker-Token", "true")
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Get(u)
 	if err != nil {
 		return nil, err
 	}
 	defer res.Body.Close()
 	if res.StatusCode != 200 {
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
 	}
 	result := new(SearchResults)
-	err = json.NewDecoder(res.Body).Decode(result)
-	return result, err
+	return result, json.NewDecoder(res.Body).Decode(result)
 }
 
-func (r *Session) GetAuthConfig(withPasswd bool) *AuthConfig {
+// TODO(tiborvass): remove this once registry client v2 is vendored
+func (r *Session) GetAuthConfig(withPasswd bool) *cliconfig.AuthConfig {
 	password := ""
 	if withPasswd {
 		password = r.authConfig.Password
 	}
-	return &AuthConfig{
+	return &cliconfig.AuthConfig{
 		Username: r.authConfig.Username,
 		Password: password,
 		Email:    r.authConfig.Email,
 	}
 }
-
-func setTokenAuth(req *http.Request, token []string) {
-	if req.Header.Get("Authorization") == "" { // Don't override
-		req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
-	}
-}
diff --git a/registry/session_v2.go b/registry/session_v2.go
index 833abee..43d638c 100644
--- a/registry/session_v2.go
+++ b/registry/session_v2.go
@@ -9,10 +9,10 @@
 	"net/http"
 	"strconv"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/registry/v2"
-	"github.com/docker/docker/utils"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/docker/pkg/httputils"
 )
 
 const DockerDigestHeader = "Docker-Content-Digest"
@@ -27,7 +27,7 @@
 func (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error) {
 	// TODO check if should use Mirror
 	if index.Official {
-		ep, err = newEndpoint(REGISTRYSERVER, true)
+		ep, err = newEndpoint(REGISTRYSERVER, true, nil)
 		if err != nil {
 			return
 		}
@@ -38,7 +38,7 @@
 	} else if r.indexEndpoint.String() == index.GetAuthConfigKey() {
 		ep = r.indexEndpoint
 	} else {
-		ep, err = NewEndpoint(index)
+		ep, err = NewEndpoint(index, nil)
 		if err != nil {
 			return
 		}
@@ -49,7 +49,7 @@
 }
 
 // GetV2Authorization gets the authorization needed to the given image
-// If readonly access is requested, then only the authorization may
+// If readonly access is requested, then the authorization may
 // only be used for Get operations.
 func (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bool) (auth *RequestAuthorization, err error) {
 	scopes := []string{"pull"}
@@ -57,7 +57,7 @@
 		scopes = append(scopes, "push")
 	}
 
-	log.Debugf("Getting authorization for %s %s", imageName, scopes)
+	logrus.Debugf("Getting authorization for %s %s", imageName, scopes)
 	return NewRequestAuthorization(r.GetAuthConfig(true), ep, "repository", imageName, scopes), nil
 }
 
@@ -75,16 +75,16 @@
 	}
 
 	method := "GET"
-	log.Debugf("[registry] Calling %q %s", method, routeURL)
+	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
 
-	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+	req, err := http.NewRequest(method, routeURL, nil)
 	if err != nil {
 		return nil, "", err
 	}
 	if err := auth.Authorize(req); err != nil {
 		return nil, "", err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return nil, "", err
 	}
@@ -95,7 +95,7 @@
 		} else if res.StatusCode == 404 {
 			return nil, "", ErrDoesNotExist
 		}
-		return nil, "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res)
+		return nil, "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res)
 	}
 
 	manifestBytes, err := ioutil.ReadAll(res.Body)
@@ -109,23 +109,23 @@
 // - Succeeded to head image blob (already exists)
 // - Failed with no error (continue to Push the Blob)
 // - Failed with error
-func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (bool, error) {
-	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum)
+func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, auth *RequestAuthorization) (bool, error) {
+	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst)
 	if err != nil {
 		return false, err
 	}
 
 	method := "HEAD"
-	log.Debugf("[registry] Calling %q %s", method, routeURL)
+	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
 
-	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+	req, err := http.NewRequest(method, routeURL, nil)
 	if err != nil {
 		return false, err
 	}
 	if err := auth.Authorize(req); err != nil {
 		return false, err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return false, err
 	}
@@ -141,25 +141,25 @@
 		return false, nil
 	}
 
-	return false, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s:%s", res.StatusCode, imageName, sumType, sum), res)
+	return false, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s", res.StatusCode, imageName, dgst), res)
 }
 
-func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, blobWrtr io.Writer, auth *RequestAuthorization) error {
-	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum)
+func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, blobWrtr io.Writer, auth *RequestAuthorization) error {
+	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst)
 	if err != nil {
 		return err
 	}
 
 	method := "GET"
-	log.Debugf("[registry] Calling %q %s", method, routeURL)
-	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
+	req, err := http.NewRequest(method, routeURL, nil)
 	if err != nil {
 		return err
 	}
 	if err := auth.Authorize(req); err != nil {
 		return err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return err
 	}
@@ -168,29 +168,29 @@
 		if res.StatusCode == 401 {
 			return errLoginRequired
 		}
-		return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
+		return httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res)
 	}
 
 	_, err = io.Copy(blobWrtr, res.Body)
 	return err
 }
 
-func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (io.ReadCloser, int64, error) {
-	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum)
+func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName string, dgst digest.Digest, auth *RequestAuthorization) (io.ReadCloser, int64, error) {
+	routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst)
 	if err != nil {
 		return nil, 0, err
 	}
 
 	method := "GET"
-	log.Debugf("[registry] Calling %q %s", method, routeURL)
-	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
+	req, err := http.NewRequest(method, routeURL, nil)
 	if err != nil {
 		return nil, 0, err
 	}
 	if err := auth.Authorize(req); err != nil {
 		return nil, 0, err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -198,7 +198,7 @@
 		if res.StatusCode == 401 {
 			return nil, 0, errLoginRequired
 		}
-		return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s:%s", res.StatusCode, imageName, sumType, sum), res)
+		return nil, 0, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s", res.StatusCode, imageName, dgst), res)
 	}
 	lenStr := res.Header.Get("Content-Length")
 	l, err := strconv.ParseInt(lenStr, 10, 64)
@@ -212,25 +212,25 @@
 // Push the image to the server for storage.
 // 'layer' is an uncompressed reader of the blob to be pushed.
 // The server will generate it's own checksum calculation.
-func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error {
+func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, blobRdr io.Reader, auth *RequestAuthorization) error {
 	location, err := r.initiateBlobUpload(ep, imageName, auth)
 	if err != nil {
 		return err
 	}
 
 	method := "PUT"
-	log.Debugf("[registry] Calling %q %s", method, location)
-	req, err := r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))
+	logrus.Debugf("[registry] Calling %q %s", method, location)
+	req, err := http.NewRequest(method, location, ioutil.NopCloser(blobRdr))
 	if err != nil {
 		return err
 	}
 	queryParams := req.URL.Query()
-	queryParams.Add("digest", sumType+":"+sumStr)
+	queryParams.Add("digest", dgst.String())
 	req.URL.RawQuery = queryParams.Encode()
 	if err := auth.Authorize(req); err != nil {
 		return err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return err
 	}
@@ -244,8 +244,8 @@
 		if err != nil {
 			return err
 		}
-		log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
-		return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob - %s:%s", res.StatusCode, imageName, sumType, sumStr), res)
+		logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
+		return httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob - %s", res.StatusCode, imageName, dgst), res)
 	}
 
 	return nil
@@ -258,8 +258,8 @@
 		return "", err
 	}
 
-	log.Debugf("[registry] Calling %q %s", "POST", routeURL)
-	req, err := r.reqFactory.NewRequest("POST", routeURL, nil)
+	logrus.Debugf("[registry] Calling %q %s", "POST", routeURL)
+	req, err := http.NewRequest("POST", routeURL, nil)
 	if err != nil {
 		return "", err
 	}
@@ -267,7 +267,7 @@
 	if err := auth.Authorize(req); err != nil {
 		return "", err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return "", err
 	}
@@ -285,8 +285,8 @@
 			return "", err
 		}
 
-		log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
-		return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res)
+		logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
+		return "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res)
 	}
 
 	if location = res.Header.Get("Location"); location == "" {
@@ -304,15 +304,15 @@
 	}
 
 	method := "PUT"
-	log.Debugf("[registry] Calling %q %s", method, routeURL)
-	req, err := r.reqFactory.NewRequest(method, routeURL, bytes.NewReader(signedManifest))
+	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
+	req, err := http.NewRequest(method, routeURL, bytes.NewReader(signedManifest))
 	if err != nil {
 		return "", err
 	}
 	if err := auth.Authorize(req); err != nil {
 		return "", err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return "", err
 	}
@@ -327,8 +327,8 @@
 		if err != nil {
 			return "", err
 		}
-		log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
-		return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res)
+		logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
+		return "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res)
 	}
 
 	hdrDigest, err := digest.ParseDigest(res.Header.Get(DockerDigestHeader))
@@ -352,8 +352,8 @@
 }
 
 type remoteTags struct {
-	name string
-	tags []string
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
 }
 
 // Given a repository name, returns a json array of string tags
@@ -364,16 +364,16 @@
 	}
 
 	method := "GET"
-	log.Debugf("[registry] Calling %q %s", method, routeURL)
+	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
 
-	req, err := r.reqFactory.NewRequest(method, routeURL, nil)
+	req, err := http.NewRequest(method, routeURL, nil)
 	if err != nil {
 		return nil, err
 	}
 	if err := auth.Authorize(req); err != nil {
 		return nil, err
 	}
-	res, _, err := r.doRequest(req)
+	res, err := r.client.Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -384,14 +384,12 @@
 		} else if res.StatusCode == 404 {
 			return nil, ErrDoesNotExist
 		}
-		return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res)
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res)
 	}
 
-	decoder := json.NewDecoder(res.Body)
 	var remote remoteTags
-	err = decoder.Decode(&remote)
-	if err != nil {
+	if err := json.NewDecoder(res.Body).Decode(&remote); err != nil {
 		return nil, fmt.Errorf("Error while decoding the http response: %s", err)
 	}
-	return remote.tags, nil
+	return remote.Tags, nil
 }
diff --git a/registry/token.go b/registry/token.go
index c79a8ca..e27cb6f 100644
--- a/registry/token.go
+++ b/registry/token.go
@@ -7,15 +7,13 @@
 	"net/http"
 	"net/url"
 	"strings"
-
-	"github.com/docker/docker/utils"
 )
 
 type tokenResponse struct {
 	Token string `json:"token"`
 }
 
-func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *utils.HTTPRequestFactory) (token string, err error) {
+func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (token string, err error) {
 	realm, ok := params["realm"]
 	if !ok {
 		return "", errors.New("no realm specified for token auth challenge")
@@ -34,7 +32,7 @@
 		}
 	}
 
-	req, err := factory.NewRequest("GET", realmURL.String(), nil)
+	req, err := http.NewRequest("GET", realmURL.String(), nil)
 	if err != nil {
 		return "", err
 	}
@@ -58,7 +56,7 @@
 
 	req.URL.RawQuery = reqParams.Encode()
 
-	resp, err := client.Do(req)
+	resp, err := registryEndpoint.client.Do(req)
 	if err != nil {
 		return "", err
 	}
diff --git a/registry/types.go b/registry/types.go
index bd0bf8b..2c8369b 100644
--- a/registry/types.go
+++ b/registry/types.go
@@ -5,6 +5,7 @@
 	IsOfficial  bool   `json:"is_official"`
 	Name        string `json:"name"`
 	IsTrusted   bool   `json:"is_trusted"`
+	IsAutomated bool   `json:"is_automated"`
 	Description string `json:"description"`
 }
 
diff --git a/registry/v2/descriptors.go b/registry/v2/descriptors.go
deleted file mode 100644
index 68d1824..0000000
--- a/registry/v2/descriptors.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package v2
-
-import "net/http"
-
-// TODO(stevvooe): Add route descriptors for each named route, along with
-// accepted methods, parameters, returned status codes and error codes.
-
-// ErrorDescriptor provides relevant information about a given error code.
-type ErrorDescriptor struct {
-	// Code is the error code that this descriptor describes.
-	Code ErrorCode
-
-	// Value provides a unique, string key, often captilized with
-	// underscores, to identify the error code. This value is used as the
-	// keyed value when serializing api errors.
-	Value string
-
-	// Message is a short, human readable decription of the error condition
-	// included in API responses.
-	Message string
-
-	// Description provides a complete account of the errors purpose, suitable
-	// for use in documentation.
-	Description string
-
-	// HTTPStatusCodes provides a list of status under which this error
-	// condition may arise. If it is empty, the error condition may be seen
-	// for any status code.
-	HTTPStatusCodes []int
-}
-
-// ErrorDescriptors provides a list of HTTP API Error codes that may be
-// encountered when interacting with the registry API.
-var ErrorDescriptors = []ErrorDescriptor{
-	{
-		Code:    ErrorCodeUnknown,
-		Value:   "UNKNOWN",
-		Message: "unknown error",
-		Description: `Generic error returned when the error does not have an
-		API classification.`,
-	},
-	{
-		Code:    ErrorCodeDigestInvalid,
-		Value:   "DIGEST_INVALID",
-		Message: "provided digest did not match uploaded content",
-		Description: `When a blob is uploaded, the registry will check that
-		the content matches the digest provided by the client. The error may
-		include a detail structure with the key "digest", including the
-		invalid digest string. This error may also be returned when a manifest
-		includes an invalid layer digest.`,
-		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
-	},
-	{
-		Code:    ErrorCodeSizeInvalid,
-		Value:   "SIZE_INVALID",
-		Message: "provided length did not match content length",
-		Description: `When a layer is uploaded, the provided size will be
-		checked against the uploaded content. If they do not match, this error
-		will be returned.`,
-		HTTPStatusCodes: []int{http.StatusBadRequest},
-	},
-	{
-		Code:    ErrorCodeNameInvalid,
-		Value:   "NAME_INVALID",
-		Message: "manifest name did not match URI",
-		Description: `During a manifest upload, if the name in the manifest
-		does not match the uri name, this error will be returned.`,
-		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
-	},
-	{
-		Code:    ErrorCodeTagInvalid,
-		Value:   "TAG_INVALID",
-		Message: "manifest tag did not match URI",
-		Description: `During a manifest upload, if the tag in the manifest
-		does not match the uri tag, this error will be returned.`,
-		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
-	},
-	{
-		Code:    ErrorCodeNameUnknown,
-		Value:   "NAME_UNKNOWN",
-		Message: "repository name not known to registry",
-		Description: `This is returned if the name used during an operation is
-		unknown to the registry.`,
-		HTTPStatusCodes: []int{http.StatusNotFound},
-	},
-	{
-		Code:    ErrorCodeManifestUnknown,
-		Value:   "MANIFEST_UNKNOWN",
-		Message: "manifest unknown",
-		Description: `This error is returned when the manifest, identified by
-		name and tag is unknown to the repository.`,
-		HTTPStatusCodes: []int{http.StatusNotFound},
-	},
-	{
-		Code:    ErrorCodeManifestInvalid,
-		Value:   "MANIFEST_INVALID",
-		Message: "manifest invalid",
-		Description: `During upload, manifests undergo several checks ensuring
-		validity. If those checks fail, this error may be returned, unless a
-		more specific error is included. The detail will contain information
-		the failed validation.`,
-		HTTPStatusCodes: []int{http.StatusBadRequest},
-	},
-	{
-		Code:    ErrorCodeManifestUnverified,
-		Value:   "MANIFEST_UNVERIFIED",
-		Message: "manifest failed signature verification",
-		Description: `During manifest upload, if the manifest fails signature
-		verification, this error will be returned.`,
-		HTTPStatusCodes: []int{http.StatusBadRequest},
-	},
-	{
-		Code:    ErrorCodeBlobUnknown,
-		Value:   "BLOB_UNKNOWN",
-		Message: "blob unknown to registry",
-		Description: `This error may be returned when a blob is unknown to the
-		registry in a specified repository. This can be returned with a
-		standard get or if a manifest references an unknown layer during
-		upload.`,
-		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
-	},
-
-	{
-		Code:    ErrorCodeBlobUploadUnknown,
-		Value:   "BLOB_UPLOAD_UNKNOWN",
-		Message: "blob upload unknown to registry",
-		Description: `If a blob upload has been cancelled or was never
-		started, this error code may be returned.`,
-		HTTPStatusCodes: []int{http.StatusNotFound},
-	},
-}
-
-var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor
-var idToDescriptors map[string]ErrorDescriptor
-
-func init() {
-	errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(ErrorDescriptors))
-	idToDescriptors = make(map[string]ErrorDescriptor, len(ErrorDescriptors))
-
-	for _, descriptor := range ErrorDescriptors {
-		errorCodeToDescriptors[descriptor.Code] = descriptor
-		idToDescriptors[descriptor.Value] = descriptor
-	}
-}
diff --git a/registry/v2/doc.go b/registry/v2/doc.go
deleted file mode 100644
index 30fe227..0000000
--- a/registry/v2/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package v2 describes routes, urls and the error codes used in the Docker
-// Registry JSON HTTP API V2. In addition to declarations, descriptors are
-// provided for routes and error codes that can be used for implementation and
-// automatically generating documentation.
-//
-// Definitions here are considered to be locked down for the V2 registry api.
-// Any changes must be considered carefully and should not proceed without a
-// change proposal.
-//
-// Currently, while the HTTP API definitions are considered stable, the Go API
-// exports are considered unstable. Go API consumers should take care when
-// relying on these definitions until this message is deleted.
-package v2
diff --git a/registry/v2/errors.go b/registry/v2/errors.go
deleted file mode 100644
index 8c85d3a..0000000
--- a/registry/v2/errors.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package v2
-
-import (
-	"fmt"
-	"strings"
-)
-
-// ErrorCode represents the error type. The errors are serialized via strings
-// and the integer format may change and should *never* be exported.
-type ErrorCode int
-
-const (
-	// ErrorCodeUnknown is a catch-all for errors not defined below.
-	ErrorCodeUnknown ErrorCode = iota
-
-	// ErrorCodeDigestInvalid is returned when uploading a blob if the
-	// provided digest does not match the blob contents.
-	ErrorCodeDigestInvalid
-
-	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
-	// size does not match the content length.
-	ErrorCodeSizeInvalid
-
-	// ErrorCodeNameInvalid is returned when the name in the manifest does not
-	// match the provided name.
-	ErrorCodeNameInvalid
-
-	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
-	// match the provided tag.
-	ErrorCodeTagInvalid
-
-	// ErrorCodeNameUnknown when the repository name is not known.
-	ErrorCodeNameUnknown
-
-	// ErrorCodeManifestUnknown returned when image manifest is unknown.
-	ErrorCodeManifestUnknown
-
-	// ErrorCodeManifestInvalid returned when an image manifest is invalid,
-	// typically during a PUT operation. This error encompasses all errors
-	// encountered during manifest validation that aren't signature errors.
-	ErrorCodeManifestInvalid
-
-	// ErrorCodeManifestUnverified is returned when the manifest fails
-	// signature verfication.
-	ErrorCodeManifestUnverified
-
-	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
-	// registry. This can happen when the manifest references a nonexistent
-	// layer or the result is not found by a blob fetch.
-	ErrorCodeBlobUnknown
-
-	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
-	ErrorCodeBlobUploadUnknown
-)
-
-// ParseErrorCode attempts to parse the error code string, returning
-// ErrorCodeUnknown if the error is not known.
-func ParseErrorCode(s string) ErrorCode {
-	desc, ok := idToDescriptors[s]
-
-	if !ok {
-		return ErrorCodeUnknown
-	}
-
-	return desc.Code
-}
-
-// Descriptor returns the descriptor for the error code.
-func (ec ErrorCode) Descriptor() ErrorDescriptor {
-	d, ok := errorCodeToDescriptors[ec]
-
-	if !ok {
-		return ErrorCodeUnknown.Descriptor()
-	}
-
-	return d
-}
-
-// String returns the canonical identifier for this error code.
-func (ec ErrorCode) String() string {
-	return ec.Descriptor().Value
-}
-
-// Message returned the human-readable error message for this error code.
-func (ec ErrorCode) Message() string {
-	return ec.Descriptor().Message
-}
-
-// MarshalText encodes the receiver into UTF-8-encoded text and returns the
-// result.
-func (ec ErrorCode) MarshalText() (text []byte, err error) {
-	return []byte(ec.String()), nil
-}
-
-// UnmarshalText decodes the form generated by MarshalText.
-func (ec *ErrorCode) UnmarshalText(text []byte) error {
-	desc, ok := idToDescriptors[string(text)]
-
-	if !ok {
-		desc = ErrorCodeUnknown.Descriptor()
-	}
-
-	*ec = desc.Code
-
-	return nil
-}
-
-// Error provides a wrapper around ErrorCode with extra Details provided.
-type Error struct {
-	Code    ErrorCode   `json:"code"`
-	Message string      `json:"message,omitempty"`
-	Detail  interface{} `json:"detail,omitempty"`
-}
-
-// Error returns a human readable representation of the error.
-func (e Error) Error() string {
-	return fmt.Sprintf("%s: %s",
-		strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
-		e.Message)
-}
-
-// Errors provides the envelope for multiple errors and a few sugar methods
-// for use within the application.
-type Errors struct {
-	Errors []Error `json:"errors,omitempty"`
-}
-
-// Push pushes an error on to the error stack, with the optional detail
-// argument. It is a programming error (ie panic) to push more than one
-// detail at a time.
-func (errs *Errors) Push(code ErrorCode, details ...interface{}) {
-	if len(details) > 1 {
-		panic("please specify zero or one detail items for this error")
-	}
-
-	var detail interface{}
-	if len(details) > 0 {
-		detail = details[0]
-	}
-
-	if err, ok := detail.(error); ok {
-		detail = err.Error()
-	}
-
-	errs.PushErr(Error{
-		Code:    code,
-		Message: code.Message(),
-		Detail:  detail,
-	})
-}
-
-// PushErr pushes an error interface onto the error stack.
-func (errs *Errors) PushErr(err error) {
-	switch err.(type) {
-	case Error:
-		errs.Errors = append(errs.Errors, err.(Error))
-	default:
-		errs.Errors = append(errs.Errors, Error{Message: err.Error()})
-	}
-}
-
-func (errs *Errors) Error() string {
-	switch errs.Len() {
-	case 0:
-		return "<nil>"
-	case 1:
-		return errs.Errors[0].Error()
-	default:
-		msg := "errors:\n"
-		for _, err := range errs.Errors {
-			msg += err.Error() + "\n"
-		}
-		return msg
-	}
-}
-
-// Clear clears the errors.
-func (errs *Errors) Clear() {
-	errs.Errors = errs.Errors[:0]
-}
-
-// Len returns the current number of errors.
-func (errs *Errors) Len() int {
-	return len(errs.Errors)
-}
diff --git a/registry/v2/errors_test.go b/registry/v2/errors_test.go
deleted file mode 100644
index 4a80cdf..0000000
--- a/registry/v2/errors_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package v2
-
-import (
-	"encoding/json"
-	"reflect"
-	"testing"
-)
-
-// TestErrorCodes ensures that error code format, mappings and
-// marshaling/unmarshaling. round trips are stable.
-func TestErrorCodes(t *testing.T) {
-	for _, desc := range ErrorDescriptors {
-		if desc.Code.String() != desc.Value {
-			t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value)
-		}
-
-		if desc.Code.Message() != desc.Message {
-			t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message)
-		}
-
-		// Serialize the error code using the json library to ensure that we
-		// get a string and it works round trip.
-		p, err := json.Marshal(desc.Code)
-
-		if err != nil {
-			t.Fatalf("error marshaling error code %v: %v", desc.Code, err)
-		}
-
-		if len(p) <= 0 {
-			t.Fatalf("expected content in marshaled before for error code %v", desc.Code)
-		}
-
-		// First, unmarshal to interface and ensure we have a string.
-		var ecUnspecified interface{}
-		if err := json.Unmarshal(p, &ecUnspecified); err != nil {
-			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
-		}
-
-		if _, ok := ecUnspecified.(string); !ok {
-			t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified)
-		}
-
-		// Now, unmarshal with the error code type and ensure they are equal
-		var ecUnmarshaled ErrorCode
-		if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
-			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
-		}
-
-		if ecUnmarshaled != desc.Code {
-			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code)
-		}
-	}
-}
-
-// TestErrorsManagement does a quick check of the Errors type to ensure that
-// members are properly pushed and marshaled.
-func TestErrorsManagement(t *testing.T) {
-	var errs Errors
-
-	errs.Push(ErrorCodeDigestInvalid)
-	errs.Push(ErrorCodeBlobUnknown,
-		map[string]string{"digest": "sometestblobsumdoesntmatter"})
-
-	p, err := json.Marshal(errs)
-
-	if err != nil {
-		t.Fatalf("error marashaling errors: %v", err)
-	}
-
-	expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}"
-
-	if string(p) != expectedJSON {
-		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
-	}
-
-	errs.Clear()
-	errs.Push(ErrorCodeUnknown)
-	expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
-	p, err = json.Marshal(errs)
-
-	if err != nil {
-		t.Fatalf("error marashaling errors: %v", err)
-	}
-
-	if string(p) != expectedJSON {
-		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
-	}
-}
-
-// TestMarshalUnmarshal ensures that api errors can round trip through json
-// without losing information.
-func TestMarshalUnmarshal(t *testing.T) {
-
-	var errors Errors
-
-	for _, testcase := range []struct {
-		description string
-		err         Error
-	}{
-		{
-			description: "unknown error",
-			err: Error{
-
-				Code:    ErrorCodeUnknown,
-				Message: ErrorCodeUnknown.Descriptor().Message,
-			},
-		},
-		{
-			description: "unknown manifest",
-			err: Error{
-				Code:    ErrorCodeManifestUnknown,
-				Message: ErrorCodeManifestUnknown.Descriptor().Message,
-			},
-		},
-		{
-			description: "unknown manifest",
-			err: Error{
-				Code:    ErrorCodeBlobUnknown,
-				Message: ErrorCodeBlobUnknown.Descriptor().Message,
-				Detail:  map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"},
-			},
-		},
-	} {
-		fatalf := func(format string, args ...interface{}) {
-			t.Fatalf(testcase.description+": "+format, args...)
-		}
-
-		unexpectedErr := func(err error) {
-			fatalf("unexpected error: %v", err)
-		}
-
-		p, err := json.Marshal(testcase.err)
-		if err != nil {
-			unexpectedErr(err)
-		}
-
-		var unmarshaled Error
-		if err := json.Unmarshal(p, &unmarshaled); err != nil {
-			unexpectedErr(err)
-		}
-
-		if !reflect.DeepEqual(unmarshaled, testcase.err) {
-			fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err)
-		}
-
-		// Roll everything up into an error response envelope.
-		errors.PushErr(testcase.err)
-	}
-
-	p, err := json.Marshal(errors)
-	if err != nil {
-		t.Fatalf("unexpected error marshaling error envelope: %v", err)
-	}
-
-	var unmarshaled Errors
-	if err := json.Unmarshal(p, &unmarshaled); err != nil {
-		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
-	}
-
-	if !reflect.DeepEqual(unmarshaled, errors) {
-		t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors)
-	}
-}
diff --git a/registry/v2/regexp.go b/registry/v2/regexp.go
deleted file mode 100644
index 07484dc..0000000
--- a/registry/v2/regexp.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package v2
-
-import "regexp"
-
-// This file defines regular expressions for use in route definition. These
-// are also defined in the registry code base. Until they are in a common,
-// shared location, and exported, they must be repeated here.
-
-// RepositoryNameComponentRegexp restricts registtry path components names to
-// start with at least two letters or numbers, with following parts able to
-// separated by one period, dash or underscore.
-var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`)
-
-// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 1 to
-// 5 path components, separated by a forward slash.
-var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){0,4}` + RepositoryNameComponentRegexp.String())
-
-// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go.
-var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)
-
-// DigestRegexp matches valid digest types.
-var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+`)
diff --git a/registry/v2/routes.go b/registry/v2/routes.go
deleted file mode 100644
index de0a38f..0000000
--- a/registry/v2/routes.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package v2
-
-import "github.com/gorilla/mux"
-
-// The following are definitions of the name under which all V2 routes are
-// registered. These symbols can be used to look up a route based on the name.
-const (
-	RouteNameBase            = "base"
-	RouteNameManifest        = "manifest"
-	RouteNameTags            = "tags"
-	RouteNameBlob            = "blob"
-	RouteNameBlobUpload      = "blob-upload"
-	RouteNameBlobUploadChunk = "blob-upload-chunk"
-)
-
-var allEndpoints = []string{
-	RouteNameManifest,
-	RouteNameTags,
-	RouteNameBlob,
-	RouteNameBlobUpload,
-	RouteNameBlobUploadChunk,
-}
-
-// Router builds a gorilla router with named routes for the various API
-// methods. This can be used directly by both server implementations and
-// clients.
-func Router() *mux.Router {
-	router := mux.NewRouter().
-		StrictSlash(true)
-
-	// GET /v2/	Check	Check that the registry implements API version 2(.1)
-	router.
-		Path("/v2/").
-		Name(RouteNameBase)
-
-	// GET      /v2/<name>/manifest/<reference>	Image Manifest	Fetch the image manifest identified by name and reference where reference can be a tag or digest.
-	// PUT      /v2/<name>/manifest/<reference>	Image Manifest	Upload the image manifest identified by name and reference where reference can be a tag or digest.
-	// DELETE   /v2/<name>/manifest/<reference>	Image Manifest	Delete the image identified by name and reference where reference can be a tag or digest.
-	router.
-		Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + DigestRegexp.String() + "}").
-		Name(RouteNameManifest)
-
-	// GET	/v2/<name>/tags/list	Tags	Fetch the tags under the repository identified by name.
-	router.
-		Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list").
-		Name(RouteNameTags)
-
-	// GET	/v2/<name>/blob/<digest>	Layer	Fetch the blob identified by digest.
-	router.
-		Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}").
-		Name(RouteNameBlob)
-
-	// POST	/v2/<name>/blob/upload/	Layer Upload	Initiate an upload of the layer identified by tarsum.
-	router.
-		Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/").
-		Name(RouteNameBlobUpload)
-
-	// GET	/v2/<name>/blob/upload/<uuid>	Layer Upload	Get the status of the upload identified by tarsum and uuid.
-	// PUT	/v2/<name>/blob/upload/<uuid>	Layer Upload	Upload all or a chunk of the upload identified by tarsum and uuid.
-	// DELETE	/v2/<name>/blob/upload/<uuid>	Layer Upload	Cancel the upload identified by layer and uuid
-	router.
-		Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}").
-		Name(RouteNameBlobUploadChunk)
-
-	return router
-}
diff --git a/registry/v2/routes_test.go b/registry/v2/routes_test.go
deleted file mode 100644
index 0191fee..0000000
--- a/registry/v2/routes_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package v2
-
-import (
-	"encoding/json"
-	"net/http"
-	"net/http/httptest"
-	"reflect"
-	"testing"
-
-	"github.com/gorilla/mux"
-)
-
-type routeTestCase struct {
-	RequestURI string
-	Vars       map[string]string
-	RouteName  string
-	StatusCode int
-}
-
-// TestRouter registers a test handler with all the routes and ensures that
-// each route returns the expected path variables. Not method verification is
-// present. This not meant to be exhaustive but as check to ensure that the
-// expected variables are extracted.
-//
-// This may go away as the application structure comes together.
-func TestRouter(t *testing.T) {
-
-	router := Router()
-
-	testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		testCase := routeTestCase{
-			RequestURI: r.RequestURI,
-			Vars:       mux.Vars(r),
-			RouteName:  mux.CurrentRoute(r).GetName(),
-		}
-
-		enc := json.NewEncoder(w)
-
-		if err := enc.Encode(testCase); err != nil {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-			return
-		}
-	})
-
-	// Startup test server
-	server := httptest.NewServer(router)
-
-	for _, testcase := range []routeTestCase{
-		{
-			RouteName:  RouteNameBase,
-			RequestURI: "/v2/",
-			Vars:       map[string]string{},
-		},
-		{
-			RouteName:  RouteNameManifest,
-			RequestURI: "/v2/foo/manifests/bar",
-			Vars: map[string]string{
-				"name":      "foo",
-				"reference": "bar",
-			},
-		},
-		{
-			RouteName:  RouteNameManifest,
-			RequestURI: "/v2/foo/bar/manifests/tag",
-			Vars: map[string]string{
-				"name":      "foo/bar",
-				"reference": "tag",
-			},
-		},
-		{
-			RouteName:  RouteNameTags,
-			RequestURI: "/v2/foo/bar/tags/list",
-			Vars: map[string]string{
-				"name": "foo/bar",
-			},
-		},
-		{
-			RouteName:  RouteNameBlob,
-			RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234",
-			Vars: map[string]string{
-				"name":   "foo/bar",
-				"digest": "tarsum.dev+foo:abcdef0919234",
-			},
-		},
-		{
-			RouteName:  RouteNameBlob,
-			RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
-			Vars: map[string]string{
-				"name":   "foo/bar",
-				"digest": "sha256:abcdef0919234",
-			},
-		},
-		{
-			RouteName:  RouteNameBlobUpload,
-			RequestURI: "/v2/foo/bar/blobs/uploads/",
-			Vars: map[string]string{
-				"name": "foo/bar",
-			},
-		},
-		{
-			RouteName:  RouteNameBlobUploadChunk,
-			RequestURI: "/v2/foo/bar/blobs/uploads/uuid",
-			Vars: map[string]string{
-				"name": "foo/bar",
-				"uuid": "uuid",
-			},
-		},
-		{
-			RouteName:  RouteNameBlobUploadChunk,
-			RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
-			Vars: map[string]string{
-				"name": "foo/bar",
-				"uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
-			},
-		},
-		{
-			RouteName:  RouteNameBlobUploadChunk,
-			RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
-			Vars: map[string]string{
-				"name": "foo/bar",
-				"uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
-			},
-		},
-		{
-			// Check ambiguity: ensure we can distinguish between tags for
-			// "foo/bar/image/image" and image for "foo/bar/image" with tag
-			// "tags"
-			RouteName:  RouteNameManifest,
-			RequestURI: "/v2/foo/bar/manifests/manifests/tags",
-			Vars: map[string]string{
-				"name":      "foo/bar/manifests",
-				"reference": "tags",
-			},
-		},
-		{
-			// This case presents an ambiguity between foo/bar with tag="tags"
-			// and list tags for "foo/bar/manifest"
-			RouteName:  RouteNameTags,
-			RequestURI: "/v2/foo/bar/manifests/tags/list",
-			Vars: map[string]string{
-				"name": "foo/bar/manifests",
-			},
-		},
-		{
-			RouteName:  RouteNameBlobUploadChunk,
-			RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
-			StatusCode: http.StatusNotFound,
-		},
-	} {
-		// Register the endpoint
-		router.GetRoute(testcase.RouteName).Handler(testHandler)
-		u := server.URL + testcase.RequestURI
-
-		resp, err := http.Get(u)
-
-		if err != nil {
-			t.Fatalf("error issuing get request: %v", err)
-		}
-
-		if testcase.StatusCode == 0 {
-			// Override default, zero-value
-			testcase.StatusCode = http.StatusOK
-		}
-
-		if resp.StatusCode != testcase.StatusCode {
-			t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode)
-		}
-
-		if testcase.StatusCode != http.StatusOK {
-			// We don't care about json response.
-			continue
-		}
-
-		dec := json.NewDecoder(resp.Body)
-
-		var actualRouteInfo routeTestCase
-		if err := dec.Decode(&actualRouteInfo); err != nil {
-			t.Fatalf("error reading json response: %v", err)
-		}
-		// Needs to be set out of band
-		actualRouteInfo.StatusCode = resp.StatusCode
-
-		if actualRouteInfo.RouteName != testcase.RouteName {
-			t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName)
-		}
-
-		if !reflect.DeepEqual(actualRouteInfo, testcase) {
-			t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase)
-		}
-	}
-
-}
diff --git a/registry/v2/urls.go b/registry/v2/urls.go
deleted file mode 100644
index 38fa98a..0000000
--- a/registry/v2/urls.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package v2
-
-import (
-	"net/http"
-	"net/url"
-
-	"github.com/gorilla/mux"
-)
-
-// URLBuilder creates registry API urls from a single base endpoint. It can be
-// used to create urls for use in a registry client or server.
-//
-// All urls will be created from the given base, including the api version.
-// For example, if a root of "/foo/" is provided, urls generated will be fall
-// under "/foo/v2/...". Most application will only provide a schema, host and
-// port, such as "https://localhost:5000/".
-type URLBuilder struct {
-	root   *url.URL // url root (ie http://localhost/)
-	router *mux.Router
-}
-
-// NewURLBuilder creates a URLBuilder with provided root url object.
-func NewURLBuilder(root *url.URL) *URLBuilder {
-	return &URLBuilder{
-		root:   root,
-		router: Router(),
-	}
-}
-
-// NewURLBuilderFromString workes identically to NewURLBuilder except it takes
-// a string argument for the root, returning an error if it is not a valid
-// url.
-func NewURLBuilderFromString(root string) (*URLBuilder, error) {
-	u, err := url.Parse(root)
-	if err != nil {
-		return nil, err
-	}
-
-	return NewURLBuilder(u), nil
-}
-
-// NewURLBuilderFromRequest uses information from an *http.Request to
-// construct the root url.
-func NewURLBuilderFromRequest(r *http.Request) *URLBuilder {
-	u := &url.URL{
-		Scheme: r.URL.Scheme,
-		Host:   r.Host,
-	}
-
-	return NewURLBuilder(u)
-}
-
-// BuildBaseURL constructs a base url for the API, typically just "/v2/".
-func (ub *URLBuilder) BuildBaseURL() (string, error) {
-	route := ub.cloneRoute(RouteNameBase)
-
-	baseURL, err := route.URL()
-	if err != nil {
-		return "", err
-	}
-
-	return baseURL.String(), nil
-}
-
-// BuildTagsURL constructs a url to list the tags in the named repository.
-func (ub *URLBuilder) BuildTagsURL(name string) (string, error) {
-	route := ub.cloneRoute(RouteNameTags)
-
-	tagsURL, err := route.URL("name", name)
-	if err != nil {
-		return "", err
-	}
-
-	return tagsURL.String(), nil
-}
-
-// BuildManifestURL constructs a url for the manifest identified by name and reference.
-func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) {
-	route := ub.cloneRoute(RouteNameManifest)
-
-	manifestURL, err := route.URL("name", name, "reference", reference)
-	if err != nil {
-		return "", err
-	}
-
-	return manifestURL.String(), nil
-}
-
-// BuildBlobURL constructs the url for the blob identified by name and dgst.
-func (ub *URLBuilder) BuildBlobURL(name string, dgst string) (string, error) {
-	route := ub.cloneRoute(RouteNameBlob)
-
-	layerURL, err := route.URL("name", name, "digest", dgst)
-	if err != nil {
-		return "", err
-	}
-
-	return layerURL.String(), nil
-}
-
-// BuildBlobUploadURL constructs a url to begin a blob upload in the
-// repository identified by name.
-func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) {
-	route := ub.cloneRoute(RouteNameBlobUpload)
-
-	uploadURL, err := route.URL("name", name)
-	if err != nil {
-		return "", err
-	}
-
-	return appendValuesURL(uploadURL, values...).String(), nil
-}
-
-// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
-// including any url values. This should generally not be used by clients, as
-// this url is provided by server implementations during the blob upload
-// process.
-func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) {
-	route := ub.cloneRoute(RouteNameBlobUploadChunk)
-
-	uploadURL, err := route.URL("name", name, "uuid", uuid)
-	if err != nil {
-		return "", err
-	}
-
-	return appendValuesURL(uploadURL, values...).String(), nil
-}
-
-// clondedRoute returns a clone of the named route from the router. Routes
-// must be cloned to avoid modifying them during url generation.
-func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
-	route := new(mux.Route)
-	root := new(url.URL)
-
-	*route = *ub.router.GetRoute(name) // clone the route
-	*root = *ub.root
-
-	return clonedRoute{Route: route, root: root}
-}
-
-type clonedRoute struct {
-	*mux.Route
-	root *url.URL
-}
-
-func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
-	routeURL, err := cr.Route.URL(pairs...)
-	if err != nil {
-		return nil, err
-	}
-
-	return cr.root.ResolveReference(routeURL), nil
-}
-
-// appendValuesURL appends the parameters to the url.
-func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
-	merged := u.Query()
-
-	for _, v := range values {
-		for k, vv := range v {
-			merged[k] = append(merged[k], vv...)
-		}
-	}
-
-	u.RawQuery = merged.Encode()
-	return u
-}
-
-// appendValues appends the parameters to the url. Panics if the string is not
-// a url.
-func appendValues(u string, values ...url.Values) string {
-	up, err := url.Parse(u)
-
-	if err != nil {
-		panic(err) // should never happen
-	}
-
-	return appendValuesURL(up, values...).String()
-}
diff --git a/registry/v2/urls_test.go b/registry/v2/urls_test.go
deleted file mode 100644
index f30c96c..0000000
--- a/registry/v2/urls_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package v2
-
-import (
-	"net/url"
-	"testing"
-)
-
-type urlBuilderTestCase struct {
-	description  string
-	expectedPath string
-	build        func() (string, error)
-}
-
-// TestURLBuilder tests the various url building functions, ensuring they are
-// returning the expected values.
-func TestURLBuilder(t *testing.T) {
-	var (
-		urlBuilder *URLBuilder
-		err        error
-	)
-
-	testCases := []urlBuilderTestCase{
-		{
-			description:  "test base url",
-			expectedPath: "/v2/",
-			build: func() (string, error) {
-				return urlBuilder.BuildBaseURL()
-			},
-		},
-		{
-			description:  "test tags url",
-			expectedPath: "/v2/foo/bar/tags/list",
-			build: func() (string, error) {
-				return urlBuilder.BuildTagsURL("foo/bar")
-			},
-		},
-		{
-			description:  "test manifest url",
-			expectedPath: "/v2/foo/bar/manifests/tag",
-			build: func() (string, error) {
-				return urlBuilder.BuildManifestURL("foo/bar", "tag")
-			},
-		},
-		{
-			description:  "build blob url",
-			expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789",
-			build: func() (string, error) {
-				return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789")
-			},
-		},
-		{
-			description:  "build blob upload url",
-			expectedPath: "/v2/foo/bar/blobs/uploads/",
-			build: func() (string, error) {
-				return urlBuilder.BuildBlobUploadURL("foo/bar")
-			},
-		},
-		{
-			description:  "build blob upload url with digest and size",
-			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
-			build: func() (string, error) {
-				return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{
-					"size":   []string{"10000"},
-					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
-				})
-			},
-		},
-		{
-			description:  "build blob upload chunk url",
-			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part",
-			build: func() (string, error) {
-				return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part")
-			},
-		},
-		{
-			description:  "build blob upload chunk url with digest and size",
-			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
-			build: func() (string, error) {
-				return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{
-					"size":   []string{"10000"},
-					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
-				})
-			},
-		},
-	}
-
-	roots := []string{
-		"http://example.com",
-		"https://example.com",
-		"http://localhost:5000",
-		"https://localhost:5443",
-	}
-
-	for _, root := range roots {
-		urlBuilder, err = NewURLBuilderFromString(root)
-		if err != nil {
-			t.Fatalf("unexpected error creating urlbuilder: %v", err)
-		}
-
-		for _, testCase := range testCases {
-			url, err := testCase.build()
-			if err != nil {
-				t.Fatalf("%s: error building url: %v", testCase.description, err)
-			}
-
-			expectedURL := root + testCase.expectedPath
-
-			if url != expectedURL {
-				t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
-			}
-		}
-	}
-}
diff --git a/runconfig/compare.go b/runconfig/compare.go
index 60a21a7..1d969e9 100644
--- a/runconfig/compare.go
+++ b/runconfig/compare.go
@@ -10,25 +10,25 @@
 	if a.AttachStdout != b.AttachStdout ||
 		a.AttachStderr != b.AttachStderr ||
 		a.User != b.User ||
-		a.Memory != b.Memory ||
-		a.MemorySwap != b.MemorySwap ||
-		a.CpuShares != b.CpuShares ||
 		a.OpenStdin != b.OpenStdin ||
 		a.Tty != b.Tty {
 		return false
 	}
-	if len(a.Cmd) != len(b.Cmd) ||
+
+	if a.Cmd.Len() != b.Cmd.Len() ||
 		len(a.Env) != len(b.Env) ||
 		len(a.Labels) != len(b.Labels) ||
 		len(a.PortSpecs) != len(b.PortSpecs) ||
 		len(a.ExposedPorts) != len(b.ExposedPorts) ||
-		len(a.Entrypoint) != len(b.Entrypoint) ||
+		a.Entrypoint.Len() != b.Entrypoint.Len() ||
 		len(a.Volumes) != len(b.Volumes) {
 		return false
 	}
 
-	for i := 0; i < len(a.Cmd); i++ {
-		if a.Cmd[i] != b.Cmd[i] {
+	aCmd := a.Cmd.Slice()
+	bCmd := b.Cmd.Slice()
+	for i := 0; i < len(aCmd); i++ {
+		if aCmd[i] != bCmd[i] {
 			return false
 		}
 	}
@@ -52,8 +52,11 @@
 			return false
 		}
 	}
-	for i := 0; i < len(a.Entrypoint); i++ {
-		if a.Entrypoint[i] != b.Entrypoint[i] {
+
+	aEntrypoint := a.Entrypoint.Slice()
+	bEntrypoint := b.Entrypoint.Slice()
+	for i := 0; i < len(aEntrypoint); i++ {
+		if aEntrypoint[i] != bEntrypoint[i] {
 			return false
 		}
 	}
diff --git a/runconfig/config.go b/runconfig/config.go
index 45255e9..13d7189 100644
--- a/runconfig/config.go
+++ b/runconfig/config.go
@@ -1,10 +1,108 @@
 package runconfig
 
 import (
-	"github.com/docker/docker/engine"
+	"encoding/json"
+	"io"
+	"strings"
+
 	"github.com/docker/docker/nat"
 )
 
+// Entrypoint encapsulates the container entrypoint.
+// It might be represented as a string or an array of strings.
+// We need to override the json decoder to accept both options.
+// The JSON decoder will fail if the api sends an string and
+//  we try to decode it into an array of string.
+type Entrypoint struct {
+	parts []string
+}
+
+func (e *Entrypoint) MarshalJSON() ([]byte, error) {
+	if e == nil {
+		return []byte{}, nil
+	}
+	return json.Marshal(e.Slice())
+}
+
+// UnmarshalJSON decoded the entrypoint whether it's a string or an array of strings.
+func (e *Entrypoint) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		return nil
+	}
+
+	p := make([]string, 0, 1)
+	if err := json.Unmarshal(b, &p); err != nil {
+		p = append(p, string(b))
+	}
+	e.parts = p
+	return nil
+}
+
+func (e *Entrypoint) Len() int {
+	if e == nil {
+		return 0
+	}
+	return len(e.parts)
+}
+
+func (e *Entrypoint) Slice() []string {
+	if e == nil {
+		return nil
+	}
+	return e.parts
+}
+
+func NewEntrypoint(parts ...string) *Entrypoint {
+	return &Entrypoint{parts}
+}
+
+type Command struct {
+	parts []string
+}
+
+func (e *Command) ToString() string {
+	return strings.Join(e.parts, " ")
+}
+
+func (e *Command) MarshalJSON() ([]byte, error) {
+	if e == nil {
+		return []byte{}, nil
+	}
+	return json.Marshal(e.Slice())
+}
+
+// UnmarshalJSON decoded the entrypoint whether it's a string or an array of strings.
+func (e *Command) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		return nil
+	}
+
+	p := make([]string, 0, 1)
+	if err := json.Unmarshal(b, &p); err != nil {
+		p = append(p, string(b))
+	}
+	e.parts = p
+	return nil
+}
+
+func (e *Command) Len() int {
+	if e == nil {
+		return 0
+	}
+	return len(e.parts)
+}
+
+func (e *Command) Slice() []string {
+	if e == nil {
+		return nil
+	}
+	return e.parts
+}
+
+func NewCommand(parts ...string) *Command {
+	return &Command{parts}
+}
+
 // Note: the Config structure should hold only portable information about the container.
 // Here, "portable" means "independent from the host we are running on".
 // Non-portable information *should* appear in HostConfig.
@@ -12,10 +110,6 @@
 	Hostname        string
 	Domainname      string
 	User            string
-	Memory          int64  // FIXME: we keep it for backward compatibility, it has been moved to hostConfig.
-	MemorySwap      int64  // FIXME: it has been moved to hostConfig.
-	CpuShares       int64  // FIXME: it has been moved to hostConfig.
-	Cpuset          string // FIXME: it has been moved to hostConfig and renamed to CpusetCpus.
 	AttachStdin     bool
 	AttachStdout    bool
 	AttachStderr    bool
@@ -25,53 +119,38 @@
 	OpenStdin       bool // Open stdin
 	StdinOnce       bool // If true, close stdin after the 1 attached client disconnects.
 	Env             []string
-	Cmd             []string
+	Cmd             *Command
 	Image           string // Name of the image as it was passed by the operator (eg. could be symbolic)
 	Volumes         map[string]struct{}
+	VolumeDriver    string
 	WorkingDir      string
-	Entrypoint      []string
+	Entrypoint      *Entrypoint
 	NetworkDisabled bool
 	MacAddress      string
 	OnBuild         []string
 	Labels          map[string]string
 }
 
-func ContainerConfigFromJob(job *engine.Job) *Config {
-	config := &Config{
-		Hostname:        job.Getenv("Hostname"),
-		Domainname:      job.Getenv("Domainname"),
-		User:            job.Getenv("User"),
-		Memory:          job.GetenvInt64("Memory"),
-		MemorySwap:      job.GetenvInt64("MemorySwap"),
-		CpuShares:       job.GetenvInt64("CpuShares"),
-		Cpuset:          job.Getenv("Cpuset"),
-		AttachStdin:     job.GetenvBool("AttachStdin"),
-		AttachStdout:    job.GetenvBool("AttachStdout"),
-		AttachStderr:    job.GetenvBool("AttachStderr"),
-		Tty:             job.GetenvBool("Tty"),
-		OpenStdin:       job.GetenvBool("OpenStdin"),
-		StdinOnce:       job.GetenvBool("StdinOnce"),
-		Image:           job.Getenv("Image"),
-		WorkingDir:      job.Getenv("WorkingDir"),
-		NetworkDisabled: job.GetenvBool("NetworkDisabled"),
-		MacAddress:      job.Getenv("MacAddress"),
-	}
-	job.GetenvJson("ExposedPorts", &config.ExposedPorts)
-	job.GetenvJson("Volumes", &config.Volumes)
-	if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil {
-		config.PortSpecs = PortSpecs
-	}
-	if Env := job.GetenvList("Env"); Env != nil {
-		config.Env = Env
-	}
-	if Cmd := job.GetenvList("Cmd"); Cmd != nil {
-		config.Cmd = Cmd
+type ContainerConfigWrapper struct {
+	*Config
+	*hostConfigWrapper
+}
+
+func (c ContainerConfigWrapper) HostConfig() *HostConfig {
+	if c.hostConfigWrapper == nil {
+		return new(HostConfig)
 	}
 
-	job.GetenvJson("Labels", &config.Labels)
+	return c.hostConfigWrapper.GetHostConfig()
+}
 
-	if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil {
-		config.Entrypoint = Entrypoint
+func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) {
+	decoder := json.NewDecoder(src)
+
+	var w ContainerConfigWrapper
+	if err := decoder.Decode(&w); err != nil {
+		return nil, nil, err
 	}
-	return config
+
+	return w.Config, w.HostConfig(), nil
 }
diff --git a/runconfig/config_test.go b/runconfig/config_test.go
index accbd91..27727a4 100644
--- a/runconfig/config_test.go
+++ b/runconfig/config_test.go
@@ -1,7 +1,9 @@
 package runconfig
 
 import (
+	"bytes"
 	"fmt"
+	"io/ioutil"
 	"strings"
 	"testing"
 
@@ -43,13 +45,6 @@
 	if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
 		t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
 	}
-
-	if _, _, err := parse(t, "--link a"); err == nil {
-		t.Fatalf("Error parsing links. `--link a` should be an error but is not")
-	}
-	if _, _, err := parse(t, "--link"); err == nil {
-		t.Fatalf("Error parsing links. `--link` should be an error but is not")
-	}
 }
 
 func TestParseRunAttach(t *testing.T) {
@@ -102,7 +97,7 @@
 	if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
 		t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
 	} else if _, exists := config.Volumes["/tmp"]; !exists {
-		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes)
+		t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
 	} else if _, exists := config.Volumes["/var"]; !exists {
 		t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
 	}
@@ -119,6 +114,14 @@
 		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
 	}
 
+	if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:roZ -v /hostVar:/containerVar:rwZ"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:roZ", "/hostVar:/containerVar:rwZ") != nil {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:roZ -v /hostVar:/containerVar:rwZ` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
+	}
+
+	if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:Z", "/hostVar:/containerVar:z") != nil {
+		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:Z -v /hostVar:/containerVar:z` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
+	}
+
 	if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
 		t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
 	} else if _, exists := config.Volumes["/containerVar"]; !exists {
@@ -260,5 +263,39 @@
 			t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs)
 		}
 	}
+}
 
+func TestDecodeContainerConfig(t *testing.T) {
+	fixtures := []struct {
+		file       string
+		entrypoint *Entrypoint
+	}{
+		{"fixtures/container_config_1_14.json", NewEntrypoint()},
+		{"fixtures/container_config_1_17.json", NewEntrypoint("bash")},
+		{"fixtures/container_config_1_19.json", NewEntrypoint("bash")},
+	}
+
+	for _, f := range fixtures {
+		b, err := ioutil.ReadFile(f.file)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		c, h, err := DecodeContainerConfig(bytes.NewReader(b))
+		if err != nil {
+			t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err))
+		}
+
+		if c.Image != "ubuntu" {
+			t.Fatalf("Expected ubuntu image, found %s\n", c.Image)
+		}
+
+		if c.Entrypoint.Len() != f.entrypoint.Len() {
+			t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint)
+		}
+
+		if h.Memory != 1000 {
+			t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory)
+		}
+	}
 }
diff --git a/runconfig/exec.go b/runconfig/exec.go
index 9390781..781cb35 100644
--- a/runconfig/exec.go
+++ b/runconfig/exec.go
@@ -1,11 +1,7 @@
 package runconfig
 
 import (
-	"fmt"
-
-	"github.com/docker/docker/engine"
 	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/docker/utils"
 )
 
 type ExecConfig struct {
@@ -20,37 +16,17 @@
 	Cmd          []string
 }
 
-func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) {
-	execConfig := &ExecConfig{
-		// TODO(vishh): Expose 'User' once it is supported.
-		//User:         job.Getenv("User"),
-		// TODO(vishh): Expose 'Privileged' once it is supported.
-		//Privileged:   job.GetenvBool("Privileged"),
-		Tty:          job.GetenvBool("Tty"),
-		AttachStdin:  job.GetenvBool("AttachStdin"),
-		AttachStderr: job.GetenvBool("AttachStderr"),
-		AttachStdout: job.GetenvBool("AttachStdout"),
-	}
-	cmd := job.GetenvList("Cmd")
-	if len(cmd) == 0 {
-		return nil, fmt.Errorf("No exec command specified")
-	}
-
-	execConfig.Cmd = cmd
-
-	return execConfig, nil
-}
-
 func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) {
 	var (
 		flStdin   = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
 		flTty     = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
 		flDetach  = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background")
+		flUser    = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])")
 		execCmd   []string
 		container string
 	)
 	cmd.Require(flag.Min, 2)
-	if err := utils.ParseFlags(cmd, args, true); err != nil {
+	if err := cmd.ParseFlags(args, true); err != nil {
 		return nil, err
 	}
 	container = cmd.Arg(0)
@@ -58,14 +34,13 @@
 	execCmd = parsedArgs[1:]
 
 	execConfig := &ExecConfig{
-		// TODO(vishh): Expose '-u' flag once it is supported.
-		User: "",
-		// TODO(vishh): Expose '-p' flag once it is supported.
-		Privileged: false,
-		Tty:        *flTty,
-		Cmd:        execCmd,
-		Container:  container,
-		Detach:     *flDetach,
+		User: *flUser,
+		// TODO(vishh): Expose 'Privileged' once it is supported.
+		// +		//Privileged:   job.GetenvBool("Privileged"),
+		Tty:       *flTty,
+		Cmd:       execCmd,
+		Container: container,
+		Detach:    *flDetach,
 	}
 
 	// If -d is not set, attach to everything by default
diff --git a/runconfig/fixtures/container_config_1_14.json b/runconfig/fixtures/container_config_1_14.json
new file mode 100644
index 0000000..b08334c
--- /dev/null
+++ b/runconfig/fixtures/container_config_1_14.json
@@ -0,0 +1,30 @@
+{
+     "Hostname":"",
+     "Domainname": "",
+     "User":"",
+     "Memory": 1000,
+     "MemorySwap":0,
+     "CpuShares": 512,
+     "Cpuset": "0,1",
+     "AttachStdin":false,
+     "AttachStdout":true,
+     "AttachStderr":true,
+     "PortSpecs":null,
+     "Tty":false,
+     "OpenStdin":false,
+     "StdinOnce":false,
+     "Env":null,
+     "Cmd":[
+             "bash"
+     ],
+     "Image":"ubuntu",
+     "Volumes":{
+             "/tmp": {}
+     },
+     "WorkingDir":"",
+     "NetworkDisabled": false,
+     "ExposedPorts":{
+             "22/tcp": {}
+     },
+     "RestartPolicy": { "Name": "always" }
+}
diff --git a/runconfig/fixtures/container_config_1_17.json b/runconfig/fixtures/container_config_1_17.json
new file mode 100644
index 0000000..60fc6e2
--- /dev/null
+++ b/runconfig/fixtures/container_config_1_17.json
@@ -0,0 +1,49 @@
+{
+     "Hostname": "",
+     "Domainname": "",
+     "User": "",
+     "Memory": 1000,
+     "MemorySwap": 0,
+     "CpuShares": 512,
+     "Cpuset": "0,1",
+     "AttachStdin": false,
+     "AttachStdout": true,
+     "AttachStderr": true,
+     "Tty": false,
+     "OpenStdin": false,
+     "StdinOnce": false,
+     "Env": null,
+     "Cmd": [
+             "date"
+     ],
+     "Entrypoint": "bash",
+     "Image": "ubuntu",
+     "Volumes": {
+             "/tmp": {}
+     },
+     "WorkingDir": "",
+     "NetworkDisabled": false,
+     "MacAddress": "12:34:56:78:9a:bc",
+     "ExposedPorts": {
+             "22/tcp": {}
+     },
+     "SecurityOpt": [""],
+     "HostConfig": {
+       "Binds": ["/tmp:/tmp"],
+       "Links": ["redis3:redis"],
+       "LxcConf": {"lxc.utsname":"docker"},
+       "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+       "PublishAllPorts": false,
+       "Privileged": false,
+       "ReadonlyRootfs": false,
+       "Dns": ["8.8.8.8"],
+       "DnsSearch": [""],
+       "ExtraHosts": null,
+       "VolumesFrom": ["parent", "other:ro"],
+       "CapAdd": ["NET_ADMIN"],
+       "CapDrop": ["MKNOD"],
+       "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+       "NetworkMode": "bridge",
+       "Devices": []
+    }
+}
diff --git a/runconfig/fixtures/container_config_1_19.json b/runconfig/fixtures/container_config_1_19.json
new file mode 100644
index 0000000..9a3ce20
--- /dev/null
+++ b/runconfig/fixtures/container_config_1_19.json
@@ -0,0 +1,57 @@
+{
+     "Hostname": "",
+     "Domainname": "",
+     "User": "",
+     "AttachStdin": false,
+     "AttachStdout": true,
+     "AttachStderr": true,
+     "Tty": false,
+     "OpenStdin": false,
+     "StdinOnce": false,
+     "Env": null,
+     "Cmd": [
+             "date"
+     ],
+     "Entrypoint": "bash",
+     "Image": "ubuntu",
+     "Labels": {
+             "com.example.vendor": "Acme",
+             "com.example.license": "GPL",
+             "com.example.version": "1.0"
+     },
+     "Volumes": {
+             "/tmp": {}
+     },
+     "WorkingDir": "",
+     "NetworkDisabled": false,
+     "MacAddress": "12:34:56:78:9a:bc",
+     "ExposedPorts": {
+             "22/tcp": {}
+     },
+     "HostConfig": {
+       "Binds": ["/tmp:/tmp"],
+       "Links": ["redis3:redis"],
+       "LxcConf": {"lxc.utsname":"docker"},
+       "Memory": 1000,
+       "MemorySwap": 0,
+       "CpuShares": 512,
+       "CpusetCpus": "0,1",
+       "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+       "PublishAllPorts": false,
+       "Privileged": false,
+       "ReadonlyRootfs": false,
+       "Dns": ["8.8.8.8"],
+       "DnsSearch": [""],
+       "ExtraHosts": null,
+       "VolumesFrom": ["parent", "other:ro"],
+       "CapAdd": ["NET_ADMIN"],
+       "CapDrop": ["MKNOD"],
+       "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+       "NetworkMode": "bridge",
+       "Devices": [],
+       "Ulimits": [{}],
+       "LogConfig": { "Type": "json-file", "Config": {} },
+       "SecurityOpt": [""],
+       "CgroupParent": ""
+    }
+}
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go
index 84d636b..1418dea 100644
--- a/runconfig/hostconfig.go
+++ b/runconfig/hostconfig.go
@@ -1,19 +1,28 @@
 package runconfig
 
 import (
+	"encoding/json"
+	"io"
 	"strings"
 
-	"github.com/docker/docker/engine"
 	"github.com/docker/docker/nat"
 	"github.com/docker/docker/pkg/ulimit"
-	"github.com/docker/docker/utils"
 )
 
+type KeyValuePair struct {
+	Key   string
+	Value string
+}
+
 type NetworkMode string
 
 // IsPrivate indicates whether container use it's private network stack
 func (n NetworkMode) IsPrivate() bool {
-	return !(n.IsHost() || n.IsContainer() || n.IsNone())
+	return !(n.IsHost() || n.IsContainer())
+}
+
+func (n NetworkMode) IsBridge() bool {
+	return n == "bridge"
 }
 
 func (n NetworkMode) IsHost() bool {
@@ -67,6 +76,27 @@
 	return ""
 }
 
+type UTSMode string
+
+// IsPrivate indicates whether container use it's private UTS namespace
+func (n UTSMode) IsPrivate() bool {
+	return !(n.IsHost())
+}
+
+func (n UTSMode) IsHost() bool {
+	return n == "host"
+}
+
+func (n UTSMode) Valid() bool {
+	parts := strings.Split(string(n), ":")
+	switch mode := parts[0]; mode {
+	case "", "host":
+	default:
+		return false
+	}
+	return true
+}
+
 type PidMode string
 
 // IsPrivate indicates whether container use it's private pid stack
@@ -99,19 +129,85 @@
 	MaximumRetryCount int
 }
 
+func (rp *RestartPolicy) IsNone() bool {
+	return rp.Name == "no"
+}
+
+func (rp *RestartPolicy) IsAlways() bool {
+	return rp.Name == "always"
+}
+
+func (rp *RestartPolicy) IsOnFailure() bool {
+	return rp.Name == "on-failure"
+}
+
 type LogConfig struct {
 	Type   string
 	Config map[string]string
 }
 
+type LxcConfig struct {
+	values []KeyValuePair
+}
+
+func (c *LxcConfig) MarshalJSON() ([]byte, error) {
+	if c == nil {
+		return []byte{}, nil
+	}
+	return json.Marshal(c.Slice())
+}
+
+func (c *LxcConfig) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		return nil
+	}
+
+	var kv []KeyValuePair
+	if err := json.Unmarshal(b, &kv); err != nil {
+		var h map[string]string
+		if err := json.Unmarshal(b, &h); err != nil {
+			return err
+		}
+		for k, v := range h {
+			kv = append(kv, KeyValuePair{k, v})
+		}
+	}
+	c.values = kv
+
+	return nil
+}
+
+func (c *LxcConfig) Len() int {
+	if c == nil {
+		return 0
+	}
+	return len(c.values)
+}
+
+func (c *LxcConfig) Slice() []KeyValuePair {
+	if c == nil {
+		return nil
+	}
+	return c.values
+}
+
+func NewLxcConfig(values []KeyValuePair) *LxcConfig {
+	return &LxcConfig{values}
+}
+
 type HostConfig struct {
 	Binds           []string
 	ContainerIDFile string
-	LxcConf         []utils.KeyValuePair
-	Memory          int64  // Memory limit (in bytes)
-	MemorySwap      int64  // Total memory usage (memory + swap); set `-1` to disable swap
-	CpuShares       int64  // CPU shares (relative weight vs. other containers)
+	LxcConf         *LxcConfig
+	Memory          int64 // Memory limit (in bytes)
+	MemorySwap      int64 // Total memory usage (memory + swap); set `-1` to disable swap
+	CpuShares       int64 // CPU shares (relative weight vs. other containers)
+	CpuPeriod       int64
 	CpusetCpus      string // CpusetCpus 0-2, 0,1
+	CpusetMems      string // CpusetMems 0-2, 0,1
+	CpuQuota        int64
+	BlkioWeight     int64 // Block IO weight (relative weight vs. other containers)
+	OomKillDisable  bool  // Whether to disable OOM Killer or not
 	Privileged      bool
 	PortBindings    nat.PortMap
 	Links           []string
@@ -124,6 +220,7 @@
 	NetworkMode     NetworkMode
 	IpcMode         IpcMode
 	PidMode         PidMode
+	UTSMode         UTSMode
 	CapAdd          []string
 	CapDrop         []string
 	RestartPolicy   RestartPolicy
@@ -134,96 +231,55 @@
 	CgroupParent    string // Parent cgroup.
 }
 
-// This is used by the create command when you want to set both the
-// Config and the HostConfig in the same call
-type ConfigAndHostConfig struct {
-	Config
-	HostConfig HostConfig
-}
-
-func MergeConfigs(config *Config, hostConfig *HostConfig) *ConfigAndHostConfig {
-	return &ConfigAndHostConfig{
-		*config,
-		*hostConfig,
+func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper {
+	return &ContainerConfigWrapper{
+		config,
+		&hostConfigWrapper{InnerHostConfig: hostConfig},
 	}
 }
 
-func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
-	if job.EnvExists("HostConfig") {
-		hostConfig := HostConfig{}
-		job.GetenvJson("HostConfig", &hostConfig)
+type hostConfigWrapper struct {
+	InnerHostConfig *HostConfig `json:"HostConfig,omitempty"`
+	Cpuset          string      `json:",omitempty"` // Deprecated. Exported for backwards compatibility.
 
-		// FIXME: These are for backward compatibility, if people use these
-		// options with `HostConfig`, we should still make them workable.
-		if job.EnvExists("Memory") && hostConfig.Memory == 0 {
-			hostConfig.Memory = job.GetenvInt64("Memory")
+	*HostConfig // Deprecated. Exported to read attrubutes from json that are not in the inner host config structure.
+}
+
+func (w hostConfigWrapper) GetHostConfig() *HostConfig {
+	hc := w.HostConfig
+
+	if hc == nil && w.InnerHostConfig != nil {
+		hc = w.InnerHostConfig
+	} else if w.InnerHostConfig != nil {
+		if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 {
+			w.InnerHostConfig.Memory = hc.Memory
 		}
-		if job.EnvExists("MemorySwap") && hostConfig.MemorySwap == 0 {
-			hostConfig.MemorySwap = job.GetenvInt64("MemorySwap")
+		if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 {
+			w.InnerHostConfig.MemorySwap = hc.MemorySwap
 		}
-		if job.EnvExists("CpuShares") && hostConfig.CpuShares == 0 {
-			hostConfig.CpuShares = job.GetenvInt64("CpuShares")
-		}
-		if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
-			hostConfig.CpusetCpus = job.Getenv("Cpuset")
+		if hc.CpuShares != 0 && w.InnerHostConfig.CpuShares == 0 {
+			w.InnerHostConfig.CpuShares = hc.CpuShares
 		}
 
-		return &hostConfig
+		hc = w.InnerHostConfig
 	}
 
-	hostConfig := &HostConfig{
-		ContainerIDFile: job.Getenv("ContainerIDFile"),
-		Memory:          job.GetenvInt64("Memory"),
-		MemorySwap:      job.GetenvInt64("MemorySwap"),
-		CpuShares:       job.GetenvInt64("CpuShares"),
-		CpusetCpus:      job.Getenv("CpusetCpus"),
-		Privileged:      job.GetenvBool("Privileged"),
-		PublishAllPorts: job.GetenvBool("PublishAllPorts"),
-		NetworkMode:     NetworkMode(job.Getenv("NetworkMode")),
-		IpcMode:         IpcMode(job.Getenv("IpcMode")),
-		PidMode:         PidMode(job.Getenv("PidMode")),
-		ReadonlyRootfs:  job.GetenvBool("ReadonlyRootfs"),
-		CgroupParent:    job.Getenv("CgroupParent"),
+	if hc != nil && w.Cpuset != "" && hc.CpusetCpus == "" {
+		hc.CpusetCpus = w.Cpuset
 	}
 
-	// FIXME: This is for backward compatibility, if people use `Cpuset`
-	// in json, make it workable, we will only pass hostConfig.CpusetCpus
-	// to execDriver.
-	if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" {
-		hostConfig.CpusetCpus = job.Getenv("Cpuset")
+	return hc
+}
+
+func DecodeHostConfig(src io.Reader) (*HostConfig, error) {
+	decoder := json.NewDecoder(src)
+
+	var w hostConfigWrapper
+	if err := decoder.Decode(&w); err != nil {
+		return nil, err
 	}
 
-	job.GetenvJson("LxcConf", &hostConfig.LxcConf)
-	job.GetenvJson("PortBindings", &hostConfig.PortBindings)
-	job.GetenvJson("Devices", &hostConfig.Devices)
-	job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy)
-	job.GetenvJson("Ulimits", &hostConfig.Ulimits)
-	job.GetenvJson("LogConfig", &hostConfig.LogConfig)
-	hostConfig.SecurityOpt = job.GetenvList("SecurityOpt")
-	if Binds := job.GetenvList("Binds"); Binds != nil {
-		hostConfig.Binds = Binds
-	}
-	if Links := job.GetenvList("Links"); Links != nil {
-		hostConfig.Links = Links
-	}
-	if Dns := job.GetenvList("Dns"); Dns != nil {
-		hostConfig.Dns = Dns
-	}
-	if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
-		hostConfig.DnsSearch = DnsSearch
-	}
-	if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil {
-		hostConfig.ExtraHosts = ExtraHosts
-	}
-	if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
-		hostConfig.VolumesFrom = VolumesFrom
-	}
-	if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil {
-		hostConfig.CapAdd = CapAdd
-	}
-	if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil {
-		hostConfig.CapDrop = CapDrop
-	}
+	hc := w.GetHostConfig()
 
-	return hostConfig
+	return hc, nil
 }
diff --git a/runconfig/merge.go b/runconfig/merge.go
index 9bbdc6a..9c9a3b4 100644
--- a/runconfig/merge.go
+++ b/runconfig/merge.go
@@ -3,7 +3,7 @@
 import (
 	"strings"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/nat"
 )
 
@@ -11,15 +11,6 @@
 	if userConf.User == "" {
 		userConf.User = imageConf.User
 	}
-	if userConf.Memory == 0 {
-		userConf.Memory = imageConf.Memory
-	}
-	if userConf.MemorySwap == 0 {
-		userConf.MemorySwap = imageConf.MemorySwap
-	}
-	if userConf.CpuShares == 0 {
-		userConf.CpuShares = imageConf.CpuShares
-	}
 	if len(userConf.ExposedPorts) == 0 {
 		userConf.ExposedPorts = imageConf.ExposedPorts
 	} else if imageConf.ExposedPorts != nil {
@@ -50,7 +41,7 @@
 	}
 	if len(imageConf.PortSpecs) > 0 {
 		// FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia.
-		log.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", "))
+		logrus.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", "))
 		if userConf.ExposedPorts == nil {
 			userConf.ExposedPorts = make(nat.PortSet)
 		}
@@ -94,8 +85,8 @@
 		userConf.Labels = imageConf.Labels
 	}
 
-	if len(userConf.Entrypoint) == 0 {
-		if len(userConf.Cmd) == 0 {
+	if userConf.Entrypoint.Len() == 0 {
+		if userConf.Cmd.Len() == 0 {
 			userConf.Cmd = imageConf.Cmd
 		}
 
diff --git a/runconfig/parse.go b/runconfig/parse.go
index ccd8056..46ec267 100644
--- a/runconfig/parse.go
+++ b/runconfig/parse.go
@@ -2,7 +2,6 @@
 
 import (
 	"fmt"
-	"path"
 	"strconv"
 	"strings"
 
@@ -12,16 +11,15 @@
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/ulimit"
 	"github.com/docker/docker/pkg/units"
-	"github.com/docker/docker/utils"
 )
 
 var (
-	ErrInvalidWorkingDirectory          = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
-	ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior.")
-	ErrConflictContainerNetworkAndDns   = fmt.Errorf("Conflicting options: --net=container can't be used with --dns. This configuration is invalid.")
+	ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior")
+	ErrConflictNetworkAndDns            = fmt.Errorf("Conflicting options: --dns and the network mode (--net)")
 	ErrConflictNetworkHostname          = fmt.Errorf("Conflicting options: -h and the network mode (--net)")
-	ErrConflictHostNetworkAndDns        = fmt.Errorf("Conflicting options: --net=host can't be used with --dns. This configuration is invalid.")
-	ErrConflictHostNetworkAndLinks      = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.")
+	ErrConflictHostNetworkAndLinks      = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior")
+	ErrConflictContainerNetworkAndMac   = fmt.Errorf("Conflicting options: --mac-address and the network mode (--net)")
+	ErrConflictNetworkHosts             = fmt.Errorf("Conflicting options: --add-host and the network mode (--net)")
 )
 
 func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) {
@@ -49,13 +47,16 @@
 		flCapDrop     = opts.NewListOpts(nil)
 		flSecurityOpt = opts.NewListOpts(nil)
 		flLabelsFile  = opts.NewListOpts(nil)
+		flLoggingOpts = opts.NewListOpts(nil)
 
 		flNetwork         = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container")
 		flPrivileged      = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container")
 		flPidMode         = cmd.String([]string{"-pid"}, "", "PID namespace to use")
+		flUTSMode         = cmd.String([]string{"-uts"}, "", "UTS namespace to use")
 		flPublishAll      = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports")
 		flStdin           = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
 		flTty             = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
+		flOomKillDisable  = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer")
 		flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file")
 		flEntrypoint      = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image")
 		flHostname        = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
@@ -64,7 +65,11 @@
 		flUser            = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])")
 		flWorkingDir      = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
 		flCpuShares       = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
+		flCpuPeriod       = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period")
 		flCpusetCpus      = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
+		flCpusetMems      = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
+		flCpuQuota        = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS quota")
+		flBlkioWeight     = cmd.Int64([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000")
 		flNetMode         = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container")
 		flMacAddress      = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)")
 		flIpcMode         = cmd.String([]string{"-ipc"}, "", "IPC namespace to use")
@@ -93,16 +98,49 @@
 	cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities")
 	cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options")
 	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")
+	cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options")
+
+	expFlags := attachExperimentalFlags(cmd)
 
 	cmd.Require(flag.Min, 1)
 
-	if err := utils.ParseFlags(cmd, args, true); err != nil {
+	if err := cmd.ParseFlags(args, true); err != nil {
 		return nil, nil, cmd, err
 	}
 
-	// Validate input params
-	if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
-		return nil, nil, cmd, ErrInvalidWorkingDirectory
+	var (
+		attachStdin  = flAttach.Get("stdin")
+		attachStdout = flAttach.Get("stdout")
+		attachStderr = flAttach.Get("stderr")
+	)
+
+	netMode, err := parseNetMode(*flNetMode)
+	if err != nil {
+		return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err)
+	}
+
+	if (netMode.IsHost() || netMode.IsContainer()) && *flHostname != "" {
+		return nil, nil, cmd, ErrConflictNetworkHostname
+	}
+
+	if netMode.IsHost() && flLinks.Len() > 0 {
+		return nil, nil, cmd, ErrConflictHostNetworkAndLinks
+	}
+
+	if netMode.IsContainer() && flLinks.Len() > 0 {
+		return nil, nil, cmd, ErrConflictContainerNetworkAndLinks
+	}
+
+	if (netMode.IsHost() || netMode.IsContainer()) && flDns.Len() > 0 {
+		return nil, nil, cmd, ErrConflictNetworkAndDns
+	}
+
+	if (netMode.IsContainer() || netMode.IsHost()) && flExtraHosts.Len() > 0 {
+		return nil, nil, cmd, ErrConflictNetworkHosts
+	}
+
+	if (netMode.IsContainer() || netMode.IsHost()) && *flMacAddress != "" {
+		return nil, nil, cmd, ErrConflictContainerNetworkAndMac
 	}
 
 	// Validate the input mac address
@@ -111,31 +149,6 @@
 			return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress)
 		}
 	}
-	var (
-		attachStdin  = flAttach.Get("stdin")
-		attachStdout = flAttach.Get("stdout")
-		attachStderr = flAttach.Get("stderr")
-	)
-
-	if *flNetMode != "bridge" && *flNetMode != "none" && *flHostname != "" {
-		return nil, nil, cmd, ErrConflictNetworkHostname
-	}
-
-	if *flNetMode == "host" && flLinks.Len() > 0 {
-		return nil, nil, cmd, ErrConflictHostNetworkAndLinks
-	}
-
-	if *flNetMode == "container" && flLinks.Len() > 0 {
-		return nil, nil, cmd, ErrConflictContainerNetworkAndLinks
-	}
-
-	if *flNetMode == "host" && flDns.Len() > 0 {
-		return nil, nil, cmd, ErrConflictHostNetworkAndDns
-	}
-
-	if *flNetMode == "container" && flDns.Len() > 0 {
-		return nil, nil, cmd, ErrConflictContainerNetworkAndDns
-	}
 
 	// If neither -d or -a are set, attach to everything by default
 	if flAttach.Len() == 0 {
@@ -186,21 +199,22 @@
 
 	var (
 		parsedArgs = cmd.Args()
-		runCmd     []string
-		entrypoint []string
+		runCmd     *Command
+		entrypoint *Entrypoint
 		image      = cmd.Arg(0)
 	)
 	if len(parsedArgs) > 1 {
-		runCmd = parsedArgs[1:]
+		runCmd = NewCommand(parsedArgs[1:]...)
 	}
 	if *flEntrypoint != "" {
-		entrypoint = []string{*flEntrypoint}
+		entrypoint = NewEntrypoint(*flEntrypoint)
 	}
 
-	lxcConf, err := parseKeyValueOpts(flLxcOpts)
+	lc, err := parseKeyValueOpts(flLxcOpts)
 	if err != nil {
 		return nil, nil, cmd, err
 	}
+	lxcConf := NewLxcConfig(lc)
 
 	var (
 		domainname string
@@ -270,12 +284,17 @@
 		return nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode")
 	}
 
-	netMode, err := parseNetMode(*flNetMode)
-	if err != nil {
-		return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err)
+	utsMode := UTSMode(*flUTSMode)
+	if !utsMode.Valid() {
+		return nil, nil, cmd, fmt.Errorf("--uts: invalid UTS mode")
 	}
 
-	restartPolicy, err := parseRestartPolicy(*flRestartPolicy)
+	restartPolicy, err := ParseRestartPolicy(*flRestartPolicy)
+	if err != nil {
+		return nil, nil, cmd, err
+	}
+
+	loggingOpts, err := parseLoggingOpts(*flLoggingDriver, flLoggingOpts.GetAll())
 	if err != nil {
 		return nil, nil, cmd, err
 	}
@@ -289,10 +308,6 @@
 		Tty:             *flTty,
 		NetworkDisabled: !*flNetwork,
 		OpenStdin:       *flStdin,
-		Memory:          flMemory,      // FIXME: for backward compatibility
-		MemorySwap:      MemorySwap,    // FIXME: for backward compatibility
-		CpuShares:       *flCpuShares,  // FIXME: for backward compatibility
-		Cpuset:          *flCpusetCpus, // FIXME: for backward compatibility
 		AttachStdin:     attachStdin,
 		AttachStdout:    attachStdout,
 		AttachStderr:    attachStderr,
@@ -313,7 +328,12 @@
 		Memory:          flMemory,
 		MemorySwap:      MemorySwap,
 		CpuShares:       *flCpuShares,
+		CpuPeriod:       *flCpuPeriod,
 		CpusetCpus:      *flCpusetCpus,
+		CpusetMems:      *flCpusetMems,
+		CpuQuota:        *flCpuQuota,
+		BlkioWeight:     *flBlkioWeight,
+		OomKillDisable:  *flOomKillDisable,
 		Privileged:      *flPrivileged,
 		PortBindings:    portBindings,
 		Links:           flLinks.GetAll(),
@@ -325,6 +345,7 @@
 		NetworkMode:     netMode,
 		IpcMode:         ipcMode,
 		PidMode:         pidMode,
+		UTSMode:         utsMode,
 		Devices:         deviceMappings,
 		CapAdd:          flCapAdd.GetAll(),
 		CapDrop:         flCapDrop.GetAll(),
@@ -332,10 +353,12 @@
 		SecurityOpt:     flSecurityOpt.GetAll(),
 		ReadonlyRootfs:  *flReadonlyRootfs,
 		Ulimits:         flUlimits.GetList(),
-		LogConfig:       LogConfig{Type: *flLoggingDriver},
+		LogConfig:       LogConfig{Type: *flLoggingDriver, Config: loggingOpts},
 		CgroupParent:    *flCgroupParent,
 	}
 
+	applyExperimentalFlags(expFlags, config, hostConfig)
+
 	// When allocating stdin in attached mode, close stdin at client disconnect
 	if config.OpenStdin && config.AttachStdin {
 		config.StdinOnce = true
@@ -374,8 +397,17 @@
 	return result
 }
 
-// parseRestartPolicy returns the parsed policy or an error indicating what is incorrect
-func parseRestartPolicy(policy string) (RestartPolicy, error) {
+func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) {
+	loggingOptsMap := convertKVStringsToMap(loggingOpts)
+	if loggingDriver == "none" && len(loggingOpts) > 0 {
+		return map[string]string{}, fmt.Errorf("Invalid logging opts for driver %s", loggingDriver)
+	}
+	//TODO - validation step
+	return loggingOptsMap, nil
+}
+
+// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect
+func ParseRestartPolicy(policy string) (RestartPolicy, error) {
 	p := RestartPolicy{}
 
 	if policy == "" {
@@ -430,14 +462,14 @@
 	return out, nil
 }
 
-func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) {
-	out := make([]utils.KeyValuePair, opts.Len())
+func parseKeyValueOpts(opts opts.ListOpts) ([]KeyValuePair, error) {
+	out := make([]KeyValuePair, opts.Len())
 	for i, o := range opts.GetAll() {
 		k, v, err := parsers.ParseKeyValueOpt(o)
 		if err != nil {
 			return nil, err
 		}
-		out[i] = utils.KeyValuePair{Key: k, Value: v}
+		out[i] = KeyValuePair{Key: k, Value: v}
 	}
 	return out, nil
 }
diff --git a/runconfig/parse_experimental.go b/runconfig/parse_experimental.go
new file mode 100644
index 0000000..886b377
--- /dev/null
+++ b/runconfig/parse_experimental.go
@@ -0,0 +1,19 @@
+// +build experimental
+
+package runconfig
+
+import flag "github.com/docker/docker/pkg/mflag"
+
+type experimentalFlags struct {
+	flags map[string]interface{}
+}
+
+func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
+	flags := make(map[string]interface{})
+	flags["volume-driver"] = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
+	return &experimentalFlags{flags: flags}
+}
+
+func applyExperimentalFlags(exp *experimentalFlags, config *Config, hostConfig *HostConfig) {
+	config.VolumeDriver = *(exp.flags["volume-driver"]).(*string)
+}
diff --git a/runconfig/parse_stub.go b/runconfig/parse_stub.go
new file mode 100644
index 0000000..391b6ed
--- /dev/null
+++ b/runconfig/parse_stub.go
@@ -0,0 +1,14 @@
+// +build !experimental
+
+package runconfig
+
+import flag "github.com/docker/docker/pkg/mflag"
+
+type experimentalFlags struct{}
+
+func attachExperimentalFlags(cmd *flag.FlagSet) *experimentalFlags {
+	return nil
+}
+
+func applyExperimentalFlags(flags *experimentalFlags, config *Config, hostConfig *HostConfig) {
+}
diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go
index cd90dc3..6c0a1cf 100644
--- a/runconfig/parse_test.go
+++ b/runconfig/parse_test.go
@@ -57,3 +57,9 @@
 		t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err)
 	}
 }
+
+func TestConflictContainerNetworkAndLinks(t *testing.T) {
+	if _, _, _, err := parseRun([]string{"--net=container:other", "--link=zip:zap", "img", "cmd"}); err != ErrConflictContainerNetworkAndLinks {
+		t.Fatalf("Expected error ErrConflictContainerNetworkAndLinks, got: %s", err)
+	}
+}
diff --git a/trust/service.go b/trust/service.go
index 324a478..6a804fa 100644
--- a/trust/service.go
+++ b/trust/service.go
@@ -4,71 +4,50 @@
 	"fmt"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/engine"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/libtrust"
 )
 
-func (t *TrustStore) Install(eng *engine.Engine) error {
-	for name, handler := range map[string]engine.Handler{
-		"trust_key_check":   t.CmdCheckKey,
-		"trust_update_base": t.CmdUpdateBase,
-	} {
-		if err := eng.Register(name, handler); err != nil {
-			return fmt.Errorf("Could not register %q: %v", name, err)
-		}
-	}
-	return nil
+type NotVerifiedError string
+
+func (e NotVerifiedError) Error() string {
+	return string(e)
 }
 
-func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
-	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s NAMESPACE", job.Name)
+func (t *TrustStore) CheckKey(ns string, key []byte, perm uint16) (bool, error) {
+	if len(key) == 0 {
+		return false, fmt.Errorf("Missing PublicKey")
 	}
-	var (
-		namespace = job.Args[0]
-		keyBytes  = job.Getenv("PublicKey")
-	)
-
-	if keyBytes == "" {
-		return job.Errorf("Missing PublicKey")
-	}
-	pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes))
+	pk, err := libtrust.UnmarshalPublicKeyJWK(key)
 	if err != nil {
-		return job.Errorf("Error unmarshalling public key: %s", err)
+		return false, fmt.Errorf("Error unmarshalling public key: %v", err)
 	}
 
-	permission := uint16(job.GetenvInt("Permission"))
-	if permission == 0 {
-		permission = 0x03
+	if perm == 0 {
+		perm = 0x03
 	}
 
 	t.RLock()
 	defer t.RUnlock()
 	if t.graph == nil {
-		job.Stdout.Write([]byte("no graph"))
-		return engine.StatusOK
+		return false, NotVerifiedError("no graph")
 	}
 
 	// Check if any expired grants
-	verified, err := t.graph.Verify(pk, namespace, permission)
+	verified, err := t.graph.Verify(pk, ns, perm)
 	if err != nil {
-		return job.Errorf("Error verifying key to namespace: %s", namespace)
+		return false, fmt.Errorf("Error verifying key to namespace: %s", ns)
 	}
 	if !verified {
-		log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID())
-		job.Stdout.Write([]byte("not verified"))
-	} else if t.expiration.Before(time.Now()) {
-		job.Stdout.Write([]byte("expired"))
-	} else {
-		job.Stdout.Write([]byte("verified"))
+		logrus.Debugf("Verification failed for %s using key %s", ns, pk.KeyID())
+		return false, NotVerifiedError("not verified")
 	}
-
-	return engine.StatusOK
+	if t.expiration.Before(time.Now()) {
+		return false, NotVerifiedError("expired")
+	}
+	return true, nil
 }
 
-func (t *TrustStore) CmdUpdateBase(job *engine.Job) engine.Status {
+func (t *TrustStore) UpdateBase() {
 	t.fetch()
-
-	return engine.StatusOK
 }
diff --git a/trust/trusts.go b/trust/trusts.go
index f5e317e..885127e 100644
--- a/trust/trusts.go
+++ b/trust/trusts.go
@@ -12,7 +12,7 @@
 	"sync"
 	"time"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/libtrust/trustgraph"
 )
 
@@ -62,8 +62,7 @@
 		baseEndpoints: endpoints,
 	}
 
-	err = t.reload()
-	if err != nil {
+	if err := t.reload(); err != nil {
 		return nil, err
 	}
 
@@ -93,7 +92,7 @@
 	}
 	if len(statements) == 0 {
 		if t.autofetch {
-			log.Debugf("No grants, fetching")
+			logrus.Debugf("No grants, fetching")
 			t.fetcher = time.AfterFunc(t.fetchTime, t.fetch)
 		}
 		return nil
@@ -106,7 +105,7 @@
 
 	t.expiration = expiration
 	t.graph = trustgraph.NewMemoryGraph(grants)
-	log.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration)
+	logrus.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration)
 
 	if t.autofetch {
 		nextFetch := expiration.Sub(time.Now())
@@ -161,28 +160,26 @@
 	for bg, ep := range t.baseEndpoints {
 		statement, err := t.fetchBaseGraph(ep)
 		if err != nil {
-			log.Infof("Trust graph fetch failed: %s", err)
+			logrus.Infof("Trust graph fetch failed: %s", err)
 			continue
 		}
 		b, err := statement.Bytes()
 		if err != nil {
-			log.Infof("Bad trust graph statement: %s", err)
+			logrus.Infof("Bad trust graph statement: %s", err)
 			continue
 		}
 		// TODO check if value differs
-		err = ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600)
-		if err != nil {
-			log.Infof("Error writing trust graph statement: %s", err)
+		if err := ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600); err != nil {
+			logrus.Infof("Error writing trust graph statement: %s", err)
 		}
 		fetchCount++
 	}
-	log.Debugf("Fetched %d base graphs at %s", fetchCount, time.Now())
+	logrus.Debugf("Fetched %d base graphs at %s", fetchCount, time.Now())
 
 	if fetchCount > 0 {
 		go func() {
-			err := t.reload()
-			if err != nil {
-				log.Infof("Reload of trust graph failed: %s", err)
+			if err := t.reload(); err != nil {
+				logrus.Infof("Reload of trust graph failed: %s", err)
 			}
 		}()
 		t.fetchTime = defaultFetchtime
diff --git a/utils/daemon.go b/utils/daemon.go
deleted file mode 100644
index 871122e..0000000
--- a/utils/daemon.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package utils
-
-import (
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"strconv"
-)
-
-func CreatePidFile(pidfile string) error {
-	if pidString, err := ioutil.ReadFile(pidfile); err == nil {
-		pid, err := strconv.Atoi(string(pidString))
-		if err == nil {
-			if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
-				return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
-			}
-		}
-	}
-
-	file, err := os.Create(pidfile)
-	if err != nil {
-		return err
-	}
-
-	defer file.Close()
-
-	_, err = fmt.Fprintf(file, "%d", os.Getpid())
-	return err
-}
-
-func RemovePidFile(pidfile string) {
-	if err := os.Remove(pidfile); err != nil {
-		log.Printf("Error removing %s: %s", pidfile, err)
-	}
-}
diff --git a/utils/experimental.go b/utils/experimental.go
new file mode 100644
index 0000000..b308a59
--- /dev/null
+++ b/utils/experimental.go
@@ -0,0 +1,7 @@
+// +build experimental
+
+package utils
+
+func ExperimentalBuild() bool {
+	return true
+}
diff --git a/utils/flags.go b/utils/flags.go
deleted file mode 100644
index 33c7227..0000000
--- a/utils/flags.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package utils
-
-import (
-	"fmt"
-	"os"
-
-	flag "github.com/docker/docker/pkg/mflag"
-)
-
-// ParseFlags is a utility function that adds a help flag if withHelp is true,
-// calls cmd.Parse(args) and prints a relevant error message if there are
-// incorrect number of arguments. It returns error only if error handling is
-// set to ContinueOnError and parsing fails. If error handling is set to
-// ExitOnError, it's safe to ignore the return value.
-// TODO: move this to a better package than utils
-func ParseFlags(cmd *flag.FlagSet, args []string, withHelp bool) error {
-	var help *bool
-	if withHelp {
-		help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage")
-	}
-	if err := cmd.Parse(args); err != nil {
-		return err
-	}
-	if help != nil && *help {
-		cmd.Usage()
-		// just in case Usage does not exit
-		os.Exit(0)
-	}
-	if str := cmd.CheckArgs(); str != "" {
-		ReportError(cmd, str, withHelp)
-	}
-	return nil
-}
-
-func ReportError(cmd *flag.FlagSet, str string, withHelp bool) {
-	if withHelp {
-		if os.Args[0] == cmd.Name() {
-			str += ". See '" + os.Args[0] + " --help'"
-		} else {
-			str += ". See '" + os.Args[0] + " " + cmd.Name() + " --help'"
-		}
-	}
-	fmt.Fprintf(cmd.Out(), "docker: %s.\n", str)
-	os.Exit(1)
-}
diff --git a/utils/git.go b/utils/git.go
new file mode 100644
index 0000000..ce8924d
--- /dev/null
+++ b/utils/git.go
@@ -0,0 +1,93 @@
+package utils
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/urlutil"
+)
+
+func GitClone(remoteURL string) (string, error) {
+	if !urlutil.IsGitTransport(remoteURL) {
+		remoteURL = "https://" + remoteURL
+	}
+	root, err := ioutil.TempDir("", "docker-build-git")
+	if err != nil {
+		return "", err
+	}
+
+	u, err := url.Parse(remoteURL)
+	if err != nil {
+		return "", err
+	}
+
+	fragment := u.Fragment
+	clone := cloneArgs(u, root)
+
+	if output, err := git(clone...); err != nil {
+		return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output)
+	}
+
+	return checkoutGit(fragment, root)
+}
+
+func cloneArgs(remoteURL *url.URL, root string) []string {
+	args := []string{"clone", "--recursive"}
+	shallow := len(remoteURL.Fragment) == 0
+
+	if shallow && strings.HasPrefix(remoteURL.Scheme, "http") {
+		res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL))
+		if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" {
+			shallow = false
+		}
+	}
+
+	if shallow {
+		args = append(args, "--depth", "1")
+	}
+
+	if remoteURL.Fragment != "" {
+		remoteURL.Fragment = ""
+	}
+
+	return append(args, remoteURL.String(), root)
+}
+
+func checkoutGit(fragment, root string) (string, error) {
+	refAndDir := strings.SplitN(fragment, ":", 2)
+
+	if len(refAndDir[0]) != 0 {
+		if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil {
+			return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output)
+		}
+	}
+
+	if len(refAndDir) > 1 && len(refAndDir[1]) != 0 {
+		newCtx := filepath.Join(root, refAndDir[1])
+		fi, err := os.Stat(newCtx)
+		if err != nil {
+			return "", err
+		}
+		if !fi.IsDir() {
+			return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx)
+		}
+		root = newCtx
+	}
+
+	return root, nil
+}
+
+func gitWithinDir(dir string, args ...string) ([]byte, error) {
+	a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")}
+	return git(append(a, args...)...)
+}
+
+func git(args ...string) ([]byte, error) {
+	return exec.Command("git", args...).CombinedOutput()
+}
diff --git a/utils/git_test.go b/utils/git_test.go
new file mode 100644
index 0000000..10b13e9
--- /dev/null
+++ b/utils/git_test.go
@@ -0,0 +1,175 @@
+package utils
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os"
+	"path/filepath"
+	"reflect"
+	"testing"
+)
+
+func TestCloneArgsSmartHttp(t *testing.T) {
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	serverURL, _ := url.Parse(server.URL)
+
+	serverURL.Path = "/repo.git"
+	gitURL := serverURL.String()
+
+	mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) {
+		q := r.URL.Query().Get("service")
+		w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q))
+	})
+
+	args := cloneArgs(serverURL, "/tmp")
+	exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"}
+	if !reflect.DeepEqual(args, exp) {
+		t.Fatalf("Expected %v, got %v", exp, args)
+	}
+}
+
+func TestCloneArgsDumbHttp(t *testing.T) {
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	serverURL, _ := url.Parse(server.URL)
+
+	serverURL.Path = "/repo.git"
+	gitURL := serverURL.String()
+
+	mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "text/plain")
+	})
+
+	args := cloneArgs(serverURL, "/tmp")
+	exp := []string{"clone", "--recursive", gitURL, "/tmp"}
+	if !reflect.DeepEqual(args, exp) {
+		t.Fatalf("Expected %v, got %v", exp, args)
+	}
+}
+
+func TestCloneArgsGit(t *testing.T) {
+	u, _ := url.Parse("git://github.com/docker/docker")
+	args := cloneArgs(u, "/tmp")
+	exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"}
+	if !reflect.DeepEqual(args, exp) {
+		t.Fatalf("Expected %v, got %v", exp, args)
+	}
+}
+
+func TestCloneArgsStripFragment(t *testing.T) {
+	u, _ := url.Parse("git://github.com/docker/docker#test")
+	args := cloneArgs(u, "/tmp")
+	exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"}
+	if !reflect.DeepEqual(args, exp) {
+		t.Fatalf("Expected %v, got %v", exp, args)
+	}
+}
+
+func TestCheckoutGit(t *testing.T) {
+	root, err := ioutil.TempDir("", "docker-build-git-checkout")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(root)
+
+	gitDir := filepath.Join(root, "repo")
+	_, err = git("init", gitDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	subDir := filepath.Join(gitDir, "subdir")
+	if err = os.Mkdir(subDir, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil {
+		t.Fatal(err)
+	}
+
+	cases := []struct {
+		frag string
+		exp  string
+		fail bool
+	}{
+		{"", "FROM scratch", false},
+		{"master", "FROM scratch", false},
+		{":subdir", "FROM scratch\nEXPOSE 5000", false},
+		{":nosubdir", "", true},   // missing directory error
+		{":Dockerfile", "", true}, // not a directory error
+		{"master:nosubdir", "", true},
+		{"master:subdir", "FROM scratch\nEXPOSE 5000", false},
+		{"test", "FROM scratch\nEXPOSE 3000", false},
+		{"test:", "FROM scratch\nEXPOSE 3000", false},
+		{"test:subdir", "FROM busybox\nEXPOSE 5000", false},
+	}
+
+	for _, c := range cases {
+		r, err := checkoutGit(c.frag, gitDir)
+
+		fail := err != nil
+		if fail != c.fail {
+			t.Fatalf("Expected %v failure, error was %v\n", c.fail, err)
+		}
+		if c.fail {
+			continue
+		}
+
+		b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile"))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if string(b) != c.exp {
+			t.Fatalf("Expected %v, was %v\n", c.exp, string(b))
+		}
+	}
+}
diff --git a/utils/http.go b/utils/http.go
deleted file mode 100644
index 24eaea5..0000000
--- a/utils/http.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package utils
-
-import (
-	"io"
-	"net/http"
-	"strings"
-
-	log "github.com/Sirupsen/logrus"
-)
-
-// VersionInfo is used to model entities which has a version.
-// It is basically a tupple with name and version.
-type VersionInfo interface {
-	Name() string
-	Version() string
-}
-
-func validVersion(version VersionInfo) bool {
-	const stopChars = " \t\r\n/"
-	name := version.Name()
-	vers := version.Version()
-	if len(name) == 0 || strings.ContainsAny(name, stopChars) {
-		return false
-	}
-	if len(vers) == 0 || strings.ContainsAny(vers, stopChars) {
-		return false
-	}
-	return true
-}
-
-// Convert versions to a string and append the string to the string base.
-//
-// Each VersionInfo will be converted to a string in the format of
-// "product/version", where the "product" is get from the Name() method, while
-// version is get from the Version() method. Several pieces of verson information
-// will be concatinated and separated by space.
-func appendVersions(base string, versions ...VersionInfo) string {
-	if len(versions) == 0 {
-		return base
-	}
-
-	verstrs := make([]string, 0, 1+len(versions))
-	if len(base) > 0 {
-		verstrs = append(verstrs, base)
-	}
-
-	for _, v := range versions {
-		if !validVersion(v) {
-			continue
-		}
-		verstrs = append(verstrs, v.Name()+"/"+v.Version())
-	}
-	return strings.Join(verstrs, " ")
-}
-
-// HTTPRequestDecorator is used to change an instance of
-// http.Request. It could be used to add more header fields,
-// change body, etc.
-type HTTPRequestDecorator interface {
-	// ChangeRequest() changes the request accordingly.
-	// The changed request will be returned or err will be non-nil
-	// if an error occur.
-	ChangeRequest(req *http.Request) (newReq *http.Request, err error)
-}
-
-// HTTPUserAgentDecorator appends the product/version to the user agent field
-// of a request.
-type HTTPUserAgentDecorator struct {
-	versions []VersionInfo
-}
-
-func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator {
-	return &HTTPUserAgentDecorator{
-		versions: versions,
-	}
-}
-
-func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
-	if req == nil {
-		return req, nil
-	}
-
-	userAgent := appendVersions(req.UserAgent(), h.versions...)
-	if len(userAgent) > 0 {
-		req.Header.Set("User-Agent", userAgent)
-	}
-	return req, nil
-}
-
-type HTTPMetaHeadersDecorator struct {
-	Headers map[string][]string
-}
-
-func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) {
-	if h.Headers == nil {
-		return req, nil
-	}
-	for k, v := range h.Headers {
-		req.Header[k] = v
-	}
-	return req, nil
-}
-
-type HTTPAuthDecorator struct {
-	login    string
-	password string
-}
-
-func NewHTTPAuthDecorator(login, password string) HTTPRequestDecorator {
-	return &HTTPAuthDecorator{
-		login:    login,
-		password: password,
-	}
-}
-
-func (self *HTTPAuthDecorator) ChangeRequest(req *http.Request) (*http.Request, error) {
-	req.SetBasicAuth(self.login, self.password)
-	return req, nil
-}
-
-// HTTPRequestFactory creates an HTTP request
-// and applies a list of decorators on the request.
-type HTTPRequestFactory struct {
-	decorators []HTTPRequestDecorator
-}
-
-func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory {
-	return &HTTPRequestFactory{
-		decorators: d,
-	}
-}
-
-func (self *HTTPRequestFactory) AddDecorator(d ...HTTPRequestDecorator) {
-	self.decorators = append(self.decorators, d...)
-}
-
-func (self *HTTPRequestFactory) GetDecorators() []HTTPRequestDecorator {
-	return self.decorators
-}
-
-// NewRequest() creates a new *http.Request,
-// applies all decorators in the HTTPRequestFactory on the request,
-// then applies decorators provided by d on the request.
-func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) {
-	req, err := http.NewRequest(method, urlStr, body)
-	if err != nil {
-		return nil, err
-	}
-
-	// By default, a nil factory should work.
-	if h == nil {
-		return req, nil
-	}
-	for _, dec := range h.decorators {
-		req, err = dec.ChangeRequest(req)
-		if err != nil {
-			return nil, err
-		}
-	}
-	for _, dec := range d {
-		req, err = dec.ChangeRequest(req)
-		if err != nil {
-			return nil, err
-		}
-	}
-	log.Debugf("%v -- HEADERS: %v", req.URL, req.Header)
-	return req, err
-}
diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go
deleted file mode 100644
index 74d3112..0000000
--- a/utils/jsonmessage.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package utils
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/pkg/term"
-	"github.com/docker/docker/pkg/timeutils"
-	"github.com/docker/docker/pkg/units"
-)
-
-type JSONError struct {
-	Code    int    `json:"code,omitempty"`
-	Message string `json:"message,omitempty"`
-}
-
-func (e *JSONError) Error() string {
-	return e.Message
-}
-
-type JSONProgress struct {
-	terminalFd uintptr
-	Current    int   `json:"current,omitempty"`
-	Total      int   `json:"total,omitempty"`
-	Start      int64 `json:"start,omitempty"`
-}
-
-func (p *JSONProgress) String() string {
-	var (
-		width       = 200
-		pbBox       string
-		numbersBox  string
-		timeLeftBox string
-	)
-
-	ws, err := term.GetWinsize(p.terminalFd)
-	if err == nil {
-		width = int(ws.Width)
-	}
-
-	if p.Current <= 0 && p.Total <= 0 {
-		return ""
-	}
-	current := units.HumanSize(float64(p.Current))
-	if p.Total <= 0 {
-		return fmt.Sprintf("%8v", current)
-	}
-	total := units.HumanSize(float64(p.Total))
-	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
-	if percentage > 50 {
-		percentage = 50
-	}
-	if width > 110 {
-		// this number can't be negetive gh#7136
-		numSpaces := 0
-		if 50-percentage > 0 {
-			numSpaces = 50 - percentage
-		}
-		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
-	}
-	numbersBox = fmt.Sprintf("%8v/%v", current, total)
-
-	if p.Current > 0 && p.Start > 0 && percentage < 50 {
-		fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
-		perEntry := fromStart / time.Duration(p.Current)
-		left := time.Duration(p.Total-p.Current) * perEntry
-		left = (left / time.Second) * time.Second
-
-		if width > 50 {
-			timeLeftBox = " " + left.String()
-		}
-	}
-	return pbBox + numbersBox + timeLeftBox
-}
-
-type JSONMessage struct {
-	Stream          string        `json:"stream,omitempty"`
-	Status          string        `json:"status,omitempty"`
-	Progress        *JSONProgress `json:"progressDetail,omitempty"`
-	ProgressMessage string        `json:"progress,omitempty"` //deprecated
-	ID              string        `json:"id,omitempty"`
-	From            string        `json:"from,omitempty"`
-	Time            int64         `json:"time,omitempty"`
-	Error           *JSONError    `json:"errorDetail,omitempty"`
-	ErrorMessage    string        `json:"error,omitempty"` //deprecated
-}
-
-func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
-	if jm.Error != nil {
-		if jm.Error.Code == 401 {
-			return fmt.Errorf("Authentication is required.")
-		}
-		return jm.Error
-	}
-	var endl string
-	if isTerminal && jm.Stream == "" && jm.Progress != nil {
-		// <ESC>[2K = erase entire current line
-		fmt.Fprintf(out, "%c[2K\r", 27)
-		endl = "\r"
-	} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
-		return nil
-	}
-	if jm.Time != 0 {
-		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
-	}
-	if jm.ID != "" {
-		fmt.Fprintf(out, "%s: ", jm.ID)
-	}
-	if jm.From != "" {
-		fmt.Fprintf(out, "(from %s) ", jm.From)
-	}
-	if jm.Progress != nil && isTerminal {
-		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
-	} else if jm.ProgressMessage != "" { //deprecated
-		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
-	} else if jm.Stream != "" {
-		fmt.Fprintf(out, "%s%s", jm.Stream, endl)
-	} else {
-		fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
-	}
-	return nil
-}
-
-func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error {
-	var (
-		dec  = json.NewDecoder(in)
-		ids  = make(map[string]int)
-		diff = 0
-	)
-	for {
-		var jm JSONMessage
-		if err := dec.Decode(&jm); err != nil {
-			if err == io.EOF {
-				break
-			}
-			return err
-		}
-
-		if jm.Progress != nil {
-			jm.Progress.terminalFd = terminalFd
-		}
-		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
-			line, ok := ids[jm.ID]
-			if !ok {
-				line = len(ids)
-				ids[jm.ID] = line
-				if isTerminal {
-					fmt.Fprintf(out, "\n")
-				}
-				diff = 0
-			} else {
-				diff = len(ids) - line
-			}
-			if jm.ID != "" && isTerminal {
-				// <ESC>[{diff}A = move cursor up diff rows
-				fmt.Fprintf(out, "%c[%dA", 27, diff)
-			}
-		}
-		err := jm.Display(out, isTerminal)
-		if jm.ID != "" && isTerminal {
-			// <ESC>[{diff}B = move cursor down diff rows
-			fmt.Fprintf(out, "%c[%dB", 27, diff)
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/utils/jsonmessage_test.go b/utils/jsonmessage_test.go
deleted file mode 100644
index b9103da..0000000
--- a/utils/jsonmessage_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package utils
-
-import (
-	"testing"
-)
-
-func TestError(t *testing.T) {
-	je := JSONError{404, "Not found"}
-	if je.Error() != "Not found" {
-		t.Fatalf("Expected 'Not found' got '%s'", je.Error())
-	}
-}
-
-func TestProgress(t *testing.T) {
-	jp := JSONProgress{}
-	if jp.String() != "" {
-		t.Fatalf("Expected empty string, got '%s'", jp.String())
-	}
-
-	expected := "     1 B"
-	jp2 := JSONProgress{Current: 1}
-	if jp2.String() != expected {
-		t.Fatalf("Expected %q, got %q", expected, jp2.String())
-	}
-
-	expected = "[=========================>                         ]     50 B/100 B"
-	jp3 := JSONProgress{Current: 50, Total: 100}
-	if jp3.String() != expected {
-		t.Fatalf("Expected %q, got %q", expected, jp3.String())
-	}
-
-	// this number can't be negetive gh#7136
-	expected = "[==================================================>]     50 B/40 B"
-	jp4 := JSONProgress{Current: 50, Total: 40}
-	if jp4.String() != expected {
-		t.Fatalf("Expected %q, got %q", expected, jp4.String())
-	}
-}
diff --git a/utils/streamformatter.go b/utils/streamformatter.go
deleted file mode 100644
index e5b15f9..0000000
--- a/utils/streamformatter.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package utils
-
-import (
-	"encoding/json"
-	"fmt"
-	"github.com/docker/docker/pkg/progressreader"
-	"io"
-)
-
-type StreamFormatter struct {
-	json bool
-}
-
-func NewStreamFormatter(json bool) *StreamFormatter {
-	return &StreamFormatter{json}
-}
-
-const streamNewline = "\r\n"
-
-var streamNewlineBytes = []byte(streamNewline)
-
-func (sf *StreamFormatter) FormatStream(str string) []byte {
-	if sf.json {
-		b, err := json.Marshal(&JSONMessage{Stream: str})
-		if err != nil {
-			return sf.FormatError(err)
-		}
-		return append(b, streamNewlineBytes...)
-	}
-	return []byte(str + "\r")
-}
-
-func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte {
-	str := fmt.Sprintf(format, a...)
-	if sf.json {
-		b, err := json.Marshal(&JSONMessage{ID: id, Status: str})
-		if err != nil {
-			return sf.FormatError(err)
-		}
-		return append(b, streamNewlineBytes...)
-	}
-	return []byte(str + streamNewline)
-}
-
-func (sf *StreamFormatter) FormatError(err error) []byte {
-	if sf.json {
-		jsonError, ok := err.(*JSONError)
-		if !ok {
-			jsonError = &JSONError{Message: err.Error()}
-		}
-		if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
-			return append(b, streamNewlineBytes...)
-		}
-		return []byte("{\"error\":\"format error\"}" + streamNewline)
-	}
-	return []byte("Error: " + err.Error() + streamNewline)
-}
-func (sf *StreamFormatter) FormatProg(id, action string, p interface{}) []byte {
-	switch progress := p.(type) {
-	case *JSONProgress:
-		return sf.FormatProgress(id, action, progress)
-	case progressreader.PR_JSONProgress:
-		return sf.FormatProgress(id, action, &JSONProgress{Current: progress.GetCurrent(), Total: progress.GetTotal()})
-	}
-	return nil
-}
-func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgress) []byte {
-	if progress == nil {
-		progress = &JSONProgress{}
-	}
-	if sf.json {
-
-		b, err := json.Marshal(&JSONMessage{
-			Status:          action,
-			ProgressMessage: progress.String(),
-			Progress:        progress,
-			ID:              id,
-		})
-		if err != nil {
-			return nil
-		}
-		return b
-	}
-	endl := "\r"
-	if progress.String() == "" {
-		endl += "\n"
-	}
-	return []byte(action + " " + progress.String() + endl)
-}
-
-func (sf *StreamFormatter) Json() bool {
-	return sf.json
-}
-
-type StdoutFormater struct {
-	io.Writer
-	*StreamFormatter
-}
-
-func (sf *StdoutFormater) Write(buf []byte) (int, error) {
-	formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
-	n, err := sf.Writer.Write(formattedBuf)
-	if n != len(formattedBuf) {
-		return n, io.ErrShortWrite
-	}
-	return len(buf), err
-}
-
-type StderrFormater struct {
-	io.Writer
-	*StreamFormatter
-}
-
-func (sf *StderrFormater) Write(buf []byte) (int, error) {
-	formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
-	n, err := sf.Writer.Write(formattedBuf)
-	if n != len(formattedBuf) {
-		return n, io.ErrShortWrite
-	}
-	return len(buf), err
-}
diff --git a/utils/streamformatter_test.go b/utils/streamformatter_test.go
deleted file mode 100644
index 20610f6..0000000
--- a/utils/streamformatter_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package utils
-
-import (
-	"encoding/json"
-	"errors"
-	"reflect"
-	"testing"
-)
-
-func TestFormatStream(t *testing.T) {
-	sf := NewStreamFormatter(true)
-	res := sf.FormatStream("stream")
-	if string(res) != `{"stream":"stream"}`+"\r\n" {
-		t.Fatalf("%q", res)
-	}
-}
-
-func TestFormatStatus(t *testing.T) {
-	sf := NewStreamFormatter(true)
-	res := sf.FormatStatus("ID", "%s%d", "a", 1)
-	if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" {
-		t.Fatalf("%q", res)
-	}
-}
-
-func TestFormatSimpleError(t *testing.T) {
-	sf := NewStreamFormatter(true)
-	res := sf.FormatError(errors.New("Error for formatter"))
-	if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" {
-		t.Fatalf("%q", res)
-	}
-}
-
-func TestFormatJSONError(t *testing.T) {
-	sf := NewStreamFormatter(true)
-	err := &JSONError{Code: 50, Message: "Json error"}
-	res := sf.FormatError(err)
-	if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" {
-		t.Fatalf("%q", res)
-	}
-}
-
-func TestFormatProgress(t *testing.T) {
-	sf := NewStreamFormatter(true)
-	progress := &JSONProgress{
-		Current: 15,
-		Total:   30,
-		Start:   1,
-	}
-	res := sf.FormatProgress("id", "action", progress)
-	msg := &JSONMessage{}
-	if err := json.Unmarshal(res, msg); err != nil {
-		t.Fatal(err)
-	}
-	if msg.ID != "id" {
-		t.Fatalf("ID must be 'id', got: %s", msg.ID)
-	}
-	if msg.Status != "action" {
-		t.Fatalf("Status must be 'action', got: %s", msg.Status)
-	}
-	if msg.ProgressMessage != progress.String() {
-		t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage)
-	}
-	if !reflect.DeepEqual(msg.Progress, progress) {
-		t.Fatal("Original progress not equals progress from FormatProgress")
-	}
-}
diff --git a/utils/stubs.go b/utils/stubs.go
new file mode 100644
index 0000000..b376f0c
--- /dev/null
+++ b/utils/stubs.go
@@ -0,0 +1,7 @@
+// +build !experimental
+
+package utils
+
+func ExperimentalBuild() bool {
+	return false
+}
diff --git a/utils/tcp.go b/utils/tcp.go
new file mode 100644
index 0000000..75980ff
--- /dev/null
+++ b/utils/tcp.go
@@ -0,0 +1,22 @@
+package utils
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+func ConfigureTCPTransport(tr *http.Transport, proto, addr string) {
+	// Why 32? See https://github.com/docker/docker/pull/8035.
+	timeout := 32 * time.Second
+	if proto == "unix" {
+		// No need for compression in local communications.
+		tr.DisableCompression = true
+		tr.Dial = func(_, _ string) (net.Conn, error) {
+			return net.DialTimeout(proto, addr, timeout)
+		}
+	} else {
+		tr.Proxy = http.ProxyFromEnvironment
+		tr.Dial = (&net.Dialer{Timeout: timeout}).Dial
+	}
+}
diff --git a/utils/tmpdir.go b/utils/tmpdir.go
deleted file mode 100644
index e200f34..0000000
--- a/utils/tmpdir.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package utils
-
-import (
-	"os"
-	"path/filepath"
-)
-
-// TempDir returns the default directory to use for temporary files.
-func TempDir(rootDir string) (string, error) {
-	var tmpDir string
-	if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
-		tmpDir = filepath.Join(rootDir, "tmp")
-	}
-	err := os.MkdirAll(tmpDir, 0700)
-	return tmpDir, err
-}
diff --git a/utils/utils.go b/utils/utils.go
index 540ae6f..cb1b7b3 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -2,57 +2,23 @@
 
 import (
 	"bufio"
-	"bytes"
 	"crypto/sha1"
-	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
 	"io"
 	"io/ioutil"
-	"net/http"
 	"os"
 	"os/exec"
 	"path/filepath"
-	"regexp"
 	"runtime"
 	"strings"
-	"sync"
 
-	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/autogen/dockerversion"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/common"
 	"github.com/docker/docker/pkg/fileutils"
-	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/stringid"
 )
 
-type KeyValuePair struct {
-	Key   string
-	Value string
-}
-
-var (
-	validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
-)
-
-// Request a given URL and return an io.Reader
-func Download(url string) (resp *http.Response, err error) {
-	if resp, err = http.Get(url); err != nil {
-		return nil, err
-	}
-	if resp.StatusCode >= 400 {
-		return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
-	}
-	return resp, nil
-}
-
-func Trunc(s string, maxlen int) string {
-	if len(s) <= maxlen {
-		return s
-	}
-	return s[:maxlen]
-}
-
 // Figure out the absolute path of our own binary (if it's still around).
 func SelfPath() string {
 	path, err := exec.LookPath(os.Args[0])
@@ -126,12 +92,12 @@
 		filepath.Join(filepath.Dir(selfPath), "dockerinit"),
 
 		// FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec."
-		// http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec
+		// https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec
 		"/usr/libexec/docker/dockerinit",
 		"/usr/local/libexec/docker/dockerinit",
 
 		// FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts."
-		// http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA
+		// https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA
 		"/usr/lib/docker/dockerinit",
 		"/usr/local/lib/docker/dockerinit",
 	}
@@ -154,157 +120,6 @@
 	return ""
 }
 
-func GetTotalUsedFds() int {
-	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
-		log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
-	} else {
-		return len(fds)
-	}
-	return -1
-}
-
-func ValidateID(id string) error {
-	if ok := validHex.MatchString(id); !ok {
-		err := fmt.Errorf("image ID '%s' is invalid", id)
-		return err
-	}
-	return nil
-}
-
-// Code c/c from io.Copy() modified to handle escape sequence
-func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
-	buf := make([]byte, 32*1024)
-	for {
-		nr, er := src.Read(buf)
-		if nr > 0 {
-			// ---- Docker addition
-			// char 16 is C-p
-			if nr == 1 && buf[0] == 16 {
-				nr, er = src.Read(buf)
-				// char 17 is C-q
-				if nr == 1 && buf[0] == 17 {
-					if err := src.Close(); err != nil {
-						return 0, err
-					}
-					return 0, nil
-				}
-			}
-			// ---- End of docker
-			nw, ew := dst.Write(buf[0:nr])
-			if nw > 0 {
-				written += int64(nw)
-			}
-			if ew != nil {
-				err = ew
-				break
-			}
-			if nr != nw {
-				err = io.ErrShortWrite
-				break
-			}
-		}
-		if er == io.EOF {
-			break
-		}
-		if er != nil {
-			err = er
-			break
-		}
-	}
-	return written, err
-}
-
-func HashData(src io.Reader) (string, error) {
-	h := sha256.New()
-	if _, err := io.Copy(h, src); err != nil {
-		return "", err
-	}
-	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
-}
-
-type WriteFlusher struct {
-	sync.Mutex
-	w       io.Writer
-	flusher http.Flusher
-}
-
-func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
-	wf.Lock()
-	defer wf.Unlock()
-	n, err = wf.w.Write(b)
-	wf.flusher.Flush()
-	return n, err
-}
-
-// Flush the stream immediately.
-func (wf *WriteFlusher) Flush() {
-	wf.Lock()
-	defer wf.Unlock()
-	wf.flusher.Flush()
-}
-
-func NewWriteFlusher(w io.Writer) *WriteFlusher {
-	var flusher http.Flusher
-	if f, ok := w.(http.Flusher); ok {
-		flusher = f
-	} else {
-		flusher = &ioutils.NopFlusher{}
-	}
-	return &WriteFlusher{w: w, flusher: flusher}
-}
-
-func NewHTTPRequestError(msg string, res *http.Response) error {
-	return &JSONError{
-		Message: msg,
-		Code:    res.StatusCode,
-	}
-}
-
-// An StatusError reports an unsuccessful exit by a command.
-type StatusError struct {
-	Status     string
-	StatusCode int
-}
-
-func (e *StatusError) Error() string {
-	return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
-}
-
-func quote(word string, buf *bytes.Buffer) {
-	// Bail out early for "simple" strings
-	if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
-		buf.WriteString(word)
-		return
-	}
-
-	buf.WriteString("'")
-
-	for i := 0; i < len(word); i++ {
-		b := word[i]
-		if b == '\'' {
-			// Replace literal ' with a close ', a \', and a open '
-			buf.WriteString("'\\''")
-		} else {
-			buf.WriteByte(b)
-		}
-	}
-
-	buf.WriteString("'")
-}
-
-// Take a list of strings and escape them so they will be handled right
-// when passed as arguments to an program via a shell
-func ShellQuoteArguments(args []string) string {
-	var buf bytes.Buffer
-	for i, arg := range args {
-		if i != 0 {
-			buf.WriteByte(' ')
-		}
-		quote(arg, &buf)
-	}
-	return buf.String()
-}
-
 var globalTestID string
 
 // TestDirectory creates a new temporary directory and returns its path.
@@ -312,7 +127,7 @@
 // new directory.
 func TestDirectory(templateDir string) (dir string, err error) {
 	if globalTestID == "" {
-		globalTestID = common.RandomString()[:4]
+		globalTestID = stringid.GenerateRandomID()[:4]
 	}
 	prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
 	if prefix == "" {
@@ -342,26 +157,6 @@
 	return callerShortName
 }
 
-func CopyFile(src, dst string) (int64, error) {
-	if src == dst {
-		return 0, nil
-	}
-	sf, err := os.Open(src)
-	if err != nil {
-		return 0, err
-	}
-	defer sf.Close()
-	if err := os.Remove(dst); err != nil && !os.IsNotExist(err) {
-		return 0, err
-	}
-	df, err := os.Create(dst)
-	if err != nil {
-		return 0, err
-	}
-	defer df.Close()
-	return io.Copy(df, sf)
-}
-
 // ReplaceOrAppendValues returns the defaults with the overrides either
 // replaced by env key or appended to the list
 func ReplaceOrAppendEnvValues(defaults, overrides []string) []string {
@@ -400,37 +195,6 @@
 	return defaults
 }
 
-func DoesEnvExist(name string) bool {
-	for _, entry := range os.Environ() {
-		parts := strings.SplitN(entry, "=", 2)
-		if parts[0] == name {
-			return true
-		}
-	}
-	return false
-}
-
-// ReadSymlinkedDirectory returns the target directory of a symlink.
-// The target of the symbolic link may not be a file.
-func ReadSymlinkedDirectory(path string) (string, error) {
-	var realPath string
-	var err error
-	if realPath, err = filepath.Abs(path); err != nil {
-		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
-	}
-	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
-		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
-	}
-	realPathInfo, err := os.Stat(realPath)
-	if err != nil {
-		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
-	}
-	if !realPathInfo.Mode().IsDir() {
-		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
-	}
-	return realPath, nil
-}
-
 // ValidateContextDirectory checks if all the contents of the directory
 // can be read and returns an error if some files can't be read
 // symlinks which point to non-existing files don't trigger an error
@@ -475,15 +239,6 @@
 	})
 }
 
-func StringsContainsNoCase(slice []string, s string) bool {
-	for _, ss := range slice {
-		if strings.ToLower(s) == strings.ToLower(ss) {
-			return true
-		}
-	}
-	return false
-}
-
 // Reads a .dockerignore file and returns the list of file patterns
 // to ignore. Note this will trim whitespace from each line as well
 // as use GO's "clean" func to get the shortest/cleanest path for each.
@@ -515,27 +270,6 @@
 	return excludes, nil
 }
 
-// Wrap a concrete io.Writer and hold a count of the number
-// of bytes written to the writer during a "session".
-// This can be convenient when write return is masked
-// (e.g., json.Encoder.Encode())
-type WriteCounter struct {
-	Count  int64
-	Writer io.Writer
-}
-
-func NewWriteCounter(w io.Writer) *WriteCounter {
-	return &WriteCounter{
-		Writer: w,
-	}
-}
-
-func (wc *WriteCounter) Write(p []byte) (count int, err error) {
-	count, err = wc.Writer.Write(p)
-	wc.Count += int64(count)
-	return
-}
-
 // ImageReference combines `repo` and `ref` and returns a string representing
 // the combination. If `ref` is a digest (meaning it's of the form
 // <algorithm>:<digest>, the returned string is <repo>@<ref>. Otherwise,
diff --git a/utils/utils_daemon.go b/utils/utils_daemon.go
deleted file mode 100644
index 3f8f4d5..0000000
--- a/utils/utils_daemon.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build daemon
-
-package utils
-
-import (
-	"github.com/docker/docker/pkg/system"
-	"os"
-)
-
-// IsFileOwner checks whether the current user is the owner of the given file.
-func IsFileOwner(f string) bool {
-	if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil {
-		if int(fileInfo.Uid()) == os.Getuid() {
-			return true
-		}
-	}
-	return false
-}
diff --git a/utils/utils_daemon_test.go b/utils/utils_daemon_test.go
deleted file mode 100644
index e836148..0000000
--- a/utils/utils_daemon_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package utils
-
-import (
-	"os"
-	"path"
-	"testing"
-)
-
-func TestIsFileOwner(t *testing.T) {
-	var err error
-	var file *os.File
-
-	if file, err = os.Create(path.Join(os.TempDir(), "testIsFileOwner")); err != nil {
-		t.Fatalf("failed to create file: %s", err)
-	}
-	file.Close()
-
-	if ok := IsFileOwner(path.Join(os.TempDir(), "testIsFileOwner")); !ok {
-		t.Fatalf("User should be owner of file")
-	}
-
-	if err = os.Remove(path.Join(os.TempDir(), "testIsFileOwner")); err != nil {
-		t.Fatalf("failed to remove file: %s", err)
-	}
-
-}
diff --git a/utils/utils_test.go b/utils/utils_test.go
index 94303a0..2863009 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -1,9 +1,10 @@
 package utils
 
 import (
-	"bytes"
+	"fmt"
+	"io/ioutil"
 	"os"
-	"strings"
+	"path/filepath"
 	"testing"
 )
 
@@ -25,104 +26,6 @@
 	}
 }
 
-// Reading a symlink to a directory must return the directory
-func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
-	var err error
-	if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil {
-		t.Errorf("failed to create directory: %s", err)
-	}
-
-	if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil {
-		t.Errorf("failed to create symlink: %s", err)
-	}
-
-	var path string
-	if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil {
-		t.Fatalf("failed to read symlink to directory: %s", err)
-	}
-
-	if path != "/tmp/testReadSymlinkToExistingDirectory" {
-		t.Fatalf("symlink returned unexpected directory: %s", path)
-	}
-
-	if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil {
-		t.Errorf("failed to remove temporary directory: %s", err)
-	}
-
-	if err = os.Remove("/tmp/dirLinkTest"); err != nil {
-		t.Errorf("failed to remove symlink: %s", err)
-	}
-}
-
-// Reading a non-existing symlink must fail
-func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) {
-	var path string
-	var err error
-	if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil {
-		t.Fatalf("error expected for non-existing symlink")
-	}
-
-	if path != "" {
-		t.Fatalf("expected empty path, but '%s' was returned", path)
-	}
-}
-
-// Reading a symlink to a file must fail
-func TestReadSymlinkedDirectoryToFile(t *testing.T) {
-	var err error
-	var file *os.File
-
-	if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
-		t.Fatalf("failed to create file: %s", err)
-	}
-
-	file.Close()
-
-	if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
-		t.Errorf("failed to create symlink: %s", err)
-	}
-
-	var path string
-	if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
-		t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
-	}
-
-	if path != "" {
-		t.Fatalf("path should've been empty: %s", path)
-	}
-
-	if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
-		t.Errorf("failed to remove file: %s", err)
-	}
-
-	if err = os.Remove("/tmp/fileLinkTest"); err != nil {
-		t.Errorf("failed to remove symlink: %s", err)
-	}
-}
-
-func TestWriteCounter(t *testing.T) {
-	dummy1 := "This is a dummy string."
-	dummy2 := "This is another dummy string."
-	totalLength := int64(len(dummy1) + len(dummy2))
-
-	reader1 := strings.NewReader(dummy1)
-	reader2 := strings.NewReader(dummy2)
-
-	var buffer bytes.Buffer
-	wc := NewWriteCounter(&buffer)
-
-	reader1.WriteTo(wc)
-	reader2.WriteTo(wc)
-
-	if wc.Count != totalLength {
-		t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)
-	}
-
-	if buffer.String() != dummy1+dummy2 {
-		t.Error("Wrong message written")
-	}
-}
-
 func TestImageReference(t *testing.T) {
 	tests := []struct {
 		repo     string
@@ -152,3 +55,46 @@
 		t.Errorf("Unexpected DigestReference=true for input %q", input)
 	}
 }
+
+func TestReadDockerIgnore(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "dockerignore-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	diName := filepath.Join(tmpDir, ".dockerignore")
+
+	di, err := ReadDockerIgnore(diName)
+	if err != nil {
+		t.Fatalf("Expected not to have error, got %s", err)
+	}
+
+	if diLen := len(di); diLen != 0 {
+		t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen)
+	}
+
+	content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile")
+	err = ioutil.WriteFile(diName, []byte(content), 0777)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	di, err = ReadDockerIgnore(diName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if di[0] != "test1" {
+		t.Fatalf("First element is not test1")
+	}
+	if di[1] != "/test2" {
+		t.Fatalf("Second element is not /test2")
+	}
+	if di[2] != "/a/file/here" {
+		t.Fatalf("Third element is not /a/file/here")
+	}
+	if di[3] != "lastfile" {
+		t.Fatalf("Fourth element is not lastfile")
+	}
+}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
deleted file mode 100644
index e363aa7..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/common.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tar implements access to tar archives.
-// It aims to cover most of the variations, including those produced
-// by GNU and BSD tars.
-//
-// References:
-//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
-//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
-//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
-package tar
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"os"
-	"path"
-	"time"
-)
-
-const (
-	blockSize = 512
-
-	// Types
-	TypeReg           = '0'    // regular file
-	TypeRegA          = '\x00' // regular file
-	TypeLink          = '1'    // hard link
-	TypeSymlink       = '2'    // symbolic link
-	TypeChar          = '3'    // character device node
-	TypeBlock         = '4'    // block device node
-	TypeDir           = '5'    // directory
-	TypeFifo          = '6'    // fifo node
-	TypeCont          = '7'    // reserved
-	TypeXHeader       = 'x'    // extended header
-	TypeXGlobalHeader = 'g'    // global extended header
-	TypeGNULongName   = 'L'    // Next file has a long name
-	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
-	TypeGNUSparse     = 'S'    // sparse file
-)
-
-// A Header represents a single header in a tar archive.
-// Some fields may not be populated.
-type Header struct {
-	Name       string    // name of header file entry
-	Mode       int64     // permission and mode bits
-	Uid        int       // user id of owner
-	Gid        int       // group id of owner
-	Size       int64     // length in bytes
-	ModTime    time.Time // modified time
-	Typeflag   byte      // type of header entry
-	Linkname   string    // target name of link
-	Uname      string    // user name of owner
-	Gname      string    // group name of owner
-	Devmajor   int64     // major number of character or block device
-	Devminor   int64     // minor number of character or block device
-	AccessTime time.Time // access time
-	ChangeTime time.Time // status change time
-	Xattrs     map[string]string
-}
-
-// File name constants from the tar spec.
-const (
-	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
-	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
-)
-
-// FileInfo returns an os.FileInfo for the Header.
-func (h *Header) FileInfo() os.FileInfo {
-	return headerFileInfo{h}
-}
-
-// headerFileInfo implements os.FileInfo.
-type headerFileInfo struct {
-	h *Header
-}
-
-func (fi headerFileInfo) Size() int64        { return fi.h.Size }
-func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
-func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
-func (fi headerFileInfo) Sys() interface{}   { return fi.h }
-
-// Name returns the base name of the file.
-func (fi headerFileInfo) Name() string {
-	if fi.IsDir() {
-		return path.Base(path.Clean(fi.h.Name))
-	}
-	return path.Base(fi.h.Name)
-}
-
-// Mode returns the permission and mode bits for the headerFileInfo.
-func (fi headerFileInfo) Mode() (mode os.FileMode) {
-	// Set file permission bits.
-	mode = os.FileMode(fi.h.Mode).Perm()
-
-	// Set setuid, setgid and sticky bits.
-	if fi.h.Mode&c_ISUID != 0 {
-		// setuid
-		mode |= os.ModeSetuid
-	}
-	if fi.h.Mode&c_ISGID != 0 {
-		// setgid
-		mode |= os.ModeSetgid
-	}
-	if fi.h.Mode&c_ISVTX != 0 {
-		// sticky
-		mode |= os.ModeSticky
-	}
-
-	// Set file mode bits.
-	// clear perm, setuid, setgid and sticky bits.
-	m := os.FileMode(fi.h.Mode) &^ 07777
-	if m == c_ISDIR {
-		// directory
-		mode |= os.ModeDir
-	}
-	if m == c_ISFIFO {
-		// named pipe (FIFO)
-		mode |= os.ModeNamedPipe
-	}
-	if m == c_ISLNK {
-		// symbolic link
-		mode |= os.ModeSymlink
-	}
-	if m == c_ISBLK {
-		// device file
-		mode |= os.ModeDevice
-	}
-	if m == c_ISCHR {
-		// Unix character device
-		mode |= os.ModeDevice
-		mode |= os.ModeCharDevice
-	}
-	if m == c_ISSOCK {
-		// Unix domain socket
-		mode |= os.ModeSocket
-	}
-
-	switch fi.h.Typeflag {
-	case TypeLink, TypeSymlink:
-		// hard link, symbolic link
-		mode |= os.ModeSymlink
-	case TypeChar:
-		// character device node
-		mode |= os.ModeDevice
-		mode |= os.ModeCharDevice
-	case TypeBlock:
-		// block device node
-		mode |= os.ModeDevice
-	case TypeDir:
-		// directory
-		mode |= os.ModeDir
-	case TypeFifo:
-		// fifo node
-		mode |= os.ModeNamedPipe
-	}
-
-	return mode
-}
-
-// sysStat, if non-nil, populates h from system-dependent fields of fi.
-var sysStat func(fi os.FileInfo, h *Header) error
-
-// Mode constants from the tar spec.
-const (
-	c_ISUID  = 04000   // Set uid
-	c_ISGID  = 02000   // Set gid
-	c_ISVTX  = 01000   // Save text (sticky bit)
-	c_ISDIR  = 040000  // Directory
-	c_ISFIFO = 010000  // FIFO
-	c_ISREG  = 0100000 // Regular file
-	c_ISLNK  = 0120000 // Symbolic link
-	c_ISBLK  = 060000  // Block special file
-	c_ISCHR  = 020000  // Character special file
-	c_ISSOCK = 0140000 // Socket
-)
-
-// Keywords for the PAX Extended Header
-const (
-	paxAtime    = "atime"
-	paxCharset  = "charset"
-	paxComment  = "comment"
-	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
-	paxGid      = "gid"
-	paxGname    = "gname"
-	paxLinkpath = "linkpath"
-	paxMtime    = "mtime"
-	paxPath     = "path"
-	paxSize     = "size"
-	paxUid      = "uid"
-	paxUname    = "uname"
-	paxXattr    = "SCHILY.xattr."
-	paxNone     = ""
-)
-
-// FileInfoHeader creates a partially-populated Header from fi.
-// If fi describes a symlink, FileInfoHeader records link as the link target.
-// If fi describes a directory, a slash is appended to the name.
-// Because os.FileInfo's Name method returns only the base name of
-// the file it describes, it may be necessary to modify the Name field
-// of the returned header to provide the full path name of the file.
-func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
-	if fi == nil {
-		return nil, errors.New("tar: FileInfo is nil")
-	}
-	fm := fi.Mode()
-	h := &Header{
-		Name:    fi.Name(),
-		ModTime: fi.ModTime(),
-		Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
-	}
-	switch {
-	case fm.IsRegular():
-		h.Mode |= c_ISREG
-		h.Typeflag = TypeReg
-		h.Size = fi.Size()
-	case fi.IsDir():
-		h.Typeflag = TypeDir
-		h.Mode |= c_ISDIR
-		h.Name += "/"
-	case fm&os.ModeSymlink != 0:
-		h.Typeflag = TypeSymlink
-		h.Mode |= c_ISLNK
-		h.Linkname = link
-	case fm&os.ModeDevice != 0:
-		if fm&os.ModeCharDevice != 0 {
-			h.Mode |= c_ISCHR
-			h.Typeflag = TypeChar
-		} else {
-			h.Mode |= c_ISBLK
-			h.Typeflag = TypeBlock
-		}
-	case fm&os.ModeNamedPipe != 0:
-		h.Typeflag = TypeFifo
-		h.Mode |= c_ISFIFO
-	case fm&os.ModeSocket != 0:
-		h.Mode |= c_ISSOCK
-	default:
-		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
-	}
-	if fm&os.ModeSetuid != 0 {
-		h.Mode |= c_ISUID
-	}
-	if fm&os.ModeSetgid != 0 {
-		h.Mode |= c_ISGID
-	}
-	if fm&os.ModeSticky != 0 {
-		h.Mode |= c_ISVTX
-	}
-	if sysStat != nil {
-		return h, sysStat(fi, h)
-	}
-	return h, nil
-}
-
-var zeroBlock = make([]byte, blockSize)
-
-// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
-// We compute and return both.
-func checksum(header []byte) (unsigned int64, signed int64) {
-	for i := 0; i < len(header); i++ {
-		if i == 148 {
-			// The chksum field (header[148:156]) is special: it should be treated as space bytes.
-			unsigned += ' ' * 8
-			signed += ' ' * 8
-			i += 7
-			continue
-		}
-		unsigned += int64(header[i])
-		signed += int64(int8(header[i]))
-	}
-	return
-}
-
-type slicer []byte
-
-func (sp *slicer) next(n int) (b []byte) {
-	s := *sp
-	b, *sp = s[0:n], s[n:]
-	return
-}
-
-func isASCII(s string) bool {
-	for _, c := range s {
-		if c >= 0x80 {
-			return false
-		}
-	}
-	return true
-}
-
-func toASCII(s string) string {
-	if isASCII(s) {
-		return s
-	}
-	var buf bytes.Buffer
-	for _, c := range s {
-		if c < 0x80 {
-			buf.WriteByte(byte(c))
-		}
-	}
-	return buf.String()
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
deleted file mode 100644
index 351eaa0..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/example_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar_test
-
-import (
-	"archive/tar"
-	"bytes"
-	"fmt"
-	"io"
-	"log"
-	"os"
-)
-
-func Example() {
-	// Create a buffer to write our archive to.
-	buf := new(bytes.Buffer)
-
-	// Create a new tar archive.
-	tw := tar.NewWriter(buf)
-
-	// Add some files to the archive.
-	var files = []struct {
-		Name, Body string
-	}{
-		{"readme.txt", "This archive contains some text files."},
-		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
-		{"todo.txt", "Get animal handling licence."},
-	}
-	for _, file := range files {
-		hdr := &tar.Header{
-			Name: file.Name,
-			Size: int64(len(file.Body)),
-		}
-		if err := tw.WriteHeader(hdr); err != nil {
-			log.Fatalln(err)
-		}
-		if _, err := tw.Write([]byte(file.Body)); err != nil {
-			log.Fatalln(err)
-		}
-	}
-	// Make sure to check the error on Close.
-	if err := tw.Close(); err != nil {
-		log.Fatalln(err)
-	}
-
-	// Open the tar archive for reading.
-	r := bytes.NewReader(buf.Bytes())
-	tr := tar.NewReader(r)
-
-	// Iterate through the files in the archive.
-	for {
-		hdr, err := tr.Next()
-		if err == io.EOF {
-			// end of tar archive
-			break
-		}
-		if err != nil {
-			log.Fatalln(err)
-		}
-		fmt.Printf("Contents of %s:\n", hdr.Name)
-		if _, err := io.Copy(os.Stdout, tr); err != nil {
-			log.Fatalln(err)
-		}
-		fmt.Println()
-	}
-
-	// Output:
-	// Contents of readme.txt:
-	// This archive contains some text files.
-	// Contents of gopher.txt:
-	// Gopher names:
-	// George
-	// Geoffrey
-	// Gonzo
-	// Contents of todo.txt:
-	// Get animal handling licence.
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
deleted file mode 100644
index a27559d..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader.go
+++ /dev/null
@@ -1,820 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-//   - pax extensions
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"io/ioutil"
-	"os"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var (
-	ErrHeader = errors.New("archive/tar: invalid tar header")
-)
-
-const maxNanoSecondIntSize = 9
-
-// A Reader provides sequential access to the contents of a tar archive.
-// A tar archive consists of a sequence of files.
-// The Next method advances to the next file in the archive (including the first),
-// and then it can be treated as an io.Reader to access the file's data.
-type Reader struct {
-	r       io.Reader
-	err     error
-	pad     int64           // amount of padding (ignored) after current file entry
-	curr    numBytesReader  // reader for current file entry
-	hdrBuff [blockSize]byte // buffer to use in readHeader
-}
-
-// A numBytesReader is an io.Reader with a numBytes method, returning the number
-// of bytes remaining in the underlying encoded data.
-type numBytesReader interface {
-	io.Reader
-	numBytes() int64
-}
-
-// A regFileReader is a numBytesReader for reading file data from a tar archive.
-type regFileReader struct {
-	r  io.Reader // underlying reader
-	nb int64     // number of unread bytes for current file entry
-}
-
-// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
-type sparseFileReader struct {
-	rfr *regFileReader // reads the sparse-encoded file data
-	sp  []sparseEntry  // the sparse map for the file
-	pos int64          // keeps track of file position
-	tot int64          // total size of the file
-}
-
-// Keywords for GNU sparse files in a PAX extended header
-const (
-	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
-	paxGNUSparseOffset    = "GNU.sparse.offset"
-	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
-	paxGNUSparseMap       = "GNU.sparse.map"
-	paxGNUSparseName      = "GNU.sparse.name"
-	paxGNUSparseMajor     = "GNU.sparse.major"
-	paxGNUSparseMinor     = "GNU.sparse.minor"
-	paxGNUSparseSize      = "GNU.sparse.size"
-	paxGNUSparseRealSize  = "GNU.sparse.realsize"
-)
-
-// Keywords for old GNU sparse headers
-const (
-	oldGNUSparseMainHeaderOffset               = 386
-	oldGNUSparseMainHeaderIsExtendedOffset     = 482
-	oldGNUSparseMainHeaderNumEntries           = 4
-	oldGNUSparseExtendedHeaderIsExtendedOffset = 504
-	oldGNUSparseExtendedHeaderNumEntries       = 21
-	oldGNUSparseOffsetSize                     = 12
-	oldGNUSparseNumBytesSize                   = 12
-)
-
-// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
-
-// Next advances to the next entry in the tar archive.
-func (tr *Reader) Next() (*Header, error) {
-	var hdr *Header
-	if tr.err == nil {
-		tr.skipUnread()
-	}
-	if tr.err != nil {
-		return hdr, tr.err
-	}
-	hdr = tr.readHeader()
-	if hdr == nil {
-		return hdr, tr.err
-	}
-	// Check for PAX/GNU header.
-	switch hdr.Typeflag {
-	case TypeXHeader:
-		//  PAX extended header
-		headers, err := parsePAX(tr)
-		if err != nil {
-			return nil, err
-		}
-		// We actually read the whole file,
-		// but this skips alignment padding
-		tr.skipUnread()
-		hdr = tr.readHeader()
-		mergePAX(hdr, headers)
-
-		// Check for a PAX format sparse file
-		sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
-		if err != nil {
-			tr.err = err
-			return nil, err
-		}
-		if sp != nil {
-			// Current file is a PAX format GNU sparse file.
-			// Set the current file reader to a sparse file reader.
-			tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
-		}
-		return hdr, nil
-	case TypeGNULongName:
-		// We have a GNU long name header. Its contents are the real file name.
-		realname, err := ioutil.ReadAll(tr)
-		if err != nil {
-			return nil, err
-		}
-		hdr, err := tr.Next()
-		hdr.Name = cString(realname)
-		return hdr, err
-	case TypeGNULongLink:
-		// We have a GNU long link header.
-		realname, err := ioutil.ReadAll(tr)
-		if err != nil {
-			return nil, err
-		}
-		hdr, err := tr.Next()
-		hdr.Linkname = cString(realname)
-		return hdr, err
-	}
-	return hdr, tr.err
-}
-
-// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
-// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
-// be treated as a regular file.
-func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
-	var sparseFormat string
-
-	// Check for sparse format indicators
-	major, majorOk := headers[paxGNUSparseMajor]
-	minor, minorOk := headers[paxGNUSparseMinor]
-	sparseName, sparseNameOk := headers[paxGNUSparseName]
-	_, sparseMapOk := headers[paxGNUSparseMap]
-	sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
-	sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
-
-	// Identify which, if any, sparse format applies from which PAX headers are set
-	if majorOk && minorOk {
-		sparseFormat = major + "." + minor
-	} else if sparseNameOk && sparseMapOk {
-		sparseFormat = "0.1"
-	} else if sparseSizeOk {
-		sparseFormat = "0.0"
-	} else {
-		// Not a PAX format GNU sparse file.
-		return nil, nil
-	}
-
-	// Check for unknown sparse format
-	if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
-		return nil, nil
-	}
-
-	// Update hdr from GNU sparse PAX headers
-	if sparseNameOk {
-		hdr.Name = sparseName
-	}
-	if sparseSizeOk {
-		realSize, err := strconv.ParseInt(sparseSize, 10, 0)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		hdr.Size = realSize
-	} else if sparseRealSizeOk {
-		realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		hdr.Size = realSize
-	}
-
-	// Set up the sparse map, according to the particular sparse format in use
-	var sp []sparseEntry
-	var err error
-	switch sparseFormat {
-	case "0.0", "0.1":
-		sp, err = readGNUSparseMap0x1(headers)
-	case "1.0":
-		sp, err = readGNUSparseMap1x0(tr.curr)
-	}
-	return sp, err
-}
-
-// mergePAX merges well known headers according to PAX standard.
-// In general headers with the same name as those found
-// in the header struct overwrite those found in the header
-// struct with higher precision or longer values. Esp. useful
-// for name and linkname fields.
-func mergePAX(hdr *Header, headers map[string]string) error {
-	for k, v := range headers {
-		switch k {
-		case paxPath:
-			hdr.Name = v
-		case paxLinkpath:
-			hdr.Linkname = v
-		case paxGname:
-			hdr.Gname = v
-		case paxUname:
-			hdr.Uname = v
-		case paxUid:
-			uid, err := strconv.ParseInt(v, 10, 0)
-			if err != nil {
-				return err
-			}
-			hdr.Uid = int(uid)
-		case paxGid:
-			gid, err := strconv.ParseInt(v, 10, 0)
-			if err != nil {
-				return err
-			}
-			hdr.Gid = int(gid)
-		case paxAtime:
-			t, err := parsePAXTime(v)
-			if err != nil {
-				return err
-			}
-			hdr.AccessTime = t
-		case paxMtime:
-			t, err := parsePAXTime(v)
-			if err != nil {
-				return err
-			}
-			hdr.ModTime = t
-		case paxCtime:
-			t, err := parsePAXTime(v)
-			if err != nil {
-				return err
-			}
-			hdr.ChangeTime = t
-		case paxSize:
-			size, err := strconv.ParseInt(v, 10, 0)
-			if err != nil {
-				return err
-			}
-			hdr.Size = int64(size)
-		default:
-			if strings.HasPrefix(k, paxXattr) {
-				if hdr.Xattrs == nil {
-					hdr.Xattrs = make(map[string]string)
-				}
-				hdr.Xattrs[k[len(paxXattr):]] = v
-			}
-		}
-	}
-	return nil
-}
-
-// parsePAXTime takes a string of the form %d.%d as described in
-// the PAX specification.
-func parsePAXTime(t string) (time.Time, error) {
-	buf := []byte(t)
-	pos := bytes.IndexByte(buf, '.')
-	var seconds, nanoseconds int64
-	var err error
-	if pos == -1 {
-		seconds, err = strconv.ParseInt(t, 10, 0)
-		if err != nil {
-			return time.Time{}, err
-		}
-	} else {
-		seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
-		if err != nil {
-			return time.Time{}, err
-		}
-		nano_buf := string(buf[pos+1:])
-		// Pad as needed before converting to a decimal.
-		// For example .030 -> .030000000 -> 30000000 nanoseconds
-		if len(nano_buf) < maxNanoSecondIntSize {
-			// Right pad
-			nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
-		} else if len(nano_buf) > maxNanoSecondIntSize {
-			// Right truncate
-			nano_buf = nano_buf[:maxNanoSecondIntSize]
-		}
-		nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
-		if err != nil {
-			return time.Time{}, err
-		}
-	}
-	ts := time.Unix(seconds, nanoseconds)
-	return ts, nil
-}
-
-// parsePAX parses PAX headers.
-// If an extended header (type 'x') is invalid, ErrHeader is returned
-func parsePAX(r io.Reader) (map[string]string, error) {
-	buf, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, err
-	}
-
-	// For GNU PAX sparse format 0.0 support.
-	// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
-	var sparseMap bytes.Buffer
-
-	headers := make(map[string]string)
-	// Each record is constructed as
-	//     "%d %s=%s\n", length, keyword, value
-	for len(buf) > 0 {
-		// or the header was empty to start with.
-		var sp int
-		// The size field ends at the first space.
-		sp = bytes.IndexByte(buf, ' ')
-		if sp == -1 {
-			return nil, ErrHeader
-		}
-		// Parse the first token as a decimal integer.
-		n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		// Extract everything between the decimal and the n -1 on the
-		// beginning to eat the ' ', -1 on the end to skip the newline.
-		var record []byte
-		record, buf = buf[sp+1:n-1], buf[n:]
-		// The first equals is guaranteed to mark the end of the key.
-		// Everything else is value.
-		eq := bytes.IndexByte(record, '=')
-		if eq == -1 {
-			return nil, ErrHeader
-		}
-		key, value := record[:eq], record[eq+1:]
-
-		keyStr := string(key)
-		if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
-			// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
-			sparseMap.Write(value)
-			sparseMap.Write([]byte{','})
-		} else {
-			// Normal key. Set the value in the headers map.
-			headers[keyStr] = string(value)
-		}
-	}
-	if sparseMap.Len() != 0 {
-		// Add sparse info to headers, chopping off the extra comma
-		sparseMap.Truncate(sparseMap.Len() - 1)
-		headers[paxGNUSparseMap] = sparseMap.String()
-	}
-	return headers, nil
-}
-
-// cString parses bytes as a NUL-terminated C-style string.
-// If a NUL byte is not found then the whole slice is returned as a string.
-func cString(b []byte) string {
-	n := 0
-	for n < len(b) && b[n] != 0 {
-		n++
-	}
-	return string(b[0:n])
-}
-
-func (tr *Reader) octal(b []byte) int64 {
-	// Check for binary format first.
-	if len(b) > 0 && b[0]&0x80 != 0 {
-		var x int64
-		for i, c := range b {
-			if i == 0 {
-				c &= 0x7f // ignore signal bit in first byte
-			}
-			x = x<<8 | int64(c)
-		}
-		return x
-	}
-
-	// Because unused fields are filled with NULs, we need
-	// to skip leading NULs. Fields may also be padded with
-	// spaces or NULs.
-	// So we remove leading and trailing NULs and spaces to
-	// be sure.
-	b = bytes.Trim(b, " \x00")
-
-	if len(b) == 0 {
-		return 0
-	}
-	x, err := strconv.ParseUint(cString(b), 8, 64)
-	if err != nil {
-		tr.err = err
-	}
-	return int64(x)
-}
-
-// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
-func (tr *Reader) skipUnread() {
-	nr := tr.numBytes() + tr.pad // number of bytes to skip
-	tr.curr, tr.pad = nil, 0
-	if sr, ok := tr.r.(io.Seeker); ok {
-		if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
-			return
-		}
-	}
-	_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
-}
-
-func (tr *Reader) verifyChecksum(header []byte) bool {
-	if tr.err != nil {
-		return false
-	}
-
-	given := tr.octal(header[148:156])
-	unsigned, signed := checksum(header)
-	return given == unsigned || given == signed
-}
-
-func (tr *Reader) readHeader() *Header {
-	header := tr.hdrBuff[:]
-	copy(header, zeroBlock)
-
-	if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
-		return nil
-	}
-
-	// Two blocks of zero bytes marks the end of the archive.
-	if bytes.Equal(header, zeroBlock[0:blockSize]) {
-		if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
-			return nil
-		}
-		if bytes.Equal(header, zeroBlock[0:blockSize]) {
-			tr.err = io.EOF
-		} else {
-			tr.err = ErrHeader // zero block and then non-zero block
-		}
-		return nil
-	}
-
-	if !tr.verifyChecksum(header) {
-		tr.err = ErrHeader
-		return nil
-	}
-
-	// Unpack
-	hdr := new(Header)
-	s := slicer(header)
-
-	hdr.Name = cString(s.next(100))
-	hdr.Mode = tr.octal(s.next(8))
-	hdr.Uid = int(tr.octal(s.next(8)))
-	hdr.Gid = int(tr.octal(s.next(8)))
-	hdr.Size = tr.octal(s.next(12))
-	hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
-	s.next(8) // chksum
-	hdr.Typeflag = s.next(1)[0]
-	hdr.Linkname = cString(s.next(100))
-
-	// The remainder of the header depends on the value of magic.
-	// The original (v7) version of tar had no explicit magic field,
-	// so its magic bytes, like the rest of the block, are NULs.
-	magic := string(s.next(8)) // contains version field as well.
-	var format string
-	switch {
-	case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
-		if string(header[508:512]) == "tar\x00" {
-			format = "star"
-		} else {
-			format = "posix"
-		}
-	case magic == "ustar  \x00": // old GNU tar
-		format = "gnu"
-	}
-
-	switch format {
-	case "posix", "gnu", "star":
-		hdr.Uname = cString(s.next(32))
-		hdr.Gname = cString(s.next(32))
-		devmajor := s.next(8)
-		devminor := s.next(8)
-		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
-			hdr.Devmajor = tr.octal(devmajor)
-			hdr.Devminor = tr.octal(devminor)
-		}
-		var prefix string
-		switch format {
-		case "posix", "gnu":
-			prefix = cString(s.next(155))
-		case "star":
-			prefix = cString(s.next(131))
-			hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
-			hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
-		}
-		if len(prefix) > 0 {
-			hdr.Name = prefix + "/" + hdr.Name
-		}
-	}
-
-	if tr.err != nil {
-		tr.err = ErrHeader
-		return nil
-	}
-
-	// Maximum value of hdr.Size is 64 GB (12 octal digits),
-	// so there's no risk of int64 overflowing.
-	nb := int64(hdr.Size)
-	tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
-
-	// Set the current file reader.
-	tr.curr = &regFileReader{r: tr.r, nb: nb}
-
-	// Check for old GNU sparse format entry.
-	if hdr.Typeflag == TypeGNUSparse {
-		// Get the real size of the file.
-		hdr.Size = tr.octal(header[483:495])
-
-		// Read the sparse map.
-		sp := tr.readOldGNUSparseMap(header)
-		if tr.err != nil {
-			return nil
-		}
-		// Current file is a GNU sparse file. Update the current file reader.
-		tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
-	}
-
-	return hdr
-}
-
-// A sparseEntry holds a single entry in a sparse file's sparse map.
-// A sparse entry indicates the offset and size in a sparse file of a
-// block of data.
-type sparseEntry struct {
-	offset   int64
-	numBytes int64
-}
-
-// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
-// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
-// then one or more extension headers are used to store the rest of the sparse map.
-func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
-	isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
-	spCap := oldGNUSparseMainHeaderNumEntries
-	if isExtended {
-		spCap += oldGNUSparseExtendedHeaderNumEntries
-	}
-	sp := make([]sparseEntry, 0, spCap)
-	s := slicer(header[oldGNUSparseMainHeaderOffset:])
-
-	// Read the four entries from the main tar header
-	for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
-		offset := tr.octal(s.next(oldGNUSparseOffsetSize))
-		numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
-		if tr.err != nil {
-			tr.err = ErrHeader
-			return nil
-		}
-		if offset == 0 && numBytes == 0 {
-			break
-		}
-		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
-	}
-
-	for isExtended {
-		// There are more entries. Read an extension header and parse its entries.
-		sparseHeader := make([]byte, blockSize)
-		if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
-			return nil
-		}
-		isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
-		s = slicer(sparseHeader)
-		for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
-			offset := tr.octal(s.next(oldGNUSparseOffsetSize))
-			numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
-			if tr.err != nil {
-				tr.err = ErrHeader
-				return nil
-			}
-			if offset == 0 && numBytes == 0 {
-				break
-			}
-			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
-		}
-	}
-	return sp
-}
-
-// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
-// The sparse map is stored just before the file data and padded out to the nearest block boundary.
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
-	buf := make([]byte, 2*blockSize)
-	sparseHeader := buf[:blockSize]
-
-	// readDecimal is a helper function to read a decimal integer from the sparse map
-	// while making sure to read from the file in blocks of size blockSize
-	readDecimal := func() (int64, error) {
-		// Look for newline
-		nl := bytes.IndexByte(sparseHeader, '\n')
-		if nl == -1 {
-			if len(sparseHeader) >= blockSize {
-				// This is an error
-				return 0, ErrHeader
-			}
-			oldLen := len(sparseHeader)
-			newLen := oldLen + blockSize
-			if cap(sparseHeader) < newLen {
-				// There's more header, but we need to make room for the next block
-				copy(buf, sparseHeader)
-				sparseHeader = buf[:newLen]
-			} else {
-				// There's more header, and we can just reslice
-				sparseHeader = sparseHeader[:newLen]
-			}
-
-			// Now that sparseHeader is large enough, read next block
-			if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
-				return 0, err
-			}
-
-			// Look for a newline in the new data
-			nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
-			if nl == -1 {
-				// This is an error
-				return 0, ErrHeader
-			}
-			nl += oldLen // We want the position from the beginning
-		}
-		// Now that we've found a newline, read a number
-		n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
-		if err != nil {
-			return 0, ErrHeader
-		}
-
-		// Update sparseHeader to consume this number
-		sparseHeader = sparseHeader[nl+1:]
-		return n, nil
-	}
-
-	// Read the first block
-	if _, err := io.ReadFull(r, sparseHeader); err != nil {
-		return nil, err
-	}
-
-	// The first line contains the number of entries
-	numEntries, err := readDecimal()
-	if err != nil {
-		return nil, err
-	}
-
-	// Read all the entries
-	sp := make([]sparseEntry, 0, numEntries)
-	for i := int64(0); i < numEntries; i++ {
-		// Read the offset
-		offset, err := readDecimal()
-		if err != nil {
-			return nil, err
-		}
-		// Read numBytes
-		numBytes, err := readDecimal()
-		if err != nil {
-			return nil, err
-		}
-
-		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
-	}
-
-	return sp, nil
-}
-
-// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
-// The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
-	// Get number of entries
-	numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
-	if !ok {
-		return nil, ErrHeader
-	}
-	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
-	if err != nil {
-		return nil, ErrHeader
-	}
-
-	sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
-
-	// There should be two numbers in sparseMap for each entry
-	if int64(len(sparseMap)) != 2*numEntries {
-		return nil, ErrHeader
-	}
-
-	// Loop through the entries in the sparse map
-	sp := make([]sparseEntry, 0, numEntries)
-	for i := int64(0); i < numEntries; i++ {
-		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
-	}
-
-	return sp, nil
-}
-
-// numBytes returns the number of bytes left to read in the current file's entry
-// in the tar archive, or 0 if there is no current file.
-func (tr *Reader) numBytes() int64 {
-	if tr.curr == nil {
-		// No current file, so no bytes
-		return 0
-	}
-	return tr.curr.numBytes()
-}
-
-// Read reads from the current entry in the tar archive.
-// It returns 0, io.EOF when it reaches the end of that entry,
-// until Next is called to advance to the next entry.
-func (tr *Reader) Read(b []byte) (n int, err error) {
-	if tr.curr == nil {
-		return 0, io.EOF
-	}
-	n, err = tr.curr.Read(b)
-	if err != nil && err != io.EOF {
-		tr.err = err
-	}
-	return
-}
-
-func (rfr *regFileReader) Read(b []byte) (n int, err error) {
-	if rfr.nb == 0 {
-		// file consumed
-		return 0, io.EOF
-	}
-	if int64(len(b)) > rfr.nb {
-		b = b[0:rfr.nb]
-	}
-	n, err = rfr.r.Read(b)
-	rfr.nb -= int64(n)
-
-	if err == io.EOF && rfr.nb > 0 {
-		err = io.ErrUnexpectedEOF
-	}
-	return
-}
-
-// numBytes returns the number of bytes left to read in the file's data in the tar archive.
-func (rfr *regFileReader) numBytes() int64 {
-	return rfr.nb
-}
-
-// readHole reads a sparse file hole ending at offset toOffset
-func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
-	n64 := toOffset - sfr.pos
-	if n64 > int64(len(b)) {
-		n64 = int64(len(b))
-	}
-	n := int(n64)
-	for i := 0; i < n; i++ {
-		b[i] = 0
-	}
-	sfr.pos += n64
-	return n
-}
-
-// Read reads the sparse file data in expanded form.
-func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
-	if len(sfr.sp) == 0 {
-		// No more data fragments to read from.
-		if sfr.pos < sfr.tot {
-			// We're in the last hole
-			n = sfr.readHole(b, sfr.tot)
-			return
-		}
-		// Otherwise, we're at the end of the file
-		return 0, io.EOF
-	}
-	if sfr.pos < sfr.sp[0].offset {
-		// We're in a hole
-		n = sfr.readHole(b, sfr.sp[0].offset)
-		return
-	}
-
-	// We're not in a hole, so we'll read from the next data fragment
-	posInFragment := sfr.pos - sfr.sp[0].offset
-	bytesLeft := sfr.sp[0].numBytes - posInFragment
-	if int64(len(b)) > bytesLeft {
-		b = b[0:bytesLeft]
-	}
-
-	n, err = sfr.rfr.Read(b)
-	sfr.pos += int64(n)
-
-	if int64(n) == bytesLeft {
-		// We're done with this fragment
-		sfr.sp = sfr.sp[1:]
-	}
-
-	if err == io.EOF && sfr.pos < sfr.tot {
-		// We reached the end of the last fragment's data, but there's a final hole
-		err = nil
-	}
-	return
-}
-
-// numBytes returns the number of bytes left to read in the sparse file's
-// sparse-encoded data in the tar archive.
-func (sfr *sparseFileReader) numBytes() int64 {
-	return sfr.rfr.nb
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
deleted file mode 100644
index 9601ffe..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/reader_test.go
+++ /dev/null
@@ -1,743 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
-	"bytes"
-	"crypto/md5"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"reflect"
-	"strings"
-	"testing"
-	"time"
-)
-
-type untarTest struct {
-	file    string
-	headers []*Header
-	cksums  []string
-}
-
-var gnuTarTest = &untarTest{
-	file: "testdata/gnu.tar",
-	headers: []*Header{
-		{
-			Name:     "small.txt",
-			Mode:     0640,
-			Uid:      73025,
-			Gid:      5000,
-			Size:     5,
-			ModTime:  time.Unix(1244428340, 0),
-			Typeflag: '0',
-			Uname:    "dsymonds",
-			Gname:    "eng",
-		},
-		{
-			Name:     "small2.txt",
-			Mode:     0640,
-			Uid:      73025,
-			Gid:      5000,
-			Size:     11,
-			ModTime:  time.Unix(1244436044, 0),
-			Typeflag: '0',
-			Uname:    "dsymonds",
-			Gname:    "eng",
-		},
-	},
-	cksums: []string{
-		"e38b27eaccb4391bdec553a7f3ae6b2f",
-		"c65bd2e50a56a2138bf1716f2fd56fe9",
-	},
-}
-
-var sparseTarTest = &untarTest{
-	file: "testdata/sparse-formats.tar",
-	headers: []*Header{
-		{
-			Name:     "sparse-gnu",
-			Mode:     420,
-			Uid:      1000,
-			Gid:      1000,
-			Size:     200,
-			ModTime:  time.Unix(1392395740, 0),
-			Typeflag: 0x53,
-			Linkname: "",
-			Uname:    "david",
-			Gname:    "david",
-			Devmajor: 0,
-			Devminor: 0,
-		},
-		{
-			Name:     "sparse-posix-0.0",
-			Mode:     420,
-			Uid:      1000,
-			Gid:      1000,
-			Size:     200,
-			ModTime:  time.Unix(1392342187, 0),
-			Typeflag: 0x30,
-			Linkname: "",
-			Uname:    "david",
-			Gname:    "david",
-			Devmajor: 0,
-			Devminor: 0,
-		},
-		{
-			Name:     "sparse-posix-0.1",
-			Mode:     420,
-			Uid:      1000,
-			Gid:      1000,
-			Size:     200,
-			ModTime:  time.Unix(1392340456, 0),
-			Typeflag: 0x30,
-			Linkname: "",
-			Uname:    "david",
-			Gname:    "david",
-			Devmajor: 0,
-			Devminor: 0,
-		},
-		{
-			Name:     "sparse-posix-1.0",
-			Mode:     420,
-			Uid:      1000,
-			Gid:      1000,
-			Size:     200,
-			ModTime:  time.Unix(1392337404, 0),
-			Typeflag: 0x30,
-			Linkname: "",
-			Uname:    "david",
-			Gname:    "david",
-			Devmajor: 0,
-			Devminor: 0,
-		},
-		{
-			Name:     "end",
-			Mode:     420,
-			Uid:      1000,
-			Gid:      1000,
-			Size:     4,
-			ModTime:  time.Unix(1392398319, 0),
-			Typeflag: 0x30,
-			Linkname: "",
-			Uname:    "david",
-			Gname:    "david",
-			Devmajor: 0,
-			Devminor: 0,
-		},
-	},
-	cksums: []string{
-		"6f53234398c2449fe67c1812d993012f",
-		"6f53234398c2449fe67c1812d993012f",
-		"6f53234398c2449fe67c1812d993012f",
-		"6f53234398c2449fe67c1812d993012f",
-		"b0061974914468de549a2af8ced10316",
-	},
-}
-
-var untarTests = []*untarTest{
-	gnuTarTest,
-	sparseTarTest,
-	{
-		file: "testdata/star.tar",
-		headers: []*Header{
-			{
-				Name:       "small.txt",
-				Mode:       0640,
-				Uid:        73025,
-				Gid:        5000,
-				Size:       5,
-				ModTime:    time.Unix(1244592783, 0),
-				Typeflag:   '0',
-				Uname:      "dsymonds",
-				Gname:      "eng",
-				AccessTime: time.Unix(1244592783, 0),
-				ChangeTime: time.Unix(1244592783, 0),
-			},
-			{
-				Name:       "small2.txt",
-				Mode:       0640,
-				Uid:        73025,
-				Gid:        5000,
-				Size:       11,
-				ModTime:    time.Unix(1244592783, 0),
-				Typeflag:   '0',
-				Uname:      "dsymonds",
-				Gname:      "eng",
-				AccessTime: time.Unix(1244592783, 0),
-				ChangeTime: time.Unix(1244592783, 0),
-			},
-		},
-	},
-	{
-		file: "testdata/v7.tar",
-		headers: []*Header{
-			{
-				Name:     "small.txt",
-				Mode:     0444,
-				Uid:      73025,
-				Gid:      5000,
-				Size:     5,
-				ModTime:  time.Unix(1244593104, 0),
-				Typeflag: '\x00',
-			},
-			{
-				Name:     "small2.txt",
-				Mode:     0444,
-				Uid:      73025,
-				Gid:      5000,
-				Size:     11,
-				ModTime:  time.Unix(1244593104, 0),
-				Typeflag: '\x00',
-			},
-		},
-	},
-	{
-		file: "testdata/pax.tar",
-		headers: []*Header{
-			{
-				Name:       "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
-				Mode:       0664,
-				Uid:        1000,
-				Gid:        1000,
-				Uname:      "shane",
-				Gname:      "shane",
-				Size:       7,
-				ModTime:    time.Unix(1350244992, 23960108),
-				ChangeTime: time.Unix(1350244992, 23960108),
-				AccessTime: time.Unix(1350244992, 23960108),
-				Typeflag:   TypeReg,
-			},
-			{
-				Name:       "a/b",
-				Mode:       0777,
-				Uid:        1000,
-				Gid:        1000,
-				Uname:      "shane",
-				Gname:      "shane",
-				Size:       0,
-				ModTime:    time.Unix(1350266320, 910238425),
-				ChangeTime: time.Unix(1350266320, 910238425),
-				AccessTime: time.Unix(1350266320, 910238425),
-				Typeflag:   TypeSymlink,
-				Linkname:   "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
-			},
-		},
-	},
-	{
-		file: "testdata/nil-uid.tar", // golang.org/issue/5290
-		headers: []*Header{
-			{
-				Name:     "P1050238.JPG.log",
-				Mode:     0664,
-				Uid:      0,
-				Gid:      0,
-				Size:     14,
-				ModTime:  time.Unix(1365454838, 0),
-				Typeflag: TypeReg,
-				Linkname: "",
-				Uname:    "eyefi",
-				Gname:    "eyefi",
-				Devmajor: 0,
-				Devminor: 0,
-			},
-		},
-	},
-	{
-		file: "testdata/xattrs.tar",
-		headers: []*Header{
-			{
-				Name:       "small.txt",
-				Mode:       0644,
-				Uid:        1000,
-				Gid:        10,
-				Size:       5,
-				ModTime:    time.Unix(1386065770, 448252320),
-				Typeflag:   '0',
-				Uname:      "alex",
-				Gname:      "wheel",
-				AccessTime: time.Unix(1389782991, 419875220),
-				ChangeTime: time.Unix(1389782956, 794414986),
-				Xattrs: map[string]string{
-					"user.key":  "value",
-					"user.key2": "value2",
-					// Interestingly, selinux encodes the terminating null inside the xattr
-					"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
-				},
-			},
-			{
-				Name:       "small2.txt",
-				Mode:       0644,
-				Uid:        1000,
-				Gid:        10,
-				Size:       11,
-				ModTime:    time.Unix(1386065770, 449252304),
-				Typeflag:   '0',
-				Uname:      "alex",
-				Gname:      "wheel",
-				AccessTime: time.Unix(1389782991, 419875220),
-				ChangeTime: time.Unix(1386065770, 449252304),
-				Xattrs: map[string]string{
-					"security.selinux": "unconfined_u:object_r:default_t:s0\x00",
-				},
-			},
-		},
-	},
-}
-
-func TestReader(t *testing.T) {
-testLoop:
-	for i, test := range untarTests {
-		f, err := os.Open(test.file)
-		if err != nil {
-			t.Errorf("test %d: Unexpected error: %v", i, err)
-			continue
-		}
-		defer f.Close()
-		tr := NewReader(f)
-		for j, header := range test.headers {
-			hdr, err := tr.Next()
-			if err != nil || hdr == nil {
-				t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
-				f.Close()
-				continue testLoop
-			}
-			if !reflect.DeepEqual(*hdr, *header) {
-				t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
-					i, j, *hdr, *header)
-			}
-		}
-		hdr, err := tr.Next()
-		if err == io.EOF {
-			continue testLoop
-		}
-		if hdr != nil || err != nil {
-			t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
-		}
-	}
-}
-
-func TestPartialRead(t *testing.T) {
-	f, err := os.Open("testdata/gnu.tar")
-	if err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	defer f.Close()
-
-	tr := NewReader(f)
-
-	// Read the first four bytes; Next() should skip the last byte.
-	hdr, err := tr.Next()
-	if err != nil || hdr == nil {
-		t.Fatalf("Didn't get first file: %v", err)
-	}
-	buf := make([]byte, 4)
-	if _, err := io.ReadFull(tr, buf); err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
-		t.Errorf("Contents = %v, want %v", buf, expected)
-	}
-
-	// Second file
-	hdr, err = tr.Next()
-	if err != nil || hdr == nil {
-		t.Fatalf("Didn't get second file: %v", err)
-	}
-	buf = make([]byte, 6)
-	if _, err := io.ReadFull(tr, buf); err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	if expected := []byte("Google"); !bytes.Equal(buf, expected) {
-		t.Errorf("Contents = %v, want %v", buf, expected)
-	}
-}
-
-func TestIncrementalRead(t *testing.T) {
-	test := gnuTarTest
-	f, err := os.Open(test.file)
-	if err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	defer f.Close()
-
-	tr := NewReader(f)
-
-	headers := test.headers
-	cksums := test.cksums
-	nread := 0
-
-	// loop over all files
-	for ; ; nread++ {
-		hdr, err := tr.Next()
-		if hdr == nil || err == io.EOF {
-			break
-		}
-
-		// check the header
-		if !reflect.DeepEqual(*hdr, *headers[nread]) {
-			t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
-				*hdr, headers[nread])
-		}
-
-		// read file contents in little chunks EOF,
-		// checksumming all the way
-		h := md5.New()
-		rdbuf := make([]uint8, 8)
-		for {
-			nr, err := tr.Read(rdbuf)
-			if err == io.EOF {
-				break
-			}
-			if err != nil {
-				t.Errorf("Read: unexpected error %v\n", err)
-				break
-			}
-			h.Write(rdbuf[0:nr])
-		}
-		// verify checksum
-		have := fmt.Sprintf("%x", h.Sum(nil))
-		want := cksums[nread]
-		if want != have {
-			t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
-		}
-	}
-	if nread != len(headers) {
-		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
-	}
-}
-
-func TestNonSeekable(t *testing.T) {
-	test := gnuTarTest
-	f, err := os.Open(test.file)
-	if err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	defer f.Close()
-
-	type readerOnly struct {
-		io.Reader
-	}
-	tr := NewReader(readerOnly{f})
-	nread := 0
-
-	for ; ; nread++ {
-		_, err := tr.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			t.Fatalf("Unexpected error: %v", err)
-		}
-	}
-
-	if nread != len(test.headers) {
-		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
-	}
-}
-
-func TestParsePAXHeader(t *testing.T) {
-	paxTests := [][3]string{
-		{"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
-		{"a", "a=name", "9 a=name\n"},  // Test case involving multiple acceptable length
-		{"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
-	for _, test := range paxTests {
-		key, expected, raw := test[0], test[1], test[2]
-		reader := bytes.NewReader([]byte(raw))
-		headers, err := parsePAX(reader)
-		if err != nil {
-			t.Errorf("Couldn't parse correctly formatted headers: %v", err)
-			continue
-		}
-		if strings.EqualFold(headers[key], expected) {
-			t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
-			continue
-		}
-		trailer := make([]byte, 100)
-		n, err := reader.Read(trailer)
-		if err != io.EOF || n != 0 {
-			t.Error("Buffer wasn't consumed")
-		}
-	}
-	badHeader := bytes.NewReader([]byte("3 somelongkey="))
-	if _, err := parsePAX(badHeader); err != ErrHeader {
-		t.Fatal("Unexpected success when parsing bad header")
-	}
-}
-
-func TestParsePAXTime(t *testing.T) {
-	// Some valid PAX time values
-	timestamps := map[string]time.Time{
-		"1350244992.023960108":  time.Unix(1350244992, 23960108), // The common case
-		"1350244992.02396010":   time.Unix(1350244992, 23960100), // Lower precision value
-		"1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
-		"1350244992":            time.Unix(1350244992, 0),        // Low precision value
-	}
-	for input, expected := range timestamps {
-		ts, err := parsePAXTime(input)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !ts.Equal(expected) {
-			t.Fatalf("Time parsing failure %s %s", ts, expected)
-		}
-	}
-}
-
-func TestMergePAX(t *testing.T) {
-	hdr := new(Header)
-	// Test a string, integer, and time based value.
-	headers := map[string]string{
-		"path":  "a/b/c",
-		"uid":   "1000",
-		"mtime": "1350244992.023960108",
-	}
-	err := mergePAX(hdr, headers)
-	if err != nil {
-		t.Fatal(err)
-	}
-	want := &Header{
-		Name:    "a/b/c",
-		Uid:     1000,
-		ModTime: time.Unix(1350244992, 23960108),
-	}
-	if !reflect.DeepEqual(hdr, want) {
-		t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
-	}
-}
-
-func TestSparseEndToEnd(t *testing.T) {
-	test := sparseTarTest
-	f, err := os.Open(test.file)
-	if err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	defer f.Close()
-
-	tr := NewReader(f)
-
-	headers := test.headers
-	cksums := test.cksums
-	nread := 0
-
-	// loop over all files
-	for ; ; nread++ {
-		hdr, err := tr.Next()
-		if hdr == nil || err == io.EOF {
-			break
-		}
-
-		// check the header
-		if !reflect.DeepEqual(*hdr, *headers[nread]) {
-			t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
-				*hdr, headers[nread])
-		}
-
-		// read and checksum the file data
-		h := md5.New()
-		_, err = io.Copy(h, tr)
-		if err != nil {
-			t.Fatalf("Unexpected error: %v", err)
-		}
-
-		// verify checksum
-		have := fmt.Sprintf("%x", h.Sum(nil))
-		want := cksums[nread]
-		if want != have {
-			t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
-		}
-	}
-	if nread != len(headers) {
-		t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
-	}
-}
-
-type sparseFileReadTest struct {
-	sparseData []byte
-	sparseMap  []sparseEntry
-	realSize   int64
-	expected   []byte
-}
-
-var sparseFileReadTests = []sparseFileReadTest{
-	{
-		sparseData: []byte("abcde"),
-		sparseMap: []sparseEntry{
-			{offset: 0, numBytes: 2},
-			{offset: 5, numBytes: 3},
-		},
-		realSize: 8,
-		expected: []byte("ab\x00\x00\x00cde"),
-	},
-	{
-		sparseData: []byte("abcde"),
-		sparseMap: []sparseEntry{
-			{offset: 0, numBytes: 2},
-			{offset: 5, numBytes: 3},
-		},
-		realSize: 10,
-		expected: []byte("ab\x00\x00\x00cde\x00\x00"),
-	},
-	{
-		sparseData: []byte("abcde"),
-		sparseMap: []sparseEntry{
-			{offset: 1, numBytes: 3},
-			{offset: 6, numBytes: 2},
-		},
-		realSize: 8,
-		expected: []byte("\x00abc\x00\x00de"),
-	},
-	{
-		sparseData: []byte("abcde"),
-		sparseMap: []sparseEntry{
-			{offset: 1, numBytes: 3},
-			{offset: 6, numBytes: 2},
-		},
-		realSize: 10,
-		expected: []byte("\x00abc\x00\x00de\x00\x00"),
-	},
-	{
-		sparseData: []byte(""),
-		sparseMap:  nil,
-		realSize:   2,
-		expected:   []byte("\x00\x00"),
-	},
-}
-
-func TestSparseFileReader(t *testing.T) {
-	for i, test := range sparseFileReadTests {
-		r := bytes.NewReader(test.sparseData)
-		nb := int64(r.Len())
-		sfr := &sparseFileReader{
-			rfr: &regFileReader{r: r, nb: nb},
-			sp:  test.sparseMap,
-			pos: 0,
-			tot: test.realSize,
-		}
-		if sfr.numBytes() != nb {
-			t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
-		}
-		buf, err := ioutil.ReadAll(sfr)
-		if err != nil {
-			t.Errorf("test %d: Unexpected error: %v", i, err)
-		}
-		if e := test.expected; !bytes.Equal(buf, e) {
-			t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
-		}
-		if sfr.numBytes() != 0 {
-			t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
-		}
-	}
-}
-
-func TestSparseIncrementalRead(t *testing.T) {
-	sparseMap := []sparseEntry{{10, 2}}
-	sparseData := []byte("Go")
-	expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"
-
-	r := bytes.NewReader(sparseData)
-	nb := int64(r.Len())
-	sfr := &sparseFileReader{
-		rfr: &regFileReader{r: r, nb: nb},
-		sp:  sparseMap,
-		pos: 0,
-		tot: int64(len(expected)),
-	}
-
-	// We'll read the data 6 bytes at a time, with a hole of size 10 at
-	// the beginning and one of size 8 at the end.
-	var outputBuf bytes.Buffer
-	buf := make([]byte, 6)
-	for {
-		n, err := sfr.Read(buf)
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			t.Errorf("Read: unexpected error %v\n", err)
-		}
-		if n > 0 {
-			_, err := outputBuf.Write(buf[:n])
-			if err != nil {
-				t.Errorf("Write: unexpected error %v\n", err)
-			}
-		}
-	}
-	got := outputBuf.String()
-	if got != expected {
-		t.Errorf("Contents = %v, want %v", got, expected)
-	}
-}
-
-func TestReadGNUSparseMap0x1(t *testing.T) {
-	headers := map[string]string{
-		paxGNUSparseNumBlocks: "4",
-		paxGNUSparseMap:       "0,5,10,5,20,5,30,5",
-	}
-	expected := []sparseEntry{
-		{offset: 0, numBytes: 5},
-		{offset: 10, numBytes: 5},
-		{offset: 20, numBytes: 5},
-		{offset: 30, numBytes: 5},
-	}
-
-	sp, err := readGNUSparseMap0x1(headers)
-	if err != nil {
-		t.Errorf("Unexpected error: %v", err)
-	}
-	if !reflect.DeepEqual(sp, expected) {
-		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
-	}
-}
-
-func TestReadGNUSparseMap1x0(t *testing.T) {
-	// This test uses lots of holes so the sparse header takes up more than two blocks
-	numEntries := 100
-	expected := make([]sparseEntry, 0, numEntries)
-	sparseMap := new(bytes.Buffer)
-
-	fmt.Fprintf(sparseMap, "%d\n", numEntries)
-	for i := 0; i < numEntries; i++ {
-		offset := int64(2048 * i)
-		numBytes := int64(1024)
-		expected = append(expected, sparseEntry{offset: offset, numBytes: numBytes})
-		fmt.Fprintf(sparseMap, "%d\n%d\n", offset, numBytes)
-	}
-
-	// Make the header the smallest multiple of blockSize that fits the sparseMap
-	headerBlocks := (sparseMap.Len() + blockSize - 1) / blockSize
-	bufLen := blockSize * headerBlocks
-	buf := make([]byte, bufLen)
-	copy(buf, sparseMap.Bytes())
-
-	// Get an reader to read the sparse map
-	r := bytes.NewReader(buf)
-
-	// Read the sparse map
-	sp, err := readGNUSparseMap1x0(r)
-	if err != nil {
-		t.Errorf("Unexpected error: %v", err)
-	}
-	if !reflect.DeepEqual(sp, expected) {
-		t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
-	}
-}
-
-func TestUninitializedRead(t *testing.T) {
-	test := gnuTarTest
-	f, err := os.Open(test.file)
-	if err != nil {
-		t.Fatalf("Unexpected error: %v", err)
-	}
-	defer f.Close()
-
-	tr := NewReader(f)
-	_, err = tr.Read([]byte{})
-	if err == nil || err != io.EOF {
-		t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
-	}
-
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
deleted file mode 100644
index cf9cc79..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atim.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux dragonfly openbsd solaris
-
-package tar
-
-import (
-	"syscall"
-	"time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Atim.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Ctim.Unix())
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
deleted file mode 100644
index 6f17dbe..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_atimespec.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin freebsd netbsd
-
-package tar
-
-import (
-	"syscall"
-	"time"
-)
-
-func statAtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Atimespec.Unix())
-}
-
-func statCtime(st *syscall.Stat_t) time.Time {
-	return time.Unix(st.Ctimespec.Unix())
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
deleted file mode 100644
index cb843db..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/stat_unix.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin dragonfly freebsd openbsd netbsd solaris
-
-package tar
-
-import (
-	"os"
-	"syscall"
-)
-
-func init() {
-	sysStat = statUnix
-}
-
-func statUnix(fi os.FileInfo, h *Header) error {
-	sys, ok := fi.Sys().(*syscall.Stat_t)
-	if !ok {
-		return nil
-	}
-	h.Uid = int(sys.Uid)
-	h.Gid = int(sys.Gid)
-	// TODO(bradfitz): populate username & group.  os/user
-	// doesn't cache LookupId lookups, and lacks group
-	// lookup functions.
-	h.AccessTime = statAtime(sys)
-	h.ChangeTime = statCtime(sys)
-	// TODO(bradfitz): major/minor device numbers?
-	return nil
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
deleted file mode 100644
index ed333f3..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/tar_test.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
-	"bytes"
-	"io/ioutil"
-	"os"
-	"path"
-	"reflect"
-	"strings"
-	"testing"
-	"time"
-)
-
-func TestFileInfoHeader(t *testing.T) {
-	fi, err := os.Stat("testdata/small.txt")
-	if err != nil {
-		t.Fatal(err)
-	}
-	h, err := FileInfoHeader(fi, "")
-	if err != nil {
-		t.Fatalf("FileInfoHeader: %v", err)
-	}
-	if g, e := h.Name, "small.txt"; g != e {
-		t.Errorf("Name = %q; want %q", g, e)
-	}
-	if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
-		t.Errorf("Mode = %#o; want %#o", g, e)
-	}
-	if g, e := h.Size, int64(5); g != e {
-		t.Errorf("Size = %v; want %v", g, e)
-	}
-	if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
-		t.Errorf("ModTime = %v; want %v", g, e)
-	}
-	// FileInfoHeader should error when passing nil FileInfo
-	if _, err := FileInfoHeader(nil, ""); err == nil {
-		t.Fatalf("Expected error when passing nil to FileInfoHeader")
-	}
-}
-
-func TestFileInfoHeaderDir(t *testing.T) {
-	fi, err := os.Stat("testdata")
-	if err != nil {
-		t.Fatal(err)
-	}
-	h, err := FileInfoHeader(fi, "")
-	if err != nil {
-		t.Fatalf("FileInfoHeader: %v", err)
-	}
-	if g, e := h.Name, "testdata/"; g != e {
-		t.Errorf("Name = %q; want %q", g, e)
-	}
-	// Ignoring c_ISGID for golang.org/issue/4867
-	if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
-		t.Errorf("Mode = %#o; want %#o", g, e)
-	}
-	if g, e := h.Size, int64(0); g != e {
-		t.Errorf("Size = %v; want %v", g, e)
-	}
-	if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
-		t.Errorf("ModTime = %v; want %v", g, e)
-	}
-}
-
-func TestFileInfoHeaderSymlink(t *testing.T) {
-	h, err := FileInfoHeader(symlink{}, "some-target")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if g, e := h.Name, "some-symlink"; g != e {
-		t.Errorf("Name = %q; want %q", g, e)
-	}
-	if g, e := h.Linkname, "some-target"; g != e {
-		t.Errorf("Linkname = %q; want %q", g, e)
-	}
-}
-
-type symlink struct{}
-
-func (symlink) Name() string       { return "some-symlink" }
-func (symlink) Size() int64        { return 0 }
-func (symlink) Mode() os.FileMode  { return os.ModeSymlink }
-func (symlink) ModTime() time.Time { return time.Time{} }
-func (symlink) IsDir() bool        { return false }
-func (symlink) Sys() interface{}   { return nil }
-
-func TestRoundTrip(t *testing.T) {
-	data := []byte("some file contents")
-
-	var b bytes.Buffer
-	tw := NewWriter(&b)
-	hdr := &Header{
-		Name:    "file.txt",
-		Uid:     1 << 21, // too big for 8 octal digits
-		Size:    int64(len(data)),
-		ModTime: time.Now(),
-	}
-	// tar only supports second precision.
-	hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
-	if err := tw.WriteHeader(hdr); err != nil {
-		t.Fatalf("tw.WriteHeader: %v", err)
-	}
-	if _, err := tw.Write(data); err != nil {
-		t.Fatalf("tw.Write: %v", err)
-	}
-	if err := tw.Close(); err != nil {
-		t.Fatalf("tw.Close: %v", err)
-	}
-
-	// Read it back.
-	tr := NewReader(&b)
-	rHdr, err := tr.Next()
-	if err != nil {
-		t.Fatalf("tr.Next: %v", err)
-	}
-	if !reflect.DeepEqual(rHdr, hdr) {
-		t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
-	}
-	rData, err := ioutil.ReadAll(tr)
-	if err != nil {
-		t.Fatalf("Read: %v", err)
-	}
-	if !bytes.Equal(rData, data) {
-		t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
-	}
-}
-
-type headerRoundTripTest struct {
-	h  *Header
-	fm os.FileMode
-}
-
-func TestHeaderRoundTrip(t *testing.T) {
-	golden := []headerRoundTripTest{
-		// regular file.
-		{
-			h: &Header{
-				Name:     "test.txt",
-				Mode:     0644 | c_ISREG,
-				Size:     12,
-				ModTime:  time.Unix(1360600916, 0),
-				Typeflag: TypeReg,
-			},
-			fm: 0644,
-		},
-		// hard link.
-		{
-			h: &Header{
-				Name:     "hard.txt",
-				Mode:     0644 | c_ISLNK,
-				Size:     0,
-				ModTime:  time.Unix(1360600916, 0),
-				Typeflag: TypeLink,
-			},
-			fm: 0644 | os.ModeSymlink,
-		},
-		// symbolic link.
-		{
-			h: &Header{
-				Name:     "link.txt",
-				Mode:     0777 | c_ISLNK,
-				Size:     0,
-				ModTime:  time.Unix(1360600852, 0),
-				Typeflag: TypeSymlink,
-			},
-			fm: 0777 | os.ModeSymlink,
-		},
-		// character device node.
-		{
-			h: &Header{
-				Name:     "dev/null",
-				Mode:     0666 | c_ISCHR,
-				Size:     0,
-				ModTime:  time.Unix(1360578951, 0),
-				Typeflag: TypeChar,
-			},
-			fm: 0666 | os.ModeDevice | os.ModeCharDevice,
-		},
-		// block device node.
-		{
-			h: &Header{
-				Name:     "dev/sda",
-				Mode:     0660 | c_ISBLK,
-				Size:     0,
-				ModTime:  time.Unix(1360578954, 0),
-				Typeflag: TypeBlock,
-			},
-			fm: 0660 | os.ModeDevice,
-		},
-		// directory.
-		{
-			h: &Header{
-				Name:     "dir/",
-				Mode:     0755 | c_ISDIR,
-				Size:     0,
-				ModTime:  time.Unix(1360601116, 0),
-				Typeflag: TypeDir,
-			},
-			fm: 0755 | os.ModeDir,
-		},
-		// fifo node.
-		{
-			h: &Header{
-				Name:     "dev/initctl",
-				Mode:     0600 | c_ISFIFO,
-				Size:     0,
-				ModTime:  time.Unix(1360578949, 0),
-				Typeflag: TypeFifo,
-			},
-			fm: 0600 | os.ModeNamedPipe,
-		},
-		// setuid.
-		{
-			h: &Header{
-				Name:     "bin/su",
-				Mode:     0755 | c_ISREG | c_ISUID,
-				Size:     23232,
-				ModTime:  time.Unix(1355405093, 0),
-				Typeflag: TypeReg,
-			},
-			fm: 0755 | os.ModeSetuid,
-		},
-		// setguid.
-		{
-			h: &Header{
-				Name:     "group.txt",
-				Mode:     0750 | c_ISREG | c_ISGID,
-				Size:     0,
-				ModTime:  time.Unix(1360602346, 0),
-				Typeflag: TypeReg,
-			},
-			fm: 0750 | os.ModeSetgid,
-		},
-		// sticky.
-		{
-			h: &Header{
-				Name:     "sticky.txt",
-				Mode:     0600 | c_ISREG | c_ISVTX,
-				Size:     7,
-				ModTime:  time.Unix(1360602540, 0),
-				Typeflag: TypeReg,
-			},
-			fm: 0600 | os.ModeSticky,
-		},
-	}
-
-	for i, g := range golden {
-		fi := g.h.FileInfo()
-		h2, err := FileInfoHeader(fi, "")
-		if err != nil {
-			t.Error(err)
-			continue
-		}
-		if strings.Contains(fi.Name(), "/") {
-			t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
-		}
-		name := path.Base(g.h.Name)
-		if fi.IsDir() {
-			name += "/"
-		}
-		if got, want := h2.Name, name; got != want {
-			t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
-		}
-		if got, want := h2.Size, g.h.Size; got != want {
-			t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
-		}
-		if got, want := h2.Mode, g.h.Mode; got != want {
-			t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
-		}
-		if got, want := fi.Mode(), g.fm; got != want {
-			t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
-		}
-		if got, want := h2.ModTime, g.h.ModTime; got != want {
-			t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
-		}
-		if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
-			t.Errorf("i=%d: Sys didn't return original *Header", i)
-		}
-	}
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
deleted file mode 100644
index fc899dc..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/gnu.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
deleted file mode 100644
index cc9cfaa..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/nil-uid.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
deleted file mode 100644
index 9bc24b6..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/pax.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
deleted file mode 100644
index b249bfc..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small.txt
+++ /dev/null
@@ -1 +0,0 @@
-Kilts
\ No newline at end of file
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
deleted file mode 100644
index 394ee3e..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/small2.txt
+++ /dev/null
@@ -1 +0,0 @@
-Google.com
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
deleted file mode 100644
index 8bd4e74..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/sparse-formats.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
deleted file mode 100644
index 59e2d4e..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/star.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
deleted file mode 100644
index 29679d9..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/ustar.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
deleted file mode 100644
index eb65fc9..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/v7.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
deleted file mode 100644
index 5960ee8..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big-long.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
deleted file mode 100644
index 753e883..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer-big.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
deleted file mode 100644
index e6d816a..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/writer.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
deleted file mode 100644
index 9701950..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/testdata/xattrs.tar
+++ /dev/null
Binary files differ
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
deleted file mode 100644
index dafb2ca..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer.go
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-// TODO(dsymonds):
-// - catch more errors (no first header, etc.)
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"path"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var (
-	ErrWriteTooLong    = errors.New("archive/tar: write too long")
-	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
-	ErrWriteAfterClose = errors.New("archive/tar: write after close")
-	errNameTooLong     = errors.New("archive/tar: name too long")
-	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
-)
-
-// A Writer provides sequential writing of a tar archive in POSIX.1 format.
-// A tar archive consists of a sequence of files.
-// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
-// writing at most hdr.Size bytes in total.
-type Writer struct {
-	w          io.Writer
-	err        error
-	nb         int64 // number of unwritten bytes for current file entry
-	pad        int64 // amount of padding to write after current file entry
-	closed     bool
-	usedBinary bool            // whether the binary numeric field extension was used
-	preferPax  bool            // use pax header instead of binary numeric header
-	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
-	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
-
-// Flush finishes writing the current file (optional).
-func (tw *Writer) Flush() error {
-	if tw.nb > 0 {
-		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
-		return tw.err
-	}
-
-	n := tw.nb + tw.pad
-	for n > 0 && tw.err == nil {
-		nr := n
-		if nr > blockSize {
-			nr = blockSize
-		}
-		var nw int
-		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
-		n -= int64(nw)
-	}
-	tw.nb = 0
-	tw.pad = 0
-	return tw.err
-}
-
-// Write s into b, terminating it with a NUL if there is room.
-// If the value is too long for the field and allowPax is true add a paxheader record instead
-func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
-	needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
-	if needsPaxHeader {
-		paxHeaders[paxKeyword] = s
-		return
-	}
-	if len(s) > len(b) {
-		if tw.err == nil {
-			tw.err = ErrFieldTooLong
-		}
-		return
-	}
-	ascii := toASCII(s)
-	copy(b, ascii)
-	if len(ascii) < len(b) {
-		b[len(ascii)] = 0
-	}
-}
-
-// Encode x as an octal ASCII string and write it into b with leading zeros.
-func (tw *Writer) octal(b []byte, x int64) {
-	s := strconv.FormatInt(x, 8)
-	// leading zeros, but leave room for a NUL.
-	for len(s)+1 < len(b) {
-		s = "0" + s
-	}
-	tw.cString(b, s, false, paxNone, nil)
-}
-
-// Write x into b, either as octal or as binary (GNUtar/star extension).
-// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
-func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
-	// Try octal first.
-	s := strconv.FormatInt(x, 8)
-	if len(s) < len(b) {
-		tw.octal(b, x)
-		return
-	}
-
-	// If it is too long for octal, and pax is preferred, use a pax header
-	if allowPax && tw.preferPax {
-		tw.octal(b, 0)
-		s := strconv.FormatInt(x, 10)
-		paxHeaders[paxKeyword] = s
-		return
-	}
-
-	// Too big: use binary (big-endian).
-	tw.usedBinary = true
-	for i := len(b) - 1; x > 0 && i >= 0; i-- {
-		b[i] = byte(x)
-		x >>= 8
-	}
-	b[0] |= 0x80 // highest bit indicates binary format
-}
-
-var (
-	minTime = time.Unix(0, 0)
-	// There is room for 11 octal digits (33 bits) of mtime.
-	maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
-	return tw.writeHeader(hdr, true)
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-// As this method is called internally by writePax header to allow it to
-// suppress writing the pax header.
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
-	if tw.closed {
-		return ErrWriteAfterClose
-	}
-	if tw.err == nil {
-		tw.Flush()
-	}
-	if tw.err != nil {
-		return tw.err
-	}
-
-	// a map to hold pax header records, if any are needed
-	paxHeaders := make(map[string]string)
-
-	// TODO(shanemhansen): we might want to use PAX headers for
-	// subsecond time resolution, but for now let's just capture
-	// too long fields or non ascii characters
-
-	var header []byte
-
-	// We need to select which scratch buffer to use carefully,
-	// since this method is called recursively to write PAX headers.
-	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
-	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
-	// already being used by the non-recursive call, so we must use paxHdrBuff.
-	header = tw.hdrBuff[:]
-	if !allowPax {
-		header = tw.paxHdrBuff[:]
-	}
-	copy(header, zeroBlock)
-	s := slicer(header)
-
-	// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
-	pathHeaderBytes := s.next(fileNameSize)
-
-	tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
-
-	// Handle out of range ModTime carefully.
-	var modTime int64
-	if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
-		modTime = hdr.ModTime.Unix()
-	}
-
-	tw.octal(s.next(8), hdr.Mode)                                   // 100:108
-	tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
-	tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
-	tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders)     // 124:136
-	tw.numeric(s.next(12), modTime, false, paxNone, nil)            // 136:148 --- consider using pax for finer granularity
-	s.next(8)                                                       // chksum (148:156)
-	s.next(1)[0] = hdr.Typeflag                                     // 156:157
-
-	tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
-
-	copy(s.next(8), []byte("ustar\x0000"))                        // 257:265
-	tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
-	tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
-	tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil)      // 329:337
-	tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil)      // 337:345
-
-	// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
-	prefixHeaderBytes := s.next(155)
-	tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500  prefix
-
-	// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
-	if tw.usedBinary {
-		copy(header[257:265], []byte("ustar  \x00"))
-	}
-
-	_, paxPathUsed := paxHeaders[paxPath]
-	// try to use a ustar header when only the name is too long
-	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
-		suffix := hdr.Name
-		prefix := ""
-		if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
-			var err error
-			prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
-			if err == nil {
-				// ok we can use a ustar long name instead of pax, now correct the fields
-
-				// remove the path field from the pax header. this will suppress the pax header
-				delete(paxHeaders, paxPath)
-
-				// update the path fields
-				tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
-				tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
-
-				// Use the ustar magic if we used ustar long names.
-				if len(prefix) > 0 && !tw.usedBinary {
-					copy(header[257:265], []byte("ustar\x00"))
-				}
-			}
-		}
-	}
-
-	// The chksum field is terminated by a NUL and a space.
-	// This is different from the other octal fields.
-	chksum, _ := checksum(header)
-	tw.octal(header[148:155], chksum)
-	header[155] = ' '
-
-	if tw.err != nil {
-		// problem with header; probably integer too big for a field.
-		return tw.err
-	}
-
-	if allowPax {
-		for k, v := range hdr.Xattrs {
-			paxHeaders[paxXattr+k] = v
-		}
-	}
-
-	if len(paxHeaders) > 0 {
-		if !allowPax {
-			return errInvalidHeader
-		}
-		if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
-			return err
-		}
-	}
-	tw.nb = int64(hdr.Size)
-	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
-
-	_, tw.err = tw.w.Write(header)
-	return tw.err
-}
-
-// writeUSTARLongName splits a USTAR long name hdr.Name.
-// name must be < 256 characters. errNameTooLong is returned
-// if hdr.Name can't be split. The splitting heuristic
-// is compatible with gnu tar.
-func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
-	length := len(name)
-	if length > fileNamePrefixSize+1 {
-		length = fileNamePrefixSize + 1
-	} else if name[length-1] == '/' {
-		length--
-	}
-	i := strings.LastIndex(name[:length], "/")
-	// nlen contains the resulting length in the name field.
-	// plen contains the resulting length in the prefix field.
-	nlen := len(name) - i - 1
-	plen := i
-	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
-		err = errNameTooLong
-		return
-	}
-	prefix, suffix = name[:i], name[i+1:]
-	return
-}
-
-// writePaxHeader writes an extended pax header to the
-// archive.
-func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
-	// Prepare extended header
-	ext := new(Header)
-	ext.Typeflag = TypeXHeader
-	// Setting ModTime is required for reader parsing to
-	// succeed, and seems harmless enough.
-	ext.ModTime = hdr.ModTime
-	// The spec asks that we namespace our pseudo files
-	// with the current pid.
-	pid := os.Getpid()
-	dir, file := path.Split(hdr.Name)
-	fullName := path.Join(dir,
-		fmt.Sprintf("PaxHeaders.%d", pid), file)
-
-	ascii := toASCII(fullName)
-	if len(ascii) > 100 {
-		ascii = ascii[:100]
-	}
-	ext.Name = ascii
-	// Construct the body
-	var buf bytes.Buffer
-
-	for k, v := range paxHeaders {
-		fmt.Fprint(&buf, paxHeader(k+"="+v))
-	}
-
-	ext.Size = int64(len(buf.Bytes()))
-	if err := tw.writeHeader(ext, false); err != nil {
-		return err
-	}
-	if _, err := tw.Write(buf.Bytes()); err != nil {
-		return err
-	}
-	if err := tw.Flush(); err != nil {
-		return err
-	}
-	return nil
-}
-
-// paxHeader formats a single pax record, prefixing it with the appropriate length
-func paxHeader(msg string) string {
-	const padding = 2 // Extra padding for space and newline
-	size := len(msg) + padding
-	size += len(strconv.Itoa(size))
-	record := fmt.Sprintf("%d %s\n", size, msg)
-	if len(record) != size {
-		// Final adjustment if adding size increased
-		// the number of digits in size
-		size = len(record)
-		record = fmt.Sprintf("%d %s\n", size, msg)
-	}
-	return record
-}
-
-// Write writes to the current entry in the tar archive.
-// Write returns the error ErrWriteTooLong if more than
-// hdr.Size bytes are written after WriteHeader.
-func (tw *Writer) Write(b []byte) (n int, err error) {
-	if tw.closed {
-		err = ErrWriteTooLong
-		return
-	}
-	overwrite := false
-	if int64(len(b)) > tw.nb {
-		b = b[0:tw.nb]
-		overwrite = true
-	}
-	n, err = tw.w.Write(b)
-	tw.nb -= int64(n)
-	if err == nil && overwrite {
-		err = ErrWriteTooLong
-		return
-	}
-	tw.err = err
-	return
-}
-
-// Close closes the tar archive, flushing any unwritten
-// data to the underlying writer.
-func (tw *Writer) Close() error {
-	if tw.err != nil || tw.closed {
-		return tw.err
-	}
-	tw.Flush()
-	tw.closed = true
-	if tw.err != nil {
-		return tw.err
-	}
-
-	// trailer: two zero blocks
-	for i := 0; i < 2; i++ {
-		_, tw.err = tw.w.Write(zeroBlock)
-		if tw.err != nil {
-			break
-		}
-	}
-	return tw.err
-}
diff --git a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go b/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
deleted file mode 100644
index 5e42e32..0000000
--- a/vendor/src/code.google.com/p/go/src/pkg/archive/tar/writer_test.go
+++ /dev/null
@@ -1,491 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tar
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"reflect"
-	"strings"
-	"testing"
-	"testing/iotest"
-	"time"
-)
-
-type writerTestEntry struct {
-	header   *Header
-	contents string
-}
-
-type writerTest struct {
-	file    string // filename of expected output
-	entries []*writerTestEntry
-}
-
-var writerTests = []*writerTest{
-	// The writer test file was produced with this command:
-	// tar (GNU tar) 1.26
-	//   ln -s small.txt link.txt
-	//   tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
-	{
-		file: "testdata/writer.tar",
-		entries: []*writerTestEntry{
-			{
-				header: &Header{
-					Name:     "small.txt",
-					Mode:     0640,
-					Uid:      73025,
-					Gid:      5000,
-					Size:     5,
-					ModTime:  time.Unix(1246508266, 0),
-					Typeflag: '0',
-					Uname:    "dsymonds",
-					Gname:    "eng",
-				},
-				contents: "Kilts",
-			},
-			{
-				header: &Header{
-					Name:     "small2.txt",
-					Mode:     0640,
-					Uid:      73025,
-					Gid:      5000,
-					Size:     11,
-					ModTime:  time.Unix(1245217492, 0),
-					Typeflag: '0',
-					Uname:    "dsymonds",
-					Gname:    "eng",
-				},
-				contents: "Google.com\n",
-			},
-			{
-				header: &Header{
-					Name:     "link.txt",
-					Mode:     0777,
-					Uid:      1000,
-					Gid:      1000,
-					Size:     0,
-					ModTime:  time.Unix(1314603082, 0),
-					Typeflag: '2',
-					Linkname: "small.txt",
-					Uname:    "strings",
-					Gname:    "strings",
-				},
-				// no contents
-			},
-		},
-	},
-	// The truncated test file was produced using these commands:
-	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
-	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
-	{
-		file: "testdata/writer-big.tar",
-		entries: []*writerTestEntry{
-			{
-				header: &Header{
-					Name:     "tmp/16gig.txt",
-					Mode:     0640,
-					Uid:      73025,
-					Gid:      5000,
-					Size:     16 << 30,
-					ModTime:  time.Unix(1254699560, 0),
-					Typeflag: '0',
-					Uname:    "dsymonds",
-					Gname:    "eng",
-				},
-				// fake contents
-				contents: strings.Repeat("\x00", 4<<10),
-			},
-		},
-	},
-	// The truncated test file was produced using these commands:
-	//   dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
-	//   tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
-	{
-		file: "testdata/writer-big-long.tar",
-		entries: []*writerTestEntry{
-			{
-				header: &Header{
-					Name:     strings.Repeat("longname/", 15) + "16gig.txt",
-					Mode:     0644,
-					Uid:      1000,
-					Gid:      1000,
-					Size:     16 << 30,
-					ModTime:  time.Unix(1399583047, 0),
-					Typeflag: '0',
-					Uname:    "guillaume",
-					Gname:    "guillaume",
-				},
-				// fake contents
-				contents: strings.Repeat("\x00", 4<<10),
-			},
-		},
-	},
-	// This file was produced using gnu tar 1.17
-	// gnutar  -b 4 --format=ustar (longname/)*15 + file.txt
-	{
-		file: "testdata/ustar.tar",
-		entries: []*writerTestEntry{
-			{
-				header: &Header{
-					Name:     strings.Repeat("longname/", 15) + "file.txt",
-					Mode:     0644,
-					Uid:      0765,
-					Gid:      024,
-					Size:     06,
-					ModTime:  time.Unix(1360135598, 0),
-					Typeflag: '0',
-					Uname:    "shane",
-					Gname:    "staff",
-				},
-				contents: "hello\n",
-			},
-		},
-	},
-}
-
-// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection.
-func bytestr(offset int, b []byte) string {
-	const rowLen = 32
-	s := fmt.Sprintf("%04x ", offset)
-	for _, ch := range b {
-		switch {
-		case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
-			s += fmt.Sprintf("  %c", ch)
-		default:
-			s += fmt.Sprintf(" %02x", ch)
-		}
-	}
-	return s
-}
-
-// Render a pseudo-diff between two blocks of bytes.
-func bytediff(a []byte, b []byte) string {
-	const rowLen = 32
-	s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
-	for offset := 0; len(a)+len(b) > 0; offset += rowLen {
-		na, nb := rowLen, rowLen
-		if na > len(a) {
-			na = len(a)
-		}
-		if nb > len(b) {
-			nb = len(b)
-		}
-		sa := bytestr(offset, a[0:na])
-		sb := bytestr(offset, b[0:nb])
-		if sa != sb {
-			s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
-		}
-		a = a[na:]
-		b = b[nb:]
-	}
-	return s
-}
-
-func TestWriter(t *testing.T) {
-testLoop:
-	for i, test := range writerTests {
-		expected, err := ioutil.ReadFile(test.file)
-		if err != nil {
-			t.Errorf("test %d: Unexpected error: %v", i, err)
-			continue
-		}
-
-		buf := new(bytes.Buffer)
-		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
-		big := false
-		for j, entry := range test.entries {
-			big = big || entry.header.Size > 1<<10
-			if err := tw.WriteHeader(entry.header); err != nil {
-				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
-				continue testLoop
-			}
-			if _, err := io.WriteString(tw, entry.contents); err != nil {
-				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
-				continue testLoop
-			}
-		}
-		// Only interested in Close failures for the small tests.
-		if err := tw.Close(); err != nil && !big {
-			t.Errorf("test %d: Failed closing archive: %v", i, err)
-			continue testLoop
-		}
-
-		actual := buf.Bytes()
-		if !bytes.Equal(expected, actual) {
-			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
-				i, bytediff(expected, actual))
-		}
-		if testing.Short() { // The second test is expensive.
-			break
-		}
-	}
-}
-
-func TestPax(t *testing.T) {
-	// Create an archive with a large name
-	fileinfo, err := os.Stat("testdata/small.txt")
-	if err != nil {
-		t.Fatal(err)
-	}
-	hdr, err := FileInfoHeader(fileinfo, "")
-	if err != nil {
-		t.Fatalf("os.Stat: %v", err)
-	}
-	// Force a PAX long name to be written
-	longName := strings.Repeat("ab", 100)
-	contents := strings.Repeat(" ", int(hdr.Size))
-	hdr.Name = longName
-	var buf bytes.Buffer
-	writer := NewWriter(&buf)
-	if err := writer.WriteHeader(hdr); err != nil {
-		t.Fatal(err)
-	}
-	if _, err = writer.Write([]byte(contents)); err != nil {
-		t.Fatal(err)
-	}
-	if err := writer.Close(); err != nil {
-		t.Fatal(err)
-	}
-	// Simple test to make sure PAX extensions are in effect
-	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
-		t.Fatal("Expected at least one PAX header to be written.")
-	}
-	// Test that we can get a long name back out of the archive.
-	reader := NewReader(&buf)
-	hdr, err = reader.Next()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if hdr.Name != longName {
-		t.Fatal("Couldn't recover long file name")
-	}
-}
-
-func TestPaxSymlink(t *testing.T) {
-	// Create an archive with a large linkname
-	fileinfo, err := os.Stat("testdata/small.txt")
-	if err != nil {
-		t.Fatal(err)
-	}
-	hdr, err := FileInfoHeader(fileinfo, "")
-	hdr.Typeflag = TypeSymlink
-	if err != nil {
-		t.Fatalf("os.Stat:1 %v", err)
-	}
-	// Force a PAX long linkname to be written
-	longLinkname := strings.Repeat("1234567890/1234567890", 10)
-	hdr.Linkname = longLinkname
-
-	hdr.Size = 0
-	var buf bytes.Buffer
-	writer := NewWriter(&buf)
-	if err := writer.WriteHeader(hdr); err != nil {
-		t.Fatal(err)
-	}
-	if err := writer.Close(); err != nil {
-		t.Fatal(err)
-	}
-	// Simple test to make sure PAX extensions are in effect
-	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
-		t.Fatal("Expected at least one PAX header to be written.")
-	}
-	// Test that we can get a long name back out of the archive.
-	reader := NewReader(&buf)
-	hdr, err = reader.Next()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if hdr.Linkname != longLinkname {
-		t.Fatal("Couldn't recover long link name")
-	}
-}
-
-func TestPaxNonAscii(t *testing.T) {
-	// Create an archive with non ascii. These should trigger a pax header
-	// because pax headers have a defined utf-8 encoding.
-	fileinfo, err := os.Stat("testdata/small.txt")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	hdr, err := FileInfoHeader(fileinfo, "")
-	if err != nil {
-		t.Fatalf("os.Stat:1 %v", err)
-	}
-
-	// some sample data
-	chineseFilename := "文件名"
-	chineseGroupname := "組"
-	chineseUsername := "用戶名"
-
-	hdr.Name = chineseFilename
-	hdr.Gname = chineseGroupname
-	hdr.Uname = chineseUsername
-
-	contents := strings.Repeat(" ", int(hdr.Size))
-
-	var buf bytes.Buffer
-	writer := NewWriter(&buf)
-	if err := writer.WriteHeader(hdr); err != nil {
-		t.Fatal(err)
-	}
-	if _, err = writer.Write([]byte(contents)); err != nil {
-		t.Fatal(err)
-	}
-	if err := writer.Close(); err != nil {
-		t.Fatal(err)
-	}
-	// Simple test to make sure PAX extensions are in effect
-	if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
-		t.Fatal("Expected at least one PAX header to be written.")
-	}
-	// Test that we can get a long name back out of the archive.
-	reader := NewReader(&buf)
-	hdr, err = reader.Next()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if hdr.Name != chineseFilename {
-		t.Fatal("Couldn't recover unicode name")
-	}
-	if hdr.Gname != chineseGroupname {
-		t.Fatal("Couldn't recover unicode group")
-	}
-	if hdr.Uname != chineseUsername {
-		t.Fatal("Couldn't recover unicode user")
-	}
-}
-
-func TestPaxXattrs(t *testing.T) {
-	xattrs := map[string]string{
-		"user.key": "value",
-	}
-
-	// Create an archive with an xattr
-	fileinfo, err := os.Stat("testdata/small.txt")
-	if err != nil {
-		t.Fatal(err)
-	}
-	hdr, err := FileInfoHeader(fileinfo, "")
-	if err != nil {
-		t.Fatalf("os.Stat: %v", err)
-	}
-	contents := "Kilts"
-	hdr.Xattrs = xattrs
-	var buf bytes.Buffer
-	writer := NewWriter(&buf)
-	if err := writer.WriteHeader(hdr); err != nil {
-		t.Fatal(err)
-	}
-	if _, err = writer.Write([]byte(contents)); err != nil {
-		t.Fatal(err)
-	}
-	if err := writer.Close(); err != nil {
-		t.Fatal(err)
-	}
-	// Test that we can get the xattrs back out of the archive.
-	reader := NewReader(&buf)
-	hdr, err = reader.Next()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
-		t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
-			hdr.Xattrs, xattrs)
-	}
-}
-
-func TestPAXHeader(t *testing.T) {
-	medName := strings.Repeat("CD", 50)
-	longName := strings.Repeat("AB", 100)
-	paxTests := [][2]string{
-		{paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
-		{"a=b", "6 a=b\n"},          // Single digit length
-		{"a=names", "11 a=names\n"}, // Test case involving carries
-		{paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
-		{paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
-
-	for _, test := range paxTests {
-		key, expected := test[0], test[1]
-		if result := paxHeader(key); result != expected {
-			t.Fatalf("paxHeader: got %s, expected %s", result, expected)
-		}
-	}
-}
-
-func TestUSTARLongName(t *testing.T) {
-	// Create an archive with a path that failed to split with USTAR extension in previous versions.
-	fileinfo, err := os.Stat("testdata/small.txt")
-	if err != nil {
-		t.Fatal(err)
-	}
-	hdr, err := FileInfoHeader(fileinfo, "")
-	hdr.Typeflag = TypeDir
-	if err != nil {
-		t.Fatalf("os.Stat:1 %v", err)
-	}
-	// Force a PAX long name to be written. The name was taken from a practical example
-	// that fails and replaced ever char through numbers to anonymize the sample.
-	longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
-	hdr.Name = longName
-
-	hdr.Size = 0
-	var buf bytes.Buffer
-	writer := NewWriter(&buf)
-	if err := writer.WriteHeader(hdr); err != nil {
-		t.Fatal(err)
-	}
-	if err := writer.Close(); err != nil {
-		t.Fatal(err)
-	}
-	// Test that we can get a long name back out of the archive.
-	reader := NewReader(&buf)
-	hdr, err = reader.Next()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if hdr.Name != longName {
-		t.Fatal("Couldn't recover long name")
-	}
-}
-
-func TestValidTypeflagWithPAXHeader(t *testing.T) {
-	var buffer bytes.Buffer
-	tw := NewWriter(&buffer)
-
-	fileName := strings.Repeat("ab", 100)
-
-	hdr := &Header{
-		Name:     fileName,
-		Size:     4,
-		Typeflag: 0,
-	}
-	if err := tw.WriteHeader(hdr); err != nil {
-		t.Fatalf("Failed to write header: %s", err)
-	}
-	if _, err := tw.Write([]byte("fooo")); err != nil {
-		t.Fatalf("Failed to write the file's data: %s", err)
-	}
-	tw.Close()
-
-	tr := NewReader(&buffer)
-
-	for {
-		header, err := tr.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			t.Fatalf("Failed to read header: %s", err)
-		}
-		if header.Typeflag != 0 {
-			t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
-		}
-	}
-}
diff --git a/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..eb72bff
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,7 @@
+# 0.7.3
+
+formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md
index 512f26e..d55f909 100644
--- a/vendor/src/github.com/Sirupsen/logrus/README.md
+++ b/vendor/src/github.com/Sirupsen/logrus/README.md
@@ -37,11 +37,13 @@
 [logfmt](http://godoc.org/github.com/kr/logfmt) format:
 
 ```text
-time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
-time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
-time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
-time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
-time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
 ```
 
 #### Example
@@ -106,6 +108,16 @@
     "omg":    true,
     "number": 100,
   }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
 }
 ```
 
@@ -187,31 +199,18 @@
 }
 ```
 
-* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
-  Send errors to an exception tracking service compatible with the Airbrake API.
-  Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
 
-* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
-  Send errors to the Papertrail hosted logging service via UDP.
-
-* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
-  Send errors to remote syslog server.
-  Uses standard library `log/syslog` behind the scenes.
-
-* [`github.com/Sirupsen/logrus/hooks/bugsnag`](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go)
-  Send errors to the Bugsnag exception tracking service.
-
-* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
-  Send errors to a channel in hipchat.
-
-* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
-  Send logs to Loggly (https://www.loggly.com/)
-
-* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus)
-  Hook for Slack chat.
-
-* [`github.com/wercker/journalhook`](https://github.com/wercker/journalhook).
-  Hook for logging to `systemd-journald`.
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
 
 #### Level logging
 
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter.go b/vendor/src/github.com/Sirupsen/logrus/formatter.go
index 038ce9f..104d689 100644
--- a/vendor/src/github.com/Sirupsen/logrus/formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/formatter.go
@@ -1,5 +1,9 @@
 package logrus
 
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
 // The Formatter interface is used to implement a custom Formatter. It takes an
 // `Entry`. It exposes all the fields, including the default ones:
 //
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
index 34b1ccb..8ea93dd 100644
--- a/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
+++ b/vendor/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go
@@ -3,19 +3,27 @@
 import (
 	"encoding/json"
 	"fmt"
+
 	"github.com/Sirupsen/logrus"
-	"time"
 )
 
 // Formatter generates json in logstash format.
 // Logstash site: http://logstash.net/
 type LogstashFormatter struct {
 	Type string // if not empty use for logstash type field.
+
+	// TimestampFormat sets the format used for timestamps.
+	TimestampFormat string
 }
 
 func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
 	entry.Data["@version"] = 1
-	entry.Data["@timestamp"] = entry.Time.Format(time.RFC3339)
+
+	if f.TimestampFormat == "" {
+		f.TimestampFormat = logrus.DefaultTimestampFormat
+	}
+
+	entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
 
 	// set message field
 	v, ok := entry.Data["message"]
diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
index 5c4c44b..dcc4f1d 100644
--- a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -3,10 +3,12 @@
 import (
 	"encoding/json"
 	"fmt"
-	"time"
 )
 
-type JSONFormatter struct{}
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+}
 
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 	data := make(Fields, len(entry.Data)+3)
@@ -21,7 +23,12 @@
 		}
 	}
 	prefixFieldClashes(data)
-	data["time"] = entry.Time.Format(time.RFC3339)
+
+	if f.TimestampFormat == "" {
+		f.TimestampFormat = DefaultTimestampFormat
+	}
+
+	data["time"] = entry.Time.Format(f.TimestampFormat)
 	data["msg"] = entry.Message
 	data["level"] = entry.Level.String()
 
diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
index 0a06a11..612417f 100644
--- a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
+++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -46,6 +46,9 @@
 	// the time passed since beginning of execution.
 	FullTimestamp bool
 
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
 	// The fields are sorted by default for a consistent output. For applications
 	// that log extremely frequently and don't use the JSON formatter this may not
 	// be desired.
@@ -68,11 +71,14 @@
 
 	isColored := (f.ForceColors || isTerminal) && !f.DisableColors
 
+	if f.TimestampFormat == "" {
+		f.TimestampFormat = DefaultTimestampFormat
+	}
 	if isColored {
 		f.printColored(b, entry, keys)
 	} else {
 		if !f.DisableTimestamp {
-			f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+			f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
 		}
 		f.appendKeyValue(b, "level", entry.Level.String())
 		f.appendKeyValue(b, "msg", entry.Message)
@@ -103,7 +109,7 @@
 	if !f.FullTimestamp {
 		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
 	} else {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(time.RFC3339), entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
 	}
 	for _, k := range keys {
 		v := entry.Data[k]
diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go
index 28a9499..e25a44f 100644
--- a/vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go
+++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go
@@ -3,8 +3,8 @@
 import (
 	"bytes"
 	"errors"
-
 	"testing"
+	"time"
 )
 
 func TestQuoting(t *testing.T) {
@@ -33,5 +33,29 @@
 	checkQuoting(true, errors.New("invalid argument"))
 }
 
+func TestTimestampFormat(t *testing.T) {
+	checkTimeStr := func(format string) {
+		customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
+		customStr, _ := customFormatter.Format(WithField("test", "test"))
+		timeStart := bytes.Index(customStr, ([]byte)("time="))
+		timeEnd := bytes.Index(customStr, ([]byte)("level="))
+		timeStr := customStr[timeStart+5 : timeEnd-1]
+		if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
+			timeStr = timeStr[1 : len(timeStr)-1]
+		}
+		if format == "" {
+			format = time.RFC3339
+		}
+		_, e := time.Parse(format, (string)(timeStr))
+		if e != nil {
+			t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
+		}
+	}
+
+	checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
+	checkTimeStr("Mon Jan _2 15:04:05 2006")
+	checkTimeStr("")
+}
+
 // TODO add tests for sorting etc., this requires a parser for the text
 // formatter output.
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
new file mode 100644
index 0000000..5f091bb
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
@@ -0,0 +1,1459 @@
+package v2
+
+import (
+	"net/http"
+	"regexp"
+
+	"github.com/docker/distribution/digest"
+)
+
+var (
+	nameParameterDescriptor = ParameterDescriptor{
+		Name:        "name",
+		Type:        "string",
+		Format:      RepositoryNameRegexp.String(),
+		Required:    true,
+		Description: `Name of the target repository.`,
+	}
+
+	tagParameterDescriptor = ParameterDescriptor{
+		Name:        "tag",
+		Type:        "string",
+		Format:      TagNameRegexp.String(),
+		Required:    true,
+		Description: `Tag of the target manifiest.`,
+	}
+
+	uuidParameterDescriptor = ParameterDescriptor{
+		Name:        "uuid",
+		Type:        "opaque",
+		Required:    true,
+		Description: `A uuid identifying the upload. This field can accept almost anything.`,
+	}
+
+	digestPathParameter = ParameterDescriptor{
+		Name:        "digest",
+		Type:        "path",
+		Required:    true,
+		Format:      digest.DigestRegexp.String(),
+		Description: `Digest of desired blob.`,
+	}
+
+	hostHeader = ParameterDescriptor{
+		Name:        "Host",
+		Type:        "string",
+		Description: "Standard HTTP Host Header. Should be set to the registry host.",
+		Format:      "<registry host>",
+		Examples:    []string{"registry-1.docker.io"},
+	}
+
+	authHeader = ParameterDescriptor{
+		Name:        "Authorization",
+		Type:        "string",
+		Description: "An RFC7235 compliant authorization header.",
+		Format:      "<scheme> <token>",
+		Examples:    []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="},
+	}
+
+	authChallengeHeader = ParameterDescriptor{
+		Name:        "WWW-Authenticate",
+		Type:        "string",
+		Description: "An RFC7235 compliant authentication challenge header.",
+		Format:      `<scheme> realm="<realm>", ..."`,
+		Examples: []string{
+			`Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`,
+		},
+	}
+
+	contentLengthZeroHeader = ParameterDescriptor{
+		Name:        "Content-Length",
+		Description: "The `Content-Length` header must be zero and the body must be empty.",
+		Type:        "integer",
+		Format:      "0",
+	}
+
+	dockerUploadUUIDHeader = ParameterDescriptor{
+		Name:        "Docker-Upload-UUID",
+		Description: "Identifies the docker upload uuid for the current request.",
+		Type:        "uuid",
+		Format:      "<uuid>",
+	}
+
+	digestHeader = ParameterDescriptor{
+		Name:        "Docker-Content-Digest",
+		Description: "Digest of the targeted content for the request.",
+		Type:        "digest",
+		Format:      "<digest>",
+	}
+
+	unauthorizedResponse = ResponseDescriptor{
+		Description: "The client does not have access to the repository.",
+		StatusCode:  http.StatusUnauthorized,
+		Headers: []ParameterDescriptor{
+			authChallengeHeader,
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON error response body.",
+				Format:      "<length>",
+			},
+		},
+		ErrorCodes: []ErrorCode{
+			ErrorCodeUnauthorized,
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      unauthorizedErrorsBody,
+		},
+	}
+
+	unauthorizedResponsePush = ResponseDescriptor{
+		Description: "The client does not have access to push to the repository.",
+		StatusCode:  http.StatusUnauthorized,
+		Headers: []ParameterDescriptor{
+			authChallengeHeader,
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON error response body.",
+				Format:      "<length>",
+			},
+		},
+		ErrorCodes: []ErrorCode{
+			ErrorCodeUnauthorized,
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      unauthorizedErrorsBody,
+		},
+	}
+)
+
+const (
+	manifestBody = `{
+   "name": <name>,
+   "tag": <tag>,
+   "fsLayers": [
+      {
+         "blobSum": <tarsum>
+      },
+      ...
+    ]
+   ],
+   "history": <v1 images>,
+   "signature": <JWS>
+}`
+
+	errorsBody = `{
+	"errors:" [
+	    {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}`
+
+	unauthorizedErrorsBody = `{
+	"errors:" [
+	    {
+            "code": "UNAUTHORIZED",
+            "message": "access to the requested resource is not authorized",
+            "detail": ...
+        },
+        ...
+    ]
+}`
+)
+
+// APIDescriptor exports descriptions of the layout of the v2 registry API.
+var APIDescriptor = struct {
+	// RouteDescriptors provides a list of the routes available in the API.
+	RouteDescriptors []RouteDescriptor
+
+	// ErrorDescriptors provides a list of the error codes and their
+	// associated documentation and metadata.
+	ErrorDescriptors []ErrorDescriptor
+}{
+	RouteDescriptors: routeDescriptors,
+	ErrorDescriptors: errorDescriptors,
+}
+
+// RouteDescriptor describes a route specified by name.
+type RouteDescriptor struct {
+	// Name is the name of the route, as specified in RouteNameXXX exports.
+	// These names a should be considered a unique reference for a route. If
+	// the route is registered with gorilla, this is the name that will be
+	// used.
+	Name string
+
+	// Path is a gorilla/mux-compatible regexp that can be used to match the
+	// route. For any incoming method and path, only one route descriptor
+	// should match.
+	Path string
+
+	// Entity should be a short, human-readalbe description of the object
+	// targeted by the endpoint.
+	Entity string
+
+	// Description should provide an accurate overview of the functionality
+	// provided by the route.
+	Description string
+
+	// Methods should describe the various HTTP methods that may be used on
+	// this route, including request and response formats.
+	Methods []MethodDescriptor
+}
+
+// MethodDescriptor provides a description of the requests that may be
+// conducted with the target method.
+type MethodDescriptor struct {
+
+	// Method is an HTTP method, such as GET, PUT or POST.
+	Method string
+
+	// Description should provide an overview of the functionality provided by
+	// the covered method, suitable for use in documentation. Use of markdown
+	// here is encouraged.
+	Description string
+
+	// Requests is a slice of request descriptors enumerating how this
+	// endpoint may be used.
+	Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be carried out with the parent method. Its most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+	// Name provides a short identifier for the request, usable as a title or
+	// to provide quick context for the particalar request.
+	Name string
+
+	// Description should cover the requests purpose, covering any details for
+	// this particular use case.
+	Description string
+
+	// Headers describes headers that must be used with the HTTP request.
+	Headers []ParameterDescriptor
+
+	// PathParameters enumerate the parameterized path components for the
+	// given request, as defined in the route's regular expression.
+	PathParameters []ParameterDescriptor
+
+	// QueryParameters provides a list of query parameters for the given
+	// request.
+	QueryParameters []ParameterDescriptor
+
+	// Body describes the format of the request body.
+	Body BodyDescriptor
+
+	// Successes enumerates the possible responses that are considered to be
+	// the result of a successful request.
+	Successes []ResponseDescriptor
+
+	// Failures covers the possible failures from this particular request.
+	Failures []ResponseDescriptor
+}
+
+// ResponseDescriptor describes the components of an API response.
+type ResponseDescriptor struct {
+	// Name provides a short identifier for the response, usable as a title or
+	// to provide quick context for the particalar response.
+	Name string
+
+	// Description should provide a brief overview of the role of the
+	// response.
+	Description string
+
+	// StatusCode specifies the status recieved by this particular response.
+	StatusCode int
+
+	// Headers covers any headers that may be returned from the response.
+	Headers []ParameterDescriptor
+
+	// ErrorCodes enumerates the error codes that may be returned along with
+	// the response.
+	ErrorCodes []ErrorCode
+
+	// Body describes the body of the response, if any.
+	Body BodyDescriptor
+}
+
+// BodyDescriptor describes a request body and its expected content type. For
+// the most  part, it should be example json or some placeholder for body
+// data in documentation.
+type BodyDescriptor struct {
+	ContentType string
+	Format      string
+}
+
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+	// Name is the name of the parameter, either of the path component or
+	// query parameter.
+	Name string
+
+	// Type specifies the type of the parameter, such as string, integer, etc.
+	Type string
+
+	// Description provides a human-readable description of the parameter.
+	Description string
+
+	// Required means the field is required when set.
+	Required bool
+
+	// Format is a specifying the string format accepted by this parameter.
+	Format string
+
+	// Regexp is a compiled regular expression that can be used to validate
+	// the contents of the parameter.
+	Regexp *regexp.Regexp
+
+	// Examples provides multiple examples for the values that might be valid
+	// for this parameter.
+	Examples []string
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often captilized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human readable decription of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the errors purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCodes provides a list of status under which this error
+	// condition may arise. If it is empty, the error condition may be seen
+	// for any status code.
+	HTTPStatusCodes []int
+}
+
+var routeDescriptors = []RouteDescriptor{
+	{
+		Name:        RouteNameBase,
+		Path:        "/v2/",
+		Entity:      "Base",
+		Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authorization.`,
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Check that the endpoint implements Docker Registry API V2.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The API implements V2 protocol and is accessible.",
+								StatusCode:  http.StatusOK,
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "The client is not authorized to access the registry.",
+								StatusCode:  http.StatusUnauthorized,
+								Headers: []ParameterDescriptor{
+									authChallengeHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeUnauthorized,
+								},
+							},
+							{
+								Description: "The registry does not implement the V2 API.",
+								StatusCode:  http.StatusNotFound,
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameTags,
+		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list",
+		Entity:      "Tags",
+		Description: "Retrieve information about tags.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Fetch the tags under the repository identified by `name`.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode:  http.StatusOK,
+								Description: "A list of tags for the named repository.",
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "name": <name>,
+    "tags": [
+        <tag>,
+        ...
+    ]
+}`,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								StatusCode:  http.StatusNotFound,
+								Description: "The repository is not known to the registry.",
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameUnknown,
+								},
+							},
+							{
+								StatusCode:  http.StatusUnauthorized,
+								Description: "The client does not have access to the repository.",
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeUnauthorized,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameManifest,
+		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}",
+		Entity:      "Manifest",
+		Description: "Create, update and retrieve manifests.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							tagParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The manifest idenfied by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									digestHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      manifestBody,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "The name or reference was invalid.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								StatusCode:  http.StatusUnauthorized,
+								Description: "The client does not have access to the repository.",
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeUnauthorized,
+								},
+							},
+							{
+								Description: "The named manifest is not known to the registry.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeManifestUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				Method:      "PUT",
+				Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							tagParameterDescriptor,
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/json; charset=utf-8",
+							Format:      manifestBody,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.",
+								StatusCode:  http.StatusAccepted,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Description: "The canonical location url of the uploaded manifest.",
+										Format:      "<url>",
+									},
+									contentLengthZeroHeader,
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:        "Invalid Manifest",
+								Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.",
+								StatusCode:  http.StatusBadRequest,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+									ErrorCodeManifestInvalid,
+									ErrorCodeManifestUnverified,
+									ErrorCodeBlobUnknown,
+								},
+							},
+							{
+								StatusCode:  http.StatusUnauthorized,
+								Description: "The client does not have permission to push to the repository.",
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeUnauthorized,
+								},
+							},
+							{
+								Name:        "Missing Layer(s)",
+								Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeBlobUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "errors:" [{
+            "code": "BLOB_UNKNOWN",
+            "message": "blob unknown to registry",
+            "detail": {
+                "digest": <tarsum>
+            }
+        },
+        ...
+    ]
+}`,
+								},
+							},
+							{
+								StatusCode: http.StatusUnauthorized,
+								Headers: []ParameterDescriptor{
+									authChallengeHeader,
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON error response body.",
+										Format:      "<length>",
+									},
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeUnauthorized,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				Method:      "DELETE",
+				Description: "Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							tagParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode: http.StatusAccepted,
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:        "Invalid Name or Tag",
+								Description: "The specified `name` or `tag` were invalid and the delete was unable to proceed.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								StatusCode: http.StatusUnauthorized,
+								Headers: []ParameterDescriptor{
+									authChallengeHeader,
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON error response body.",
+										Format:      "<length>",
+									},
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeUnauthorized,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Name:        "Unknown Manifest",
+								Description: "The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeManifestUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+
+	{
+		Name:        RouteNameBlob,
+		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
+		Entity:      "Blob",
+		Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.",
+		Methods: []MethodDescriptor{
+
+			{
+				Method:      "GET",
+				Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+				Requests: []RequestDescriptor{
+					{
+						Name: "Fetch Blob",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "The length of the requested blob content.",
+										Format:      "<length>",
+									},
+									digestHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/octet-stream",
+									Format:      "<blob binary data>",
+								},
+							},
+							{
+								Description: "The blob identified by `digest` is available at the provided location.",
+								StatusCode:  http.StatusTemporaryRedirect,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Description: "The location where the layer should be accessible.",
+										Format:      "<blob location>",
+									},
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeDigestInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponse,
+							{
+								Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+								StatusCode:  http.StatusNotFound,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeBlobUnknown,
+								},
+							},
+						},
+					},
+					{
+						Name:        "Fetch Blob Part",
+						Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Range",
+								Type:        "string",
+								Description: "HTTP Range header specifying blob chunk.",
+								Format:      "bytes=<start>-<end>",
+							},
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.",
+								StatusCode:  http.StatusPartialContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "The length of the requested blob chunk.",
+										Format:      "<length>",
+									},
+									{
+										Name:        "Content-Range",
+										Type:        "byte range",
+										Description: "Content range of blob chunk.",
+										Format:      "bytes <start>-<end>/<size>",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/octet-stream",
+									Format:      "<blob binary data>",
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeDigestInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponse,
+							{
+								StatusCode: http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeBlobUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.",
+								StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+							},
+						},
+					},
+				},
+			},
+			// TODO(stevvooe): We may want to add a PUT request here to
+			// kickoff an upload of a blob, integrated with the blob upload
+			// API.
+		},
+	},
+
+	{
+		Name:        RouteNameBlobUpload,
+		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/",
+		Entity:      "Intiate Blob Upload",
+		Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "POST",
+				Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Initiate Monolithic Blob Upload",
+						Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:   "Content-Length",
+								Type:   "integer",
+								Format: "<length of blob>",
+							},
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "digest",
+								Type:        "query",
+								Format:      "<tarsum>",
+								Regexp:      digest.DigestRegexp,
+								Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octect-stream",
+							Format:      "<binary data>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been created in the registry and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							unauthorizedResponsePush,
+						},
+					},
+					{
+						Name:        "Initiate Resumable Blob Upload",
+						Description: "Initiate a resumable blob upload with an empty request body.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
+								StatusCode:  http.StatusAccepted,
+								Headers: []ParameterDescriptor{
+									contentLengthZeroHeader,
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Format:      "0-0",
+										Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.",
+									},
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							unauthorizedResponsePush,
+						},
+					},
+				},
+			},
+		},
+	},
+
+	{
+		Name:        RouteNameBlobUploadChunk,
+		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}",
+		Entity:      "Blob Upload",
+		Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.",
+				Requests: []RequestDescriptor{
+					{
+						Description: "Retrieve the progress of the current upload, as reported by the `Range` header.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Upload Progress",
+								Description: "The upload is known and in progress. The last received offset is available in the `Range` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponse,
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				Method:      "PATCH",
+				Description: "Upload a chunk of data for the specified upload.",
+				Requests: []RequestDescriptor{
+					{
+						Description: "Upload a chunk of data to specified upload without completing the upload.",
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Content-Range",
+								Type:        "header",
+								Format:      "<start of range>-<end of range, inclusive>",
+								Required:    true,
+								Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+							},
+							{
+								Name:        "Content-Length",
+								Type:        "integer",
+								Format:      "<length of chunk>",
+								Description: "Length of the chunk being uploaded, corresponding the length of the request body.",
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary chunk>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Chunk Accepted",
+								Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponsePush,
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+								StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+							},
+						},
+					},
+				},
+			},
+			{
+				Method:      "PUT",
+				Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+				Requests: []RequestDescriptor{
+					{
+						// TODO(stevvooe): Break this down into three separate requests:
+						// 	1. Complete an upload where all data has already been sent.
+						// 	2. Complete an upload where the entire body is in the PUT.
+						// 	3. Complete an upload where the final, partial chunk is the body.
+
+						Description: "Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Content-Range",
+								Type:        "header",
+								Format:      "<start of range>-<end of range, inclusive>",
+								Description: "Range of bytes identifying the block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.",
+							},
+							{
+								Name:        "Content-Length",
+								Type:        "integer",
+								Format:      "<length of chunk>",
+								Description: "Length of the chunk being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
+							},
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "digest",
+								Type:        "string",
+								Format:      "<tarsum>",
+								Regexp:      digest.DigestRegexp,
+								Required:    true,
+								Description: `Digest of uploaded blob.`,
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary chunk>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Upload Complete",
+								Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									{
+										Name:        "Content-Range",
+										Type:        "header",
+										Format:      "<start of range>-<end of range, inclusive>",
+										Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
+									},
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Format:      "<length of chunk>",
+										Description: "Length of the chunk being uploaded, corresponding the length of the request body.",
+									},
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponsePush,
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.",
+								StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				Method:      "DELETE",
+				Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.",
+				Requests: []RequestDescriptor{
+					{
+						Description: "Cancel the upload specified by `uuid`.",
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Upload Deleted",
+								Description: "The upload has been successfully deleted.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									contentLengthZeroHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "An error was encountered processing the delete. The client may ignore this error.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponse,
+							{
+								Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+}
+
+// ErrorDescriptors provides a list of HTTP API Error codes that may be
+// encountered when interacting with the registry API.
+var errorDescriptors = []ErrorDescriptor{
+	{
+		Code:    ErrorCodeUnknown,
+		Value:   "UNKNOWN",
+		Message: "unknown error",
+		Description: `Generic error returned when the error does not have an
+		API classification.`,
+	},
+	{
+		Code:    ErrorCodeUnsupported,
+		Value:   "UNSUPPORTED",
+		Message: "The operation is unsupported.",
+		Description: `The operation was unsupported due to a missing
+		implementation or invalid set of parameters.`,
+	},
+	{
+		Code:    ErrorCodeUnauthorized,
+		Value:   "UNAUTHORIZED",
+		Message: "access to the requested resource is not authorized",
+		Description: `The access controller denied access for the operation on
+		a resource. Often this will be accompanied by a 401 Unauthorized
+		response status.`,
+	},
+	{
+		Code:    ErrorCodeDigestInvalid,
+		Value:   "DIGEST_INVALID",
+		Message: "provided digest did not match uploaded content",
+		Description: `When a blob is uploaded, the registry will check that
+		the content matches the digest provided by the client. The error may
+		include a detail structure with the key "digest", including the
+		invalid digest string. This error may also be returned when a manifest
+		includes an invalid layer digest.`,
+		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
+	},
+	{
+		Code:    ErrorCodeSizeInvalid,
+		Value:   "SIZE_INVALID",
+		Message: "provided length did not match content length",
+		Description: `When a layer is uploaded, the provided size will be
+		checked against the uploaded content. If they do not match, this error
+		will be returned.`,
+		HTTPStatusCodes: []int{http.StatusBadRequest},
+	},
+	{
+		Code:    ErrorCodeNameInvalid,
+		Value:   "NAME_INVALID",
+		Message: "invalid repository name",
+		Description: `Invalid repository name encountered either during
+		manifest validation or any API operation.`,
+		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
+	},
+	{
+		Code:    ErrorCodeTagInvalid,
+		Value:   "TAG_INVALID",
+		Message: "manifest tag did not match URI",
+		Description: `During a manifest upload, if the tag in the manifest
+		does not match the uri tag, this error will be returned.`,
+		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
+	},
+	{
+		Code:    ErrorCodeNameUnknown,
+		Value:   "NAME_UNKNOWN",
+		Message: "repository name not known to registry",
+		Description: `This is returned if the name used during an operation is
+		unknown to the registry.`,
+		HTTPStatusCodes: []int{http.StatusNotFound},
+	},
+	{
+		Code:    ErrorCodeManifestUnknown,
+		Value:   "MANIFEST_UNKNOWN",
+		Message: "manifest unknown",
+		Description: `This error is returned when the manifest, identified by
+		name and tag is unknown to the repository.`,
+		HTTPStatusCodes: []int{http.StatusNotFound},
+	},
+	{
+		Code:    ErrorCodeManifestInvalid,
+		Value:   "MANIFEST_INVALID",
+		Message: "manifest invalid",
+		Description: `During upload, manifests undergo several checks ensuring
+		validity. If those checks fail, this error may be returned, unless a
+		more specific error is included. The detail will contain information
+		the failed validation.`,
+		HTTPStatusCodes: []int{http.StatusBadRequest},
+	},
+	{
+		Code:    ErrorCodeManifestUnverified,
+		Value:   "MANIFEST_UNVERIFIED",
+		Message: "manifest failed signature verification",
+		Description: `During manifest upload, if the manifest fails signature
+		verification, this error will be returned.`,
+		HTTPStatusCodes: []int{http.StatusBadRequest},
+	},
+	{
+		Code:    ErrorCodeBlobUnknown,
+		Value:   "BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a blob is unknown to the
+		registry in a specified repository. This can be returned with a
+		standard get or if a manifest references an unknown layer during
+		upload.`,
+		HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
+	},
+
+	{
+		Code:    ErrorCodeBlobUploadUnknown,
+		Value:   "BLOB_UPLOAD_UNKNOWN",
+		Message: "blob upload unknown to registry",
+		Description: `If a blob upload has been cancelled or was never
+		started, this error code may be returned.`,
+		HTTPStatusCodes: []int{http.StatusNotFound},
+	},
+	{
+		Code:    ErrorCodeBlobUploadInvalid,
+		Value:   "BLOB_UPLOAD_INVALID",
+		Message: "blob upload invalid",
+		Description: `The blob upload encountered an error and can no
+		longer proceed.`,
+		HTTPStatusCodes: []int{http.StatusNotFound},
+	},
+}
+
+var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor
+var idToDescriptors map[string]ErrorDescriptor
+var routeDescriptorsMap map[string]RouteDescriptor
+
+func init() {
+	errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(errorDescriptors))
+	idToDescriptors = make(map[string]ErrorDescriptor, len(errorDescriptors))
+	routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
+
+	for _, descriptor := range errorDescriptors {
+		errorCodeToDescriptors[descriptor.Code] = descriptor
+		idToDescriptors[descriptor.Value] = descriptor
+	}
+	for _, descriptor := range routeDescriptors {
+		routeDescriptorsMap[descriptor.Name] = descriptor
+	}
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go
new file mode 100644
index 0000000..cde0119
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go
@@ -0,0 +1,9 @@
+// Package v2 describes routes, urls and the error codes used in the Docker
+// Registry JSON HTTP API V2. In addition to declarations, descriptors are
+// provided for routes and error codes that can be used for implementation and
+// automatically generating documentation.
+//
+// Definitions here are considered to be locked down for the V2 registry api.
+// Any changes must be considered carefully and should not proceed without a
+// change proposal in docker core.
+package v2
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 0000000..cbae020
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,194 @@
+package v2
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+const (
+	// ErrorCodeUnknown is a catch-all for errors not defined below.
+	ErrorCodeUnknown ErrorCode = iota
+
+	// ErrorCodeUnsupported is returned when an operation is not supported.
+	ErrorCodeUnsupported
+
+	// ErrorCodeUnauthorized is returned if a request is not authorized.
+	ErrorCodeUnauthorized
+
+	// ErrorCodeDigestInvalid is returned when uploading a blob if the
+	// provided digest does not match the blob contents.
+	ErrorCodeDigestInvalid
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+	// size does not match the content length.
+	ErrorCodeSizeInvalid
+
+	// ErrorCodeNameInvalid is returned when the name in the manifest does not
+	// match the provided name.
+	ErrorCodeNameInvalid
+
+	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
+	// match the provided tag.
+	ErrorCodeTagInvalid
+
+	// ErrorCodeNameUnknown when the repository name is not known.
+	ErrorCodeNameUnknown
+
+	// ErrorCodeManifestUnknown returned when image manifest is unknown.
+	ErrorCodeManifestUnknown
+
+	// ErrorCodeManifestInvalid returned when an image manifest is invalid,
+	// typically during a PUT operation. This error encompasses all errors
+	// encountered during manifest validation that aren't signature errors.
+	ErrorCodeManifestInvalid
+
+	// ErrorCodeManifestUnverified is returned when the manifest fails
+	// signature verfication.
+	ErrorCodeManifestUnverified
+
+	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
+	// registry. This can happen when the manifest references a nonexistent
+	// layer or the result is not found by a blob fetch.
+	ErrorCodeBlobUnknown
+
+	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+	ErrorCodeBlobUploadUnknown
+
+	// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+	ErrorCodeBlobUploadInvalid
+)
+
+// ParseErrorCode attempts to parse the error code string, returning
+// ErrorCodeUnknown if the error is not known.
+func ParseErrorCode(s string) ErrorCode {
+	desc, ok := idToDescriptors[s]
+
+	if !ok {
+		return ErrorCodeUnknown
+	}
+
+	return desc.Code
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returned the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message,omitempty"`
+	Detail  interface{} `json:"detail,omitempty"`
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s",
+		strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
+		e.Message)
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors struct {
+	Errors []Error `json:"errors,omitempty"`
+}
+
+// Push pushes an error on to the error stack, with the optional detail
+// argument. It is a programming error (ie panic) to push more than one
+// detail at a time.
+func (errs *Errors) Push(code ErrorCode, details ...interface{}) {
+	if len(details) > 1 {
+		panic("please specify zero or one detail items for this error")
+	}
+
+	var detail interface{}
+	if len(details) > 0 {
+		detail = details[0]
+	}
+
+	if err, ok := detail.(error); ok {
+		detail = err.Error()
+	}
+
+	errs.PushErr(Error{
+		Code:    code,
+		Message: code.Message(),
+		Detail:  detail,
+	})
+}
+
+// PushErr pushes an error interface onto the error stack.
+func (errs *Errors) PushErr(err error) {
+	switch err.(type) {
+	case Error:
+		errs.Errors = append(errs.Errors, err.(Error))
+	default:
+		errs.Errors = append(errs.Errors, Error{Message: err.Error()})
+	}
+}
+
+func (errs *Errors) Error() string {
+	switch errs.Len() {
+	case 0:
+		return "<nil>"
+	case 1:
+		return errs.Errors[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs.Errors {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Clear clears the errors.
+func (errs *Errors) Clear() {
+	errs.Errors = errs.Errors[:0]
+}
+
+// Len returns the current number of errors.
+func (errs *Errors) Len() int {
+	return len(errs.Errors)
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/errors_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/errors_test.go
new file mode 100644
index 0000000..9cc831c
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/errors_test.go
@@ -0,0 +1,165 @@
+package v2
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+
+	"github.com/docker/distribution/digest"
+)
+
+// TestErrorCodes ensures that error code format, mappings and
+// marshaling/unmarshaling. round trips are stable.
+func TestErrorCodes(t *testing.T) {
+	for _, desc := range errorDescriptors {
+		if desc.Code.String() != desc.Value {
+			t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value)
+		}
+
+		if desc.Code.Message() != desc.Message {
+			t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message)
+		}
+
+		// Serialize the error code using the json library to ensure that we
+		// get a string and it works round trip.
+		p, err := json.Marshal(desc.Code)
+
+		if err != nil {
+			t.Fatalf("error marshaling error code %v: %v", desc.Code, err)
+		}
+
+		if len(p) <= 0 {
+			t.Fatalf("expected content in marshaled before for error code %v", desc.Code)
+		}
+
+		// First, unmarshal to interface and ensure we have a string.
+		var ecUnspecified interface{}
+		if err := json.Unmarshal(p, &ecUnspecified); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
+		}
+
+		if _, ok := ecUnspecified.(string); !ok {
+			t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified)
+		}
+
+		// Now, unmarshal with the error code type and ensure they are equal
+		var ecUnmarshaled ErrorCode
+		if err := json.Unmarshal(p, &ecUnmarshaled); err != nil {
+			t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err)
+		}
+
+		if ecUnmarshaled != desc.Code {
+			t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code)
+		}
+	}
+}
+
+// TestErrorsManagement does a quick check of the Errors type to ensure that
+// members are properly pushed and marshaled.
+func TestErrorsManagement(t *testing.T) {
+	var errs Errors
+
+	errs.Push(ErrorCodeDigestInvalid)
+	errs.Push(ErrorCodeBlobUnknown,
+		map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"})
+
+	p, err := json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marashaling errors: %v", err)
+	}
+
+	expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}"
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
+	}
+
+	errs.Clear()
+	errs.Push(ErrorCodeUnknown)
+	expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}"
+	p, err = json.Marshal(errs)
+
+	if err != nil {
+		t.Fatalf("error marashaling errors: %v", err)
+	}
+
+	if string(p) != expectedJSON {
+		t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON)
+	}
+}
+
+// TestMarshalUnmarshal ensures that api errors can round trip through json
+// without losing information.
+func TestMarshalUnmarshal(t *testing.T) {
+
+	var errors Errors
+
+	for _, testcase := range []struct {
+		description string
+		err         Error
+	}{
+		{
+			description: "unknown error",
+			err: Error{
+
+				Code:    ErrorCodeUnknown,
+				Message: ErrorCodeUnknown.Descriptor().Message,
+			},
+		},
+		{
+			description: "unknown manifest",
+			err: Error{
+				Code:    ErrorCodeManifestUnknown,
+				Message: ErrorCodeManifestUnknown.Descriptor().Message,
+			},
+		},
+		{
+			description: "unknown manifest",
+			err: Error{
+				Code:    ErrorCodeBlobUnknown,
+				Message: ErrorCodeBlobUnknown.Descriptor().Message,
+				Detail:  map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"},
+			},
+		},
+	} {
+		fatalf := func(format string, args ...interface{}) {
+			t.Fatalf(testcase.description+": "+format, args...)
+		}
+
+		unexpectedErr := func(err error) {
+			fatalf("unexpected error: %v", err)
+		}
+
+		p, err := json.Marshal(testcase.err)
+		if err != nil {
+			unexpectedErr(err)
+		}
+
+		var unmarshaled Error
+		if err := json.Unmarshal(p, &unmarshaled); err != nil {
+			unexpectedErr(err)
+		}
+
+		if !reflect.DeepEqual(unmarshaled, testcase.err) {
+			fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err)
+		}
+
+		// Roll everything up into an error response envelope.
+		errors.PushErr(testcase.err)
+	}
+
+	p, err := json.Marshal(errors)
+	if err != nil {
+		t.Fatalf("unexpected error marshaling error envelope: %v", err)
+	}
+
+	var unmarshaled Errors
+	if err := json.Unmarshal(p, &unmarshaled); err != nil {
+		t.Fatalf("unexpected error unmarshaling error envelope: %v", err)
+	}
+
+	if !reflect.DeepEqual(unmarshaled, errors) {
+		t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors)
+	}
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/names.go b/vendor/src/github.com/docker/distribution/registry/api/v2/names.go
new file mode 100644
index 0000000..e4a9886
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/names.go
@@ -0,0 +1,100 @@
+package v2
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// TODO(stevvooe): Move these definitions back to an exported package. While
+// they are used with v2 definitions, their relevance expands beyond.
+// "distribution/names" is a candidate package.
+
+const (
+	// RepositoryNameComponentMinLength is the minimum number of characters in a
+	// single repository name slash-delimited component
+	RepositoryNameComponentMinLength = 2
+
+	// RepositoryNameMinComponents is the minimum number of slash-delimited
+	// components that a repository name must have
+	RepositoryNameMinComponents = 1
+
+	// RepositoryNameTotalLengthMax is the maximum total number of characters in
+	// a repository name
+	RepositoryNameTotalLengthMax = 255
+)
+
+// RepositoryNameComponentRegexp restricts registry path component names to
+// start with at least one letter or number, with following parts able to
+// be separated by one period, dash or underscore.
+var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`)
+
+// RepositoryNameComponentAnchoredRegexp is the version of
+// RepositoryNameComponentRegexp which must completely match the content
+var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`)
+
+// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow
+// multiple path components, separated by a forward slash.
+var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/)*` + RepositoryNameComponentRegexp.String())
+
+// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go.
+var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)
+
+// TODO(stevvooe): Contribute these exports back to core, so they are shared.
+
+var (
+	// ErrRepositoryNameComponentShort is returned when a repository name
+	// contains a component which is shorter than
+	// RepositoryNameComponentMinLength
+	ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength)
+
+	// ErrRepositoryNameMissingComponents is returned when a repository name
+	// contains fewer than RepositoryNameMinComponents components
+	ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents)
+
+	// ErrRepositoryNameLong is returned when a repository name is longer than
+	// RepositoryNameTotalLengthMax
+	ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)
+
+	// ErrRepositoryNameComponentInvalid is returned when a repository name does
+	// not match RepositoryNameComponentRegexp
+	ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String())
+)
+
+// ValidateRespositoryName ensures the repository name is valid for use in the
+// registry. This function accepts a superset of what might be accepted by
+// docker core or docker hub. If the name does not pass validation, an error,
+// describing the conditions, is returned.
+//
+// Effectively, the name should comply with the following grammar:
+//
+// 	alpha-numeric := /[a-z0-9]+/
+//	separator := /[._-]/
+//	component := alpha-numeric [separator alpha-numeric]*
+//	namespace := component ['/' component]*
+//
+// The result of the production, known as the "namespace", should be limited
+// to 255 characters.
+func ValidateRespositoryName(name string) error {
+	if len(name) > RepositoryNameTotalLengthMax {
+		return ErrRepositoryNameLong
+	}
+
+	components := strings.Split(name, "/")
+
+	if len(components) < RepositoryNameMinComponents {
+		return ErrRepositoryNameMissingComponents
+	}
+
+	for _, component := range components {
+		if len(component) < RepositoryNameComponentMinLength {
+			return ErrRepositoryNameComponentShort
+		}
+
+		if !RepositoryNameComponentAnchoredRegexp.MatchString(component) {
+			return ErrRepositoryNameComponentInvalid
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/names_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/names_test.go
new file mode 100644
index 0000000..de6a168
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/names_test.go
@@ -0,0 +1,100 @@
+package v2
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestRepositoryNameRegexp(t *testing.T) {
+	for _, testcase := range []struct {
+		input string
+		err   error
+	}{
+		{
+			input: "short",
+		},
+		{
+			input: "simple/name",
+		},
+		{
+			input: "library/ubuntu",
+		},
+		{
+			input: "docker/stevvooe/app",
+		},
+		{
+			input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb",
+		},
+		{
+			input: "aa/aa/bb/bb/bb",
+		},
+		{
+			input: "a/a/a/b/b",
+			err:   ErrRepositoryNameComponentShort,
+		},
+		{
+			input: "a/a/a/a/",
+			err:   ErrRepositoryNameComponentShort,
+		},
+		{
+			input: "foo.com/bar/baz",
+		},
+		{
+			input: "blog.foo.com/bar/baz",
+		},
+		{
+			input: "asdf",
+		},
+		{
+			input: "asdf$$^/aa",
+			err:   ErrRepositoryNameComponentInvalid,
+		},
+		{
+			input: "aa-a/aa",
+		},
+		{
+			input: "aa/aa",
+		},
+		{
+			input: "a-a/a-a",
+		},
+		{
+			input: "a",
+			err:   ErrRepositoryNameComponentShort,
+		},
+		{
+			input: "a-/a/a/a",
+			err:   ErrRepositoryNameComponentInvalid,
+		},
+		{
+			input: strings.Repeat("a", 255),
+		},
+		{
+			input: strings.Repeat("a", 256),
+			err:   ErrRepositoryNameLong,
+		},
+	} {
+
+		failf := func(format string, v ...interface{}) {
+			t.Logf(testcase.input+": "+format, v...)
+			t.Fail()
+		}
+
+		if err := ValidateRespositoryName(testcase.input); err != testcase.err {
+			if testcase.err != nil {
+				if err != nil {
+					failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err)
+				} else {
+					failf("expected invalid repository: %v", testcase.err)
+				}
+			} else {
+				if err != nil {
+					// Wrong error returned.
+					failf("unexpected error validating repository name: %v, expected %v", err, testcase.err)
+				} else {
+					failf("unexpected error validating repository name: %v", err)
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 0000000..69f9d90
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,47 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the name under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+	RouteNameBase            = "base"
+	RouteNameManifest        = "manifest"
+	RouteNameTags            = "tags"
+	RouteNameBlob            = "blob"
+	RouteNameBlobUpload      = "blob-upload"
+	RouteNameBlobUploadChunk = "blob-upload-chunk"
+)
+
+var allEndpoints = []string{
+	RouteNameManifest,
+	RouteNameTags,
+	RouteNameBlob,
+	RouteNameBlobUpload,
+	RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+	return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+	rootRouter := mux.NewRouter()
+	router := rootRouter
+	if prefix != "" {
+		router = router.PathPrefix(prefix).Subrouter()
+	}
+
+	router.StrictSlash(true)
+
+	for _, descriptor := range routeDescriptors {
+		router.Path(descriptor.Path).Name(descriptor.Name)
+	}
+
+	return rootRouter
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/routes_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/routes_test.go
new file mode 100644
index 0000000..afab71f
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/routes_test.go
@@ -0,0 +1,315 @@
+package v2
+
+import (
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/gorilla/mux"
+)
+
+type routeTestCase struct {
+	RequestURI  string
+	ExpectedURI string
+	Vars        map[string]string
+	RouteName   string
+	StatusCode  int
+}
+
+// TestRouter registers a test handler with all the routes and ensures that
+// each route returns the expected path variables. Not method verification is
+// present. This not meant to be exhaustive but as check to ensure that the
+// expected variables are extracted.
+//
+// This may go away as the application structure comes together.
+func TestRouter(t *testing.T) {
+	testCases := []routeTestCase{
+		{
+			RouteName:  RouteNameBase,
+			RequestURI: "/v2/",
+			Vars:       map[string]string{},
+		},
+		{
+			RouteName:  RouteNameManifest,
+			RequestURI: "/v2/foo/manifests/bar",
+			Vars: map[string]string{
+				"name":      "foo",
+				"reference": "bar",
+			},
+		},
+		{
+			RouteName:  RouteNameManifest,
+			RequestURI: "/v2/foo/bar/manifests/tag",
+			Vars: map[string]string{
+				"name":      "foo/bar",
+				"reference": "tag",
+			},
+		},
+		{
+			RouteName:  RouteNameManifest,
+			RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890",
+			Vars: map[string]string{
+				"name":      "foo/bar",
+				"reference": "sha256:abcdef01234567890",
+			},
+		},
+		{
+			RouteName:  RouteNameTags,
+			RequestURI: "/v2/foo/bar/tags/list",
+			Vars: map[string]string{
+				"name": "foo/bar",
+			},
+		},
+		{
+			RouteName:  RouteNameBlob,
+			RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234",
+			Vars: map[string]string{
+				"name":   "foo/bar",
+				"digest": "tarsum.dev+foo:abcdef0919234",
+			},
+		},
+		{
+			RouteName:  RouteNameBlob,
+			RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234",
+			Vars: map[string]string{
+				"name":   "foo/bar",
+				"digest": "sha256:abcdef0919234",
+			},
+		},
+		{
+			RouteName:  RouteNameBlobUpload,
+			RequestURI: "/v2/foo/bar/blobs/uploads/",
+			Vars: map[string]string{
+				"name": "foo/bar",
+			},
+		},
+		{
+			RouteName:  RouteNameBlobUploadChunk,
+			RequestURI: "/v2/foo/bar/blobs/uploads/uuid",
+			Vars: map[string]string{
+				"name": "foo/bar",
+				"uuid": "uuid",
+			},
+		},
+		{
+			RouteName:  RouteNameBlobUploadChunk,
+			RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			Vars: map[string]string{
+				"name": "foo/bar",
+				"uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			},
+		},
+		{
+			RouteName:  RouteNameBlobUploadChunk,
+			RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
+			Vars: map[string]string{
+				"name": "foo/bar",
+				"uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
+			},
+		},
+		{
+			// Check ambiguity: ensure we can distinguish between tags for
+			// "foo/bar/image/image" and image for "foo/bar/image" with tag
+			// "tags"
+			RouteName:  RouteNameManifest,
+			RequestURI: "/v2/foo/bar/manifests/manifests/tags",
+			Vars: map[string]string{
+				"name":      "foo/bar/manifests",
+				"reference": "tags",
+			},
+		},
+		{
+			// This case presents an ambiguity between foo/bar with tag="tags"
+			// and list tags for "foo/bar/manifest"
+			RouteName:  RouteNameTags,
+			RequestURI: "/v2/foo/bar/manifests/tags/list",
+			Vars: map[string]string{
+				"name": "foo/bar/manifests",
+			},
+		},
+	}
+
+	checkTestRouter(t, testCases, "", true)
+	checkTestRouter(t, testCases, "/prefix/", true)
+}
+
+func TestRouterWithPathTraversals(t *testing.T) {
+	testCases := []routeTestCase{
+		{
+			RouteName:   RouteNameBlobUploadChunk,
+			RequestURI:  "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			StatusCode:  http.StatusNotFound,
+		},
+		{
+			// Testing for path traversal attack handling
+			RouteName:   RouteNameTags,
+			RequestURI:  "/v2/foo/../bar/baz/tags/list",
+			ExpectedURI: "/v2/bar/baz/tags/list",
+			Vars: map[string]string{
+				"name": "bar/baz",
+			},
+		},
+	}
+	checkTestRouter(t, testCases, "", false)
+}
+
+func TestRouterWithBadCharacters(t *testing.T) {
+	if testing.Short() {
+		testCases := []routeTestCase{
+			{
+				RouteName:  RouteNameBlobUploadChunk,
+				RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+				StatusCode: http.StatusNotFound,
+			},
+			{
+				// Testing for path traversal attack handling
+				RouteName:  RouteNameTags,
+				RequestURI: "/v2/foo/不bar/tags/list",
+				StatusCode: http.StatusNotFound,
+			},
+		}
+		checkTestRouter(t, testCases, "", true)
+	} else {
+		// in the long version we're going to fuzz the router
+		// with random UTF8 characters not in the 128 bit ASCII range.
+		// These are not valid characters for the router and we expect
+		// 404s on every test.
+		rand.Seed(time.Now().UTC().UnixNano())
+		testCases := make([]routeTestCase, 1000)
+		for idx := range testCases {
+			testCases[idx] = routeTestCase{
+				RouteName:  RouteNameTags,
+				RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)),
+				StatusCode: http.StatusNotFound,
+			}
+		}
+		checkTestRouter(t, testCases, "", true)
+	}
+}
+
+func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) {
+	router := RouterWithPrefix(prefix)
+
+	testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		testCase := routeTestCase{
+			RequestURI: r.RequestURI,
+			Vars:       mux.Vars(r),
+			RouteName:  mux.CurrentRoute(r).GetName(),
+		}
+
+		enc := json.NewEncoder(w)
+
+		if err := enc.Encode(testCase); err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	})
+
+	// Startup test server
+	server := httptest.NewServer(router)
+
+	for _, testcase := range testCases {
+		testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI
+		// Register the endpoint
+		route := router.GetRoute(testcase.RouteName)
+		if route == nil {
+			t.Fatalf("route for name %q not found", testcase.RouteName)
+		}
+
+		route.Handler(testHandler)
+
+		u := server.URL + testcase.RequestURI
+
+		resp, err := http.Get(u)
+
+		if err != nil {
+			t.Fatalf("error issuing get request: %v", err)
+		}
+
+		if testcase.StatusCode == 0 {
+			// Override default, zero-value
+			testcase.StatusCode = http.StatusOK
+		}
+		if testcase.ExpectedURI == "" {
+			// Override default, zero-value
+			testcase.ExpectedURI = testcase.RequestURI
+		}
+
+		if resp.StatusCode != testcase.StatusCode {
+			t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode)
+		}
+
+		if testcase.StatusCode != http.StatusOK {
+			// We don't care about json response.
+			continue
+		}
+
+		dec := json.NewDecoder(resp.Body)
+
+		var actualRouteInfo routeTestCase
+		if err := dec.Decode(&actualRouteInfo); err != nil {
+			t.Fatalf("error reading json response: %v", err)
+		}
+		// Needs to be set out of band
+		actualRouteInfo.StatusCode = resp.StatusCode
+
+		if actualRouteInfo.RequestURI != testcase.ExpectedURI {
+			t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI)
+		}
+
+		if actualRouteInfo.RouteName != testcase.RouteName {
+			t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName)
+		}
+
+		// when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want
+		// that to make the comparison fail. We're otherwise done with the testcase so empty the
+		// testcase.ExpectedURI
+		testcase.ExpectedURI = ""
+		if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) {
+			t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase)
+		}
+	}
+
+}
+
+// -------------- START LICENSED CODE --------------
+// The following code is derivative of https://github.com/google/gofuzz
+// gofuzz is licensed under the Apache License, Version 2.0, January 2004,
+// a copy of which can be found in the LICENSE file at the root of this
+// repository.
+
+// These functions allow us to generate strings containing only multibyte
+// characters that are invalid in our URLs. They are used above for fuzzing
+// to ensure we always get 404s on these invalid strings
+type charRange struct {
+	first, last rune
+}
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (r *charRange) choose() rune {
+	count := int64(r.last - r.first)
+	return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
+	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+func randomString(length int) string {
+	runes := make([]rune, length)
+	for i := range runes {
+		runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose()
+	}
+	return string(runes)
+}
+
+// -------------- END LICENSED CODE --------------
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 0000000..4b42dd1
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,217 @@
+package v2
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+	"github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will be fall
+// under "/foo/v2/...". Most application will only provide a schema, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+	root   *url.URL // url root (ie http://localhost/)
+	router *mux.Router
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL) *URLBuilder {
+	return &URLBuilder{
+		root:   root,
+		router: Router(),
+	}
+}
+
+// NewURLBuilderFromString workes identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string) (*URLBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewURLBuilder(u), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request) *URLBuilder {
+	var scheme string
+
+	forwardedProto := r.Header.Get("X-Forwarded-Proto")
+
+	switch {
+	case len(forwardedProto) > 0:
+		scheme = forwardedProto
+	case r.TLS != nil:
+		scheme = "https"
+	case len(r.URL.Scheme) > 0:
+		scheme = r.URL.Scheme
+	default:
+		scheme = "http"
+	}
+
+	host := r.Host
+	forwardedHost := r.Header.Get("X-Forwarded-Host")
+	if len(forwardedHost) > 0 {
+		host = forwardedHost
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name string) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name)
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	manifestURL, err := route.URL("name", name, "reference", reference)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
+func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", name, "digest", dgst.String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name, "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// clondedRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root *url.URL
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+	if err != nil {
+		return nil, err
+	}
+
+	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+		routeURL.Path = routeURL.Path[1:]
+	}
+
+	return cr.root.ResolveReference(routeURL), nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
+func appendValues(u string, values ...url.Values) string {
+	up, err := url.Parse(u)
+
+	if err != nil {
+		panic(err) // should never happen
+	}
+
+	return appendValuesURL(up, values...).String()
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls_test.go
new file mode 100644
index 0000000..237d0f6
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/urls_test.go
@@ -0,0 +1,225 @@
+package v2
+
+import (
+	"net/http"
+	"net/url"
+	"testing"
+)
+
+type urlBuilderTestCase struct {
+	description  string
+	expectedPath string
+	build        func() (string, error)
+}
+
+func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase {
+	return []urlBuilderTestCase{
+		{
+			description:  "test base url",
+			expectedPath: "/v2/",
+			build:        urlBuilder.BuildBaseURL,
+		},
+		{
+			description:  "test tags url",
+			expectedPath: "/v2/foo/bar/tags/list",
+			build: func() (string, error) {
+				return urlBuilder.BuildTagsURL("foo/bar")
+			},
+		},
+		{
+			description:  "test manifest url",
+			expectedPath: "/v2/foo/bar/manifests/tag",
+			build: func() (string, error) {
+				return urlBuilder.BuildManifestURL("foo/bar", "tag")
+			},
+		},
+		{
+			description:  "build blob url",
+			expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789",
+			build: func() (string, error) {
+				return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789")
+			},
+		},
+		{
+			description:  "build blob upload url",
+			expectedPath: "/v2/foo/bar/blobs/uploads/",
+			build: func() (string, error) {
+				return urlBuilder.BuildBlobUploadURL("foo/bar")
+			},
+		},
+		{
+			description:  "build blob upload url with digest and size",
+			expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
+			build: func() (string, error) {
+				return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{
+					"size":   []string{"10000"},
+					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
+				})
+			},
+		},
+		{
+			description:  "build blob upload chunk url",
+			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part",
+			build: func() (string, error) {
+				return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part")
+			},
+		},
+		{
+			description:  "build blob upload chunk url with digest and size",
+			expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000",
+			build: func() (string, error) {
+				return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{
+					"size":   []string{"10000"},
+					"digest": []string{"tarsum.v1+sha256:abcdef0123456789"},
+				})
+			},
+		},
+	}
+}
+
+// TestURLBuilder tests the various url building functions, ensuring they are
+// returning the expected values.
+func TestURLBuilder(t *testing.T) {
+	roots := []string{
+		"http://example.com",
+		"https://example.com",
+		"http://localhost:5000",
+		"https://localhost:5443",
+	}
+
+	for _, root := range roots {
+		urlBuilder, err := NewURLBuilderFromString(root)
+		if err != nil {
+			t.Fatalf("unexpected error creating urlbuilder: %v", err)
+		}
+
+		for _, testCase := range makeURLBuilderTestCases(urlBuilder) {
+			url, err := testCase.build()
+			if err != nil {
+				t.Fatalf("%s: error building url: %v", testCase.description, err)
+			}
+
+			expectedURL := root + testCase.expectedPath
+
+			if url != expectedURL {
+				t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
+			}
+		}
+	}
+}
+
+func TestURLBuilderWithPrefix(t *testing.T) {
+	roots := []string{
+		"http://example.com/prefix/",
+		"https://example.com/prefix/",
+		"http://localhost:5000/prefix/",
+		"https://localhost:5443/prefix/",
+	}
+
+	for _, root := range roots {
+		urlBuilder, err := NewURLBuilderFromString(root)
+		if err != nil {
+			t.Fatalf("unexpected error creating urlbuilder: %v", err)
+		}
+
+		for _, testCase := range makeURLBuilderTestCases(urlBuilder) {
+			url, err := testCase.build()
+			if err != nil {
+				t.Fatalf("%s: error building url: %v", testCase.description, err)
+			}
+
+			expectedURL := root[0:len(root)-1] + testCase.expectedPath
+
+			if url != expectedURL {
+				t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
+			}
+		}
+	}
+}
+
+type builderFromRequestTestCase struct {
+	request *http.Request
+	base    string
+}
+
+func TestBuilderFromRequest(t *testing.T) {
+	u, err := url.Parse("http://example.com")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	forwardedProtoHeader := make(http.Header, 1)
+	forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
+
+	testRequests := []struct {
+		request *http.Request
+		base    string
+	}{
+		{
+			request: &http.Request{URL: u, Host: u.Host},
+			base:    "http://example.com",
+		},
+		{
+			request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
+			base:    "https://example.com",
+		},
+	}
+
+	for _, tr := range testRequests {
+		builder := NewURLBuilderFromRequest(tr.request)
+
+		for _, testCase := range makeURLBuilderTestCases(builder) {
+			url, err := testCase.build()
+			if err != nil {
+				t.Fatalf("%s: error building url: %v", testCase.description, err)
+			}
+
+			expectedURL := tr.base + testCase.expectedPath
+
+			if url != expectedURL {
+				t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
+			}
+		}
+	}
+}
+
+func TestBuilderFromRequestWithPrefix(t *testing.T) {
+	u, err := url.Parse("http://example.com/prefix/v2/")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	forwardedProtoHeader := make(http.Header, 1)
+	forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
+
+	testRequests := []struct {
+		request *http.Request
+		base    string
+	}{
+		{
+			request: &http.Request{URL: u, Host: u.Host},
+			base:    "http://example.com/prefix/",
+		},
+		{
+			request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
+			base:    "https://example.com/prefix/",
+		},
+	}
+
+	for _, tr := range testRequests {
+		builder := NewURLBuilderFromRequest(tr.request)
+
+		for _, testCase := range makeURLBuilderTestCases(builder) {
+			url, err := testCase.build()
+			if err != nil {
+				t.Fatalf("%s: error building url: %v", testCase.description, err)
+			}
+
+			expectedURL := tr.base[0:len(tr.base)-1] + testCase.expectedPath
+
+			if url != expectedURL {
+				t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL)
+			}
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/.gitignore b/vendor/src/github.com/docker/libcontainer/.gitignore
index bf6a664..2e3f79b 100644
--- a/vendor/src/github.com/docker/libcontainer/.gitignore
+++ b/vendor/src/github.com/docker/libcontainer/.gitignore
@@ -1,2 +1,3 @@
 bundles
 nsinit/nsinit
+vendor/pkg
diff --git a/vendor/src/github.com/docker/libcontainer/SPEC.md b/vendor/src/github.com/docker/libcontainer/SPEC.md
index 3ca90d6..5d37fe9 100644
--- a/vendor/src/github.com/docker/libcontainer/SPEC.md
+++ b/vendor/src/github.com/docker/libcontainer/SPEC.md
@@ -15,7 +15,7 @@
 ### System Requirements and Compatibility
 
 Minimum requirements:
-* Kernel version - 3.8 recommended 2.6.2x minimum(with backported patches) 
+* Kernel version - 3.10 recommended 2.6.2x minimum(with backported patches)
 * Mounted cgroups with each subsystem in its own hierarchy
 
 
@@ -28,11 +28,9 @@
 | CLONE_NEWIPC  |    1    |
 | CLONE_NEWNET  |    1    |
 | CLONE_NEWNS   |    1    |
-| CLONE_NEWUSER |    0    |
+| CLONE_NEWUSER |    1    |
 
-In v1 the user namespace is not enabled by default for support of older kernels
-where the user namespace feature is not fully implemented.  Namespaces are 
-created for the container via the `clone` syscall.  
+Namespaces are created for the container via the `clone` syscall.  
 
 
 ### Filesystem
@@ -143,6 +141,7 @@
 | blkio      | 1       |
 | perf_event | 1       |
 | freezer    | 1       |
+| hugetlb    | 1       |
 
 
 All cgroup subsystem are joined so that statistics can be collected from
@@ -165,6 +164,7 @@
 | -------------------- | ------- |
 | CAP_NET_RAW          | 1       |
 | CAP_NET_BIND_SERVICE | 1       |
+| CAP_AUDIT_READ       | 1       |
 | CAP_AUDIT_WRITE      | 1       |
 | CAP_DAC_OVERRIDE     | 1       |
 | CAP_SETFCAP          | 1       |
@@ -304,6 +304,7 @@
 | Pause          | Pause all processes inside the container                           |
 | Resume         | Resume all processes inside the container if paused                |
 | Exec           | Execute a new process inside of the container  ( requires setns )  |
+| Set            | Setup configs of the container after it's created                  |
 
 ### Execute a new process inside of a running container.
 
diff --git a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
index 3be3294..18cedf6 100644
--- a/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
+++ b/vendor/src/github.com/docker/libcontainer/apparmor/apparmor.go
@@ -14,8 +14,10 @@
 
 func IsEnabled() bool {
 	if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
-		buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
-		return err == nil && len(buf) > 1 && buf[0] == 'Y'
+		if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
+			buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
+			return err == nil && len(buf) > 1 && buf[0] == 'Y'
+		}
 	}
 	return false
 }
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
index 0a2d76b..99c7845 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
@@ -1,6 +1,8 @@
 package fs
 
 import (
+	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -19,6 +21,7 @@
 		"cpuset":     &CpusetGroup{},
 		"cpuacct":    &CpuacctGroup{},
 		"blkio":      &BlkioGroup{},
+		"hugetlb":    &HugetlbGroup{},
 		"perf_event": &PerfEventGroup{},
 		"freezer":    &FreezerGroup{},
 	}
@@ -75,10 +78,13 @@
 }
 
 func (m *Manager) Apply(pid int) error {
+
 	if m.Cgroups == nil {
 		return nil
 	}
 
+	var c = m.Cgroups
+
 	d, err := getCgroupData(m.Cgroups, pid)
 	if err != nil {
 		return err
@@ -108,6 +114,12 @@
 	}
 	m.Paths = paths
 
+	if paths["cpu"] != "" {
+		if err := CheckCpushares(paths["cpu"], c.CpuShares); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -119,19 +131,6 @@
 	return m.Paths
 }
 
-// Symmetrical public function to update device based cgroups.  Also available
-// in the systemd implementation.
-func ApplyDevices(c *configs.Cgroup, pid int) error {
-	d, err := getCgroupData(c, pid)
-	if err != nil {
-		return err
-	}
-
-	devices := subsystems["devices"]
-
-	return devices.Apply(d)
-}
-
 func (m *Manager) GetStats() (*cgroups.Stats, error) {
 	stats := cgroups.NewStats()
 	for name, path := range m.Paths {
@@ -263,6 +262,11 @@
 }
 
 func writeFile(dir, file, data string) error {
+	// Normally dir should not be empty, one case is that cgroup subsystem
+	// is not mounted, we will get empty dir, and we want it fail here.
+	if dir == "" {
+		return fmt.Errorf("no such directory for %s.", file)
+	}
 	return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
 }
 
@@ -280,3 +284,27 @@
 	}
 	return nil
 }
+
+func CheckCpushares(path string, c int64) error {
+	var cpuShares int64
+
+	fd, err := os.Open(filepath.Join(path, "cpu.shares"))
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	_, err = fmt.Fscanf(fd, "%d", &cpuShares)
+	if err != nil && err != io.EOF {
+		return err
+	}
+	if c != 0 {
+		if c > cpuShares {
+			return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares)
+		} else if c < cpuShares {
+			return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
index 8e13264..06f0a3b 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio.go
@@ -35,6 +35,32 @@
 		}
 	}
 
+	if cgroup.BlkioWeightDevice != "" {
+		if err := writeFile(path, "blkio.weight_device", cgroup.BlkioWeightDevice); err != nil {
+			return err
+		}
+	}
+	if cgroup.BlkioThrottleReadBpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.read_bps_device", cgroup.BlkioThrottleReadBpsDevice); err != nil {
+			return err
+		}
+	}
+	if cgroup.BlkioThrottleWriteBpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.write_bps_device", cgroup.BlkioThrottleWriteBpsDevice); err != nil {
+			return err
+		}
+	}
+	if cgroup.BlkioThrottleReadIOpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.read_iops_device", cgroup.BlkioThrottleReadIOpsDevice); err != nil {
+			return err
+		}
+	}
+	if cgroup.BlkioThrottleWriteIOpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.write_iops_device", cgroup.BlkioThrottleWriteIOpsDevice); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
index 9ef93fc..9d0915d 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/blkio_test.go
@@ -67,6 +67,8 @@
 252:0 Async 164
 252:0 Total 164
 Total 328`
+	throttleBefore = `8:0 1024`
+	throttleAfter  = `8:0 2048`
 )
 
 func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
@@ -102,6 +104,35 @@
 	}
 }
 
+func TestBlkioSetWeightDevice(t *testing.T) {
+	helper := NewCgroupTestUtil("blkio", t)
+	defer helper.cleanup()
+
+	const (
+		weightDeviceBefore = "8:0 400"
+		weightDeviceAfter  = "8:0 500"
+	)
+
+	helper.writeFileContents(map[string]string{
+		"blkio.weight_device": weightDeviceBefore,
+	})
+
+	helper.CgroupData.c.BlkioWeightDevice = weightDeviceAfter
+	blkio := &BlkioGroup{}
+	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+		t.Fatal(err)
+	}
+
+	value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
+	if err != nil {
+		t.Fatalf("Failed to parse blkio.weight_device - %s", err)
+	}
+
+	if value != weightDeviceAfter {
+		t.Fatal("Got the wrong value, set blkio.weight_device failed.")
+	}
+}
+
 func TestBlkioStats(t *testing.T) {
 	helper := NewCgroupTestUtil("blkio", t)
 	defer helper.cleanup()
@@ -442,3 +473,96 @@
 
 	expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
 }
+
+func TestBlkioSetThrottleReadBpsDevice(t *testing.T) {
+	helper := NewCgroupTestUtil("blkio", t)
+	defer helper.cleanup()
+
+	helper.writeFileContents(map[string]string{
+		"blkio.throttle.read_bps_device": throttleBefore,
+	})
+
+	helper.CgroupData.c.BlkioThrottleReadBpsDevice = throttleAfter
+	blkio := &BlkioGroup{}
+	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+		t.Fatal(err)
+	}
+
+	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_bps_device")
+	if err != nil {
+		t.Fatalf("Failed to parse blkio.throttle.read_bps_device - %s", err)
+	}
+
+	if value != throttleAfter {
+		t.Fatal("Got the wrong value, set blkio.throttle.read_bps_device failed.")
+	}
+}
+func TestBlkioSetThrottleWriteBpsDevice(t *testing.T) {
+	helper := NewCgroupTestUtil("blkio", t)
+	defer helper.cleanup()
+
+	helper.writeFileContents(map[string]string{
+		"blkio.throttle.write_bps_device": throttleBefore,
+	})
+
+	helper.CgroupData.c.BlkioThrottleWriteBpsDevice = throttleAfter
+	blkio := &BlkioGroup{}
+	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+		t.Fatal(err)
+	}
+
+	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_bps_device")
+	if err != nil {
+		t.Fatalf("Failed to parse blkio.throttle.write_bps_device - %s", err)
+	}
+
+	if value != throttleAfter {
+		t.Fatal("Got the wrong value, set blkio.throttle.write_bps_device failed.")
+	}
+}
+func TestBlkioSetThrottleReadIOpsDevice(t *testing.T) {
+	helper := NewCgroupTestUtil("blkio", t)
+	defer helper.cleanup()
+
+	helper.writeFileContents(map[string]string{
+		"blkio.throttle.read_iops_device": throttleBefore,
+	})
+
+	helper.CgroupData.c.BlkioThrottleReadIOpsDevice = throttleAfter
+	blkio := &BlkioGroup{}
+	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+		t.Fatal(err)
+	}
+
+	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_iops_device")
+	if err != nil {
+		t.Fatalf("Failed to parse blkio.throttle.read_iops_device - %s", err)
+	}
+
+	if value != throttleAfter {
+		t.Fatal("Got the wrong value, set blkio.throttle.read_iops_device failed.")
+	}
+}
+func TestBlkioSetThrottleWriteIOpsDevice(t *testing.T) {
+	helper := NewCgroupTestUtil("blkio", t)
+	defer helper.cleanup()
+
+	helper.writeFileContents(map[string]string{
+		"blkio.throttle.write_iops_device": throttleBefore,
+	})
+
+	helper.CgroupData.c.BlkioThrottleWriteIOpsDevice = throttleAfter
+	blkio := &BlkioGroup{}
+	if err := blkio.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+		t.Fatal(err)
+	}
+
+	value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_iops_device")
+	if err != nil {
+		t.Fatalf("Failed to parse blkio.throttle.write_iops_device - %s", err)
+	}
+
+	if value != throttleAfter {
+		t.Fatal("Got the wrong value, set blkio.throttle.write_iops_device failed.")
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
index 1fbf7b1..c9d4ad1 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpu.go
@@ -17,7 +17,7 @@
 	// We always want to join the cpu group, to allow fair cpu scheduling
 	// on a container basis
 	dir, err := d.join("cpu")
-	if err != nil {
+	if err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
index d8465a6..6ad42a5 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/cpuset.go
@@ -16,7 +16,7 @@
 
 func (s *CpusetGroup) Apply(d *data) error {
 	dir, err := d.path("cpuset")
-	if err != nil {
+	if err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 
@@ -48,6 +48,11 @@
 }
 
 func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error {
+	// This might happen if we have no cpuset cgroup mounted.
+	// Just do nothing and don't fail.
+	if dir == "" {
+		return nil
+	}
 	if err := s.ensureParent(dir); err != nil {
 		return err
 	}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
index 16e00b1..09ce92e 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices.go
@@ -11,6 +11,8 @@
 func (s *DevicesGroup) Apply(d *data) error {
 	dir, err := d.join("devices")
 	if err != nil {
+		// We will return error even it's `not found` error, devices
+		// cgroup is hard requirement for container's security.
 		return err
 	}
 
@@ -32,6 +34,17 @@
 				return err
 			}
 		}
+		return nil
+	}
+
+	if err := writeFile(path, "devices.allow", "a"); err != nil {
+		return err
+	}
+
+	for _, dev := range cgroup.DeniedDevices {
+		if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil {
+			return err
+		}
 	}
 
 	return nil
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go
index 18bb127..f950c1b 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/devices_test.go
@@ -17,7 +17,18 @@
 			FileMode:    0666,
 		},
 	}
-	allowedList = "c 1:5 rwm"
+	allowedList   = "c 1:5 rwm"
+	deniedDevices = []*configs.Device{
+		{
+			Path:        "/dev/null",
+			Type:        'c',
+			Major:       1,
+			Minor:       3,
+			Permissions: "rwm",
+			FileMode:    0666,
+		},
+	}
+	deniedList = "c 1:3 rwm"
 )
 
 func TestDevicesSetAllow(t *testing.T) {
@@ -44,3 +55,28 @@
 		t.Fatal("Got the wrong value, set devices.allow failed.")
 	}
 }
+
+func TestDevicesSetDeny(t *testing.T) {
+	helper := NewCgroupTestUtil("devices", t)
+	defer helper.cleanup()
+
+	helper.writeFileContents(map[string]string{
+		"devices.allow": "a",
+	})
+
+	helper.CgroupData.c.AllowAllDevices = true
+	helper.CgroupData.c.DeniedDevices = deniedDevices
+	devices := &DevicesGroup{}
+	if err := devices.Set(helper.CgroupPath, helper.CgroupData.c); err != nil {
+		t.Fatal(err)
+	}
+
+	value, err := getCgroupParamString(helper.CgroupPath, "devices.deny")
+	if err != nil {
+		t.Fatalf("Failed to parse devices.deny - %s", err)
+	}
+
+	if value != deniedList {
+		t.Fatal("Got the wrong value, set devices.deny failed.")
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/hugetlb.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/hugetlb.go
new file mode 100644
index 0000000..8defdd1
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/hugetlb.go
@@ -0,0 +1,29 @@
+package fs
+
+import (
+	"github.com/docker/libcontainer/cgroups"
+	"github.com/docker/libcontainer/configs"
+)
+
+type HugetlbGroup struct {
+}
+
+func (s *HugetlbGroup) Apply(d *data) error {
+	// we just want to join this group even though we don't set anything
+	if _, err := d.join("hugetlb"); err != nil && !cgroups.IsNotFound(err) {
+		return err
+	}
+	return nil
+}
+
+func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error {
+	return nil
+}
+
+func (s *HugetlbGroup) Remove(d *data) error {
+	return removePath(d.path("hugetlb"))
+}
+
+func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error {
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
index b99f816..2dcef0f 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory.go
@@ -16,8 +16,7 @@
 
 func (s *MemoryGroup) Apply(d *data) error {
 	dir, err := d.join("memory")
-	// only return an error for memory if it was specified
-	if err != nil && (d.c.Memory != 0 || d.c.MemoryReservation != 0 || d.c.MemorySwap != 0) {
+	if err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 	defer func() {
@@ -95,6 +94,7 @@
 		return fmt.Errorf("failed to parse memory.usage_in_bytes - %v", err)
 	}
 	stats.MemoryStats.Usage = value
+	stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"]
 	value, err = getCgroupParamUint(path, "memory.max_usage_in_bytes")
 	if err != nil {
 		return fmt.Errorf("failed to parse memory.max_usage_in_bytes - %v", err)
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
index 1e939c4..60edc67 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/memory_test.go
@@ -128,7 +128,7 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	expectedStats := cgroups.MemoryStats{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
+	expectedStats := cgroups.MemoryStats{Usage: 2048, Cache: 512, MaxUsage: 4096, Failcnt: 100, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
 	expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
 }
 
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
index c55ba93..48e2f3a 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/stats_util_test.go
@@ -2,9 +2,9 @@
 
 import (
 	"fmt"
-	"log"
 	"testing"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/libcontainer/cgroups"
 )
 
@@ -23,75 +23,75 @@
 
 func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
 	if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
-		log.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
+		logrus.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
 		t.Fail()
 	}
 
 	if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
-		log.Printf("blkio IoServicedRecursive do not match - %s\n", err)
+		logrus.Printf("blkio IoServicedRecursive do not match - %s\n", err)
 		t.Fail()
 	}
 
 	if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
-		log.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
+		logrus.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
 		t.Fail()
 	}
 
 	if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
-		log.Printf("blkio SectorsRecursive do not match - %s\n", err)
+		logrus.Printf("blkio SectorsRecursive do not match - %s\n", err)
 		t.Fail()
 	}
 
 	if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil {
-		log.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
+		logrus.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
 		t.Fail()
 	}
 
 	if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil {
-		log.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
+		logrus.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
 		t.Fail()
 	}
 
 	if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil {
-		log.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
+		logrus.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
 		t.Fail()
 	}
 
 	if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil {
-		log.Printf("blkio IoTimeRecursive do not match - %s\n", err)
+		logrus.Printf("blkio IoTimeRecursive do not match - %s\n", err)
 		t.Fail()
 	}
 }
 
 func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
 	if expected != actual {
-		log.Printf("Expected throttling data %v but found %v\n", expected, actual)
+		logrus.Printf("Expected throttling data %v but found %v\n", expected, actual)
 		t.Fail()
 	}
 }
 
 func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
 	if expected.Usage != actual.Usage {
-		log.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
+		logrus.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
 		t.Fail()
 	}
 	if expected.MaxUsage != actual.MaxUsage {
-		log.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
+		logrus.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
 		t.Fail()
 	}
 	for key, expValue := range expected.Stats {
 		actValue, ok := actual.Stats[key]
 		if !ok {
-			log.Printf("Expected memory stat key %s not found\n", key)
+			logrus.Printf("Expected memory stat key %s not found\n", key)
 			t.Fail()
 		}
 		if expValue != actValue {
-			log.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
+			logrus.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
 			t.Fail()
 		}
 	}
 	if expected.Failcnt != actual.Failcnt {
-		log.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
+		logrus.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
 		t.Fail()
 	}
 }
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
index dc5dbb3..25c8f19 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/stats.go
@@ -33,6 +33,8 @@
 type MemoryStats struct {
 	// current res_counter usage for memory
 	Usage uint64 `json:"usage,omitempty"`
+	// memory used for cache
+	Cache uint64 `json:"cache,omitempty"`
 	// maximum usage ever recorded.
 	MaxUsage uint64 `json:"max_usage,omitempty"`
 	// TODO(vishh): Export these as stronger types.
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
index 95ed4ea..9b605b3 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_nosystemd.go
@@ -46,10 +46,6 @@
 	return fmt.Errorf("Systemd not supported")
 }
 
-func ApplyDevices(c *configs.Cgroup, pid int) error {
-	return fmt.Errorf("Systemd not supported")
-}
-
 func Freeze(c *configs.Cgroup, state configs.FreezerState) error {
 	return fmt.Errorf("Systemd not supported")
 }
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
index 3609bcc..4fb8d8d 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/systemd/apply_systemd.go
@@ -38,6 +38,7 @@
 	"cpuset":     &fs.CpusetGroup{},
 	"cpuacct":    &fs.CpuacctGroup{},
 	"blkio":      &fs.BlkioGroup{},
+	"hugetlb":    &fs.HugetlbGroup{},
 	"perf_event": &fs.PerfEventGroup{},
 	"freezer":    &fs.FreezerGroup{},
 }
@@ -216,6 +217,13 @@
 		return err
 	}
 
+	// FIXME: Systemd does have `BlockIODeviceWeight` property, but we got problem
+	// using that (at least on systemd 208, see https://github.com/docker/libcontainer/pull/354),
+	// so use fs work around for now.
+	if err := joinBlkio(c, pid); err != nil {
+		return err
+	}
+
 	paths := make(map[string]string)
 	for sysname := range subsystems {
 		subsystemPath, err := getSubsystemPath(m.Cgroups, sysname)
@@ -228,9 +236,14 @@
 		}
 		paths[sysname] = subsystemPath
 	}
-
 	m.Paths = paths
 
+	if paths["cpu"] != "" {
+		if err := fs.CheckCpushares(paths["cpu"], c.CpuShares); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -243,6 +256,11 @@
 }
 
 func writeFile(dir, file, data string) error {
+	// Normally dir should not be empty, one case is that cgroup subsystem
+	// is not mounted, we will get empty dir, and we want it fail here.
+	if dir == "" {
+		return fmt.Errorf("no such directory for %s.", file)
+	}
 	return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
 }
 
@@ -263,16 +281,16 @@
 
 func joinCpu(c *configs.Cgroup, pid int) error {
 	path, err := getSubsystemPath(c, "cpu")
-	if err != nil {
+	if err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 	if c.CpuQuota != 0 {
-		if err = ioutil.WriteFile(filepath.Join(path, "cpu.cfs_quota_us"), []byte(strconv.FormatInt(c.CpuQuota, 10)), 0700); err != nil {
+		if err = writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(c.CpuQuota, 10)); err != nil {
 			return err
 		}
 	}
 	if c.CpuPeriod != 0 {
-		if err = ioutil.WriteFile(filepath.Join(path, "cpu.cfs_period_us"), []byte(strconv.FormatInt(c.CpuPeriod, 10)), 0700); err != nil {
+		if err = writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(c.CpuPeriod, 10)); err != nil {
 			return err
 		}
 	}
@@ -280,7 +298,7 @@
 }
 
 func joinFreezer(c *configs.Cgroup, pid int) error {
-	if _, err := join(c, "freezer", pid); err != nil {
+	if _, err := join(c, "freezer", pid); err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 
@@ -350,7 +368,17 @@
 }
 
 func (m *Manager) Set(container *configs.Config) error {
-	panic("not implemented")
+	for name, path := range m.Paths {
+		sys, ok := subsystems[name]
+		if !ok || !cgroups.PathExists(path) {
+			continue
+		}
+		if err := sys.Set(path, container.Cgroups); err != nil {
+			return err
+		}
+	}
+
+	return nil
 }
 
 func getUnitName(c *configs.Cgroup) string {
@@ -362,7 +390,7 @@
 // * Support for wildcards to allow /dev/pts support
 //
 // The second is available in more recent systemd as "char-pts", but not in e.g. v208 which is
-// in wide use. When both these are availalable we will be able to switch, but need to keep the old
+// in wide use. When both these are available we will be able to switch, but need to keep the old
 // implementation for backwards compat.
 //
 // Note: we can't use systemd to set up the initial limits, and then change the cgroup
@@ -370,22 +398,14 @@
 // This happens at least for v208 when any sibling unit is started.
 func joinDevices(c *configs.Cgroup, pid int) error {
 	path, err := join(c, "devices", pid)
+	// Even if it's `not found` error, we'll return err because devices cgroup
+	// is hard requirement for container security.
 	if err != nil {
 		return err
 	}
 
 	devices := subsystems["devices"]
-	if err := devices.Set(path, c); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Symmetrical public function to update device based cgroups.  Also available
-// in the fs implementation.
-func ApplyDevices(c *configs.Cgroup, pid int) error {
-	return joinDevices(c, pid)
+	return devices.Set(path, c)
 }
 
 func joinMemory(c *configs.Cgroup, pid int) error {
@@ -397,11 +417,11 @@
 	}
 
 	path, err := getSubsystemPath(c, "memory")
-	if err != nil {
+	if err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 
-	return ioutil.WriteFile(filepath.Join(path, "memory.memsw.limit_in_bytes"), []byte(strconv.FormatInt(memorySwap, 10)), 0700)
+	return writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(memorySwap, 10))
 }
 
 // systemd does not atm set up the cpuset controller, so we must manually
@@ -409,7 +429,7 @@
 // level must have a full setup as the default for a new directory is "no cpus"
 func joinCpuset(c *configs.Cgroup, pid int) error {
 	path, err := getSubsystemPath(c, "cpuset")
-	if err != nil {
+	if err != nil && !cgroups.IsNotFound(err) {
 		return err
 	}
 
@@ -417,3 +437,40 @@
 
 	return s.ApplyDir(path, c, pid)
 }
+
+// `BlockIODeviceWeight` property of systemd does not work properly, and systemd
+// expects device path instead of major minor numbers, which is also confusing
+// for users. So we use fs work around for now.
+func joinBlkio(c *configs.Cgroup, pid int) error {
+	path, err := getSubsystemPath(c, "blkio")
+	if err != nil {
+		return err
+	}
+	if c.BlkioWeightDevice != "" {
+		if err := writeFile(path, "blkio.weight_device", c.BlkioWeightDevice); err != nil {
+			return err
+		}
+	}
+	if c.BlkioThrottleReadBpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.read_bps_device", c.BlkioThrottleReadBpsDevice); err != nil {
+			return err
+		}
+	}
+	if c.BlkioThrottleWriteBpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.write_bps_device", c.BlkioThrottleWriteBpsDevice); err != nil {
+			return err
+		}
+	}
+	if c.BlkioThrottleReadIOpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.read_iops_device", c.BlkioThrottleReadIOpsDevice); err != nil {
+			return err
+		}
+	}
+	if c.BlkioThrottleWriteIOpsDevice != "" {
+		if err := writeFile(path, "blkio.throttle.write_iops_device", c.BlkioThrottleWriteIOpsDevice); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/cgroup.go b/vendor/src/github.com/docker/libcontainer/configs/cgroup.go
index 8bf174c..8a161fc 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/cgroup.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/cgroup.go
@@ -19,6 +19,8 @@
 
 	AllowedDevices []*Device `json:"allowed_devices"`
 
+	DeniedDevices []*Device `json:"denied_devices"`
+
 	// Memory limit (in bytes)
 	Memory int64 `json:"memory"`
 
@@ -43,9 +45,24 @@
 	// MEM to use
 	CpusetMems string `json:"cpuset_mems"`
 
+	// IO read rate limit per cgroup per device, bytes per second.
+	BlkioThrottleReadBpsDevice string `json:"blkio_throttle_read_bps_device"`
+
+	// IO write rate limit per cgroup per divice, bytes per second.
+	BlkioThrottleWriteBpsDevice string `json:"blkio_throttle_write_bps_device"`
+
+	// IO read rate limit per cgroup per device, IO per second.
+	BlkioThrottleReadIOpsDevice string `json:"blkio_throttle_read_iops_device"`
+
+	// IO write rate limit per cgroup per device, IO per second.
+	BlkioThrottleWriteIOpsDevice string `json:"blkio_throttle_write_iops_device"`
+
 	// Specifies per cgroup weight, range is from 10 to 1000.
 	BlkioWeight int64 `json:"blkio_weight"`
 
+	// Weight per cgroup per device, can override BlkioWeight.
+	BlkioWeightDevice string `json:"blkio_weight_device"`
+
 	// set the freeze value for the process
 	Freezer FreezerState `json:"freezer"`
 
diff --git a/vendor/src/github.com/docker/libcontainer/configs/config.go b/vendor/src/github.com/docker/libcontainer/configs/config.go
index b07f252..2c311a0 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/config.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/config.go
@@ -37,6 +37,9 @@
 	// bind mounts are writtable.
 	Readonlyfs bool `json:"readonlyfs"`
 
+	// Privatefs will mount the container's rootfs as private where mount points from the parent will not propogate
+	Privatefs bool `json:"privatefs"`
+
 	// Mounts specify additional source and destination paths that will be mounted inside the container's
 	// rootfs and mount namespace if specified
 	Mounts []*Mount `json:"mounts"`
@@ -96,6 +99,10 @@
 	// ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
 	// so that these files prevent any writes.
 	ReadonlyPaths []string `json:"readonly_paths"`
+
+	// SystemProperties is a map of properties and their values. It is the equivalent of using
+	// sysctl -w my.property.name value in Linux.
+	SystemProperties map[string]string `json:"system_properties"`
 }
 
 // Gets the root uid for the process on host which could be non-zero
diff --git a/vendor/src/github.com/docker/libcontainer/configs/mount.go b/vendor/src/github.com/docker/libcontainer/configs/mount.go
index 7b3dea3..5a69f81 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/mount.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/mount.go
@@ -18,4 +18,17 @@
 
 	// Relabel source if set, "z" indicates shared, "Z" indicates unshared.
 	Relabel string `json:"relabel"`
+
+	// Optional Command to be run before Source is mounted.
+	PremountCmds []Command `json:"premount_cmds"`
+
+	// Optional Command to be run after Source is mounted.
+	PostmountCmds []Command `json:"postmount_cmds"`
+}
+
+type Command struct {
+	Path string   `json:"path"`
+	Args []string `json:"args"`
+	Env  []string `json:"env"`
+	Dir  string   `json:"dir"`
 }
diff --git a/vendor/src/github.com/docker/libcontainer/configs/namespaces.go b/vendor/src/github.com/docker/libcontainer/configs/namespaces.go
index ac6a7fa..2c2a9fd 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/namespaces.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/namespaces.go
@@ -1,9 +1,6 @@
 package configs
 
-import (
-	"fmt"
-	"syscall"
-)
+import "fmt"
 
 type NamespaceType string
 
@@ -34,10 +31,6 @@
 	Path string        `json:"path"`
 }
 
-func (n *Namespace) Syscall() int {
-	return namespaceInfo[n.Type]
-}
-
 func (n *Namespace) GetPath(pid int) string {
 	if n.Path != "" {
 		return n.Path
@@ -96,25 +89,3 @@
 func (n *Namespaces) Contains(t NamespaceType) bool {
 	return n.index(t) != -1
 }
-
-var namespaceInfo = map[NamespaceType]int{
-	NEWNET:  syscall.CLONE_NEWNET,
-	NEWNS:   syscall.CLONE_NEWNS,
-	NEWUSER: syscall.CLONE_NEWUSER,
-	NEWIPC:  syscall.CLONE_NEWIPC,
-	NEWUTS:  syscall.CLONE_NEWUTS,
-	NEWPID:  syscall.CLONE_NEWPID,
-}
-
-// CloneFlags parses the container's Namespaces options to set the correct
-// flags on clone, unshare. This functions returns flags only for new namespaces.
-func (n *Namespaces) CloneFlags() uintptr {
-	var flag int
-	for _, v := range *n {
-		if v.Path != "" {
-			continue
-		}
-		flag |= namespaceInfo[v.Type]
-	}
-	return uintptr(flag)
-}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall.go b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall.go
new file mode 100644
index 0000000..c962999
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall.go
@@ -0,0 +1,31 @@
+// +build linux
+
+package configs
+
+import "syscall"
+
+func (n *Namespace) Syscall() int {
+	return namespaceInfo[n.Type]
+}
+
+var namespaceInfo = map[NamespaceType]int{
+	NEWNET:  syscall.CLONE_NEWNET,
+	NEWNS:   syscall.CLONE_NEWNS,
+	NEWUSER: syscall.CLONE_NEWUSER,
+	NEWIPC:  syscall.CLONE_NEWIPC,
+	NEWUTS:  syscall.CLONE_NEWUTS,
+	NEWPID:  syscall.CLONE_NEWPID,
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This functions returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+	var flag int
+	for _, v := range *n {
+		if v.Path != "" {
+			continue
+		}
+		flag |= namespaceInfo[v.Type]
+	}
+	return uintptr(flag)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall_unsupported.go
new file mode 100644
index 0000000..1bd26bd
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/configs/namespaces_syscall_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package configs
+
+func (n *Namespace) Syscall() int {
+	panic("No namespace syscall support")
+	return 0
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This functions returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+	panic("No namespace syscall support")
+	return uintptr(0)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/configs/network.go b/vendor/src/github.com/docker/libcontainer/configs/network.go
index 9d5ed7a..ccdb228 100644
--- a/vendor/src/github.com/docker/libcontainer/configs/network.go
+++ b/vendor/src/github.com/docker/libcontainer/configs/network.go
@@ -2,7 +2,7 @@
 
 // Network defines configuration for a container's networking stack
 //
-// The network configuration can be omited from a container causing the
+// The network configuration can be omitted from a container causing the
 // container to be setup with the host's networking stack
 type Network struct {
 	// Type sets the networks type, commonly veth and loopback
@@ -53,7 +53,7 @@
 // Routes can be specified to create entries in the route table as the container is started
 //
 // All of destination, source, and gateway should be either IPv4 or IPv6.
-// One of the three options must be present, and ommitted entries will use their
+// One of the three options must be present, and omitted entries will use their
 // IP family default for the route table.  For IPv4 for example, setting the
 // gateway to 1.2.3.4 and the interface to eth0 will set up a standard
 // destination of 0.0.0.0(or *) when viewed in the route table.
diff --git a/vendor/src/github.com/docker/libcontainer/console_linux.go b/vendor/src/github.com/docker/libcontainer/console_linux.go
index afdc297..a3a0551 100644
--- a/vendor/src/github.com/docker/libcontainer/console_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/console_linux.go
@@ -38,7 +38,7 @@
 	}, nil
 }
 
-// newConsoleFromPath is an internal fucntion returning an initialzied console for use inside
+// newConsoleFromPath is an internal function returning an initialized console for use inside
 // a container's MNT namespace.
 func newConsoleFromPath(slavePath string) *linuxConsole {
 	return &linuxConsole{
diff --git a/vendor/src/github.com/docker/libcontainer/container.go b/vendor/src/github.com/docker/libcontainer/container.go
index 35bdfd7..a38df82 100644
--- a/vendor/src/github.com/docker/libcontainer/container.go
+++ b/vendor/src/github.com/docker/libcontainer/container.go
@@ -67,7 +67,7 @@
 	// State returns the current container's state information.
 	//
 	// errors:
-	// Systemerror - System erroor.
+	// Systemerror - System error.
 	State() (*State, error)
 
 	// Returns the current config of the container.
diff --git a/vendor/src/github.com/docker/libcontainer/container_linux.go b/vendor/src/github.com/docker/libcontainer/container_linux.go
index d52610f..8a7728a 100644
--- a/vendor/src/github.com/docker/libcontainer/container_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/container_linux.go
@@ -11,11 +11,13 @@
 	"sync"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/configs"
 )
 
+const stdioFdCount = 3
+
 type linuxContainer struct {
 	id            string
 	root          string
@@ -100,7 +102,7 @@
 	if err := parent.start(); err != nil {
 		// terminate the process to ensure that it properly is reaped.
 		if err := parent.terminate(); err != nil {
-			log.Warn(err)
+			logrus.Warn(err)
 		}
 		return newSystemError(err)
 	}
@@ -139,7 +141,8 @@
 	if cmd.SysProcAttr == nil {
 		cmd.SysProcAttr = &syscall.SysProcAttr{}
 	}
-	cmd.ExtraFiles = []*os.File{childPipe}
+	cmd.ExtraFiles = append(p.ExtraFiles, childPipe)
+	cmd.Env = append(cmd.Env, fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
 	// NOTE: when running a container with no PID namespace and the parent process spawning the container is
 	// PID1 the pdeathsig is being delivered to the container's init process by the kernel for some reason
 	// even with the parent still running.
@@ -178,11 +181,9 @@
 		fmt.Sprintf("_LIBCONTAINER_INITPID=%d", c.initProcess.pid()),
 		"_LIBCONTAINER_INITTYPE=setns",
 	)
-
 	if p.consolePath != "" {
 		cmd.Env = append(cmd.Env, "_LIBCONTAINER_CONSOLE_PATH="+p.consolePath)
 	}
-
 	// TODO: set on container for process management
 	return &setnsProcess{
 		cmd:         cmd,
@@ -195,13 +196,14 @@
 
 func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
 	return &initConfig{
-		Config:       c.config,
-		Args:         process.Args,
-		Env:          process.Env,
-		User:         process.User,
-		Cwd:          process.Cwd,
-		Console:      process.consolePath,
-		Capabilities: process.Capabilities,
+		Config:           c.config,
+		Args:             process.Args,
+		Env:              process.Env,
+		User:             process.User,
+		Cwd:              process.Cwd,
+		Console:          process.consolePath,
+		Capabilities:     process.Capabilities,
+		PassedFilesCount: len(process.ExtraFiles),
 	}
 }
 
@@ -225,7 +227,7 @@
 	}
 	if !c.config.Namespaces.Contains(configs.NEWPID) {
 		if err := killCgroupProcesses(c.cgroupManager); err != nil {
-			log.Warn(err)
+			logrus.Warn(err)
 		}
 	}
 	err = c.cgroupManager.Destroy()
diff --git a/vendor/src/github.com/docker/libcontainer/devices/devices.go b/vendor/src/github.com/docker/libcontainer/devices/devices.go
index 537f71a..7a11eaf 100644
--- a/vendor/src/github.com/docker/libcontainer/devices/devices.go
+++ b/vendor/src/github.com/docker/libcontainer/devices/devices.go
@@ -21,7 +21,7 @@
 	ioutilReadDir = ioutil.ReadDir
 )
 
-// Given the path to a device and it's cgroup_permissions(which cannot be easilly queried) look up the information about a linux device and return that information as a Device struct.
+// Given the path to a device and it's cgroup_permissions(which cannot be easily queried) look up the information about a linux device and return that information as a Device struct.
 func DeviceFromPath(path, permissions string) (*configs.Device, error) {
 	fileInfo, err := osLstat(path)
 	if err != nil {
diff --git a/vendor/src/github.com/docker/libcontainer/factory.go b/vendor/src/github.com/docker/libcontainer/factory.go
index 0c9fa63..2b3ff85 100644
--- a/vendor/src/github.com/docker/libcontainer/factory.go
+++ b/vendor/src/github.com/docker/libcontainer/factory.go
@@ -32,15 +32,13 @@
 	// System error
 	Load(id string) (Container, error)
 
-	// StartInitialization is an internal API to libcontainer used during the rexec of the
-	// container.  pipefd is the fd to the child end of the pipe used to syncronize the
-	// parent and child process providing state and configuration to the child process and
-	// returning any errors during the init of the container
+	// StartInitialization is an internal API to libcontainer used during the reexec of the
+	// container.
 	//
 	// Errors:
-	// pipe connection error
-	// system error
-	StartInitialization(pipefd uintptr) error
+	// Pipe connection error
+	// System error
+	StartInitialization() error
 
 	// Type returns info string about factory type (e.g. lxc, libcontainer...)
 	Type() string
diff --git a/vendor/src/github.com/docker/libcontainer/factory_linux.go b/vendor/src/github.com/docker/libcontainer/factory_linux.go
index a2d3bec..3cf1c3d 100644
--- a/vendor/src/github.com/docker/libcontainer/factory_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/factory_linux.go
@@ -10,6 +10,7 @@
 	"os/exec"
 	"path/filepath"
 	"regexp"
+	"strconv"
 	"syscall"
 
 	"github.com/docker/docker/pkg/mount"
@@ -194,7 +195,11 @@
 
 // StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state
 // This is a low level implementation detail of the reexec and should not be consumed externally
-func (l *LinuxFactory) StartInitialization(pipefd uintptr) (err error) {
+func (l *LinuxFactory) StartInitialization() (err error) {
+	pipefd, err := strconv.Atoi(os.Getenv("_LIBCONTAINER_INITPIPE"))
+	if err != nil {
+		return err
+	}
 	var (
 		pipe = os.NewFile(uintptr(pipefd), "pipe")
 		it   = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
diff --git a/vendor/src/github.com/docker/libcontainer/init_linux.go b/vendor/src/github.com/docker/libcontainer/init_linux.go
index 1786b1e..1771fd1 100644
--- a/vendor/src/github.com/docker/libcontainer/init_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/init_linux.go
@@ -9,7 +9,7 @@
 	"strings"
 	"syscall"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/configs"
 	"github.com/docker/libcontainer/netlink"
@@ -40,14 +40,15 @@
 
 // initConfig is used for transferring parameters from Exec() to Init()
 type initConfig struct {
-	Args         []string        `json:"args"`
-	Env          []string        `json:"env"`
-	Cwd          string          `json:"cwd"`
-	Capabilities []string        `json:"capabilities"`
-	User         string          `json:"user"`
-	Config       *configs.Config `json:"config"`
-	Console      string          `json:"console"`
-	Networks     []*network      `json:"network"`
+	Args             []string        `json:"args"`
+	Env              []string        `json:"env"`
+	Cwd              string          `json:"cwd"`
+	Capabilities     []string        `json:"capabilities"`
+	User             string          `json:"user"`
+	Config           *configs.Config `json:"config"`
+	Console          string          `json:"console"`
+	Networks         []*network      `json:"network"`
+	PassedFilesCount int             `json:"passed_files_count"`
 }
 
 type initer interface {
@@ -95,10 +96,10 @@
 // and working dir, and closes any leaked file descriptors
 // before executing the command inside the namespace
 func finalizeNamespace(config *initConfig) error {
-	// Ensure that all non-standard fds we may have accidentally
+	// Ensure that all unwanted fds we may have accidentally
 	// inherited are marked close-on-exec so they stay out of the
 	// container
-	if err := utils.CloseExecFrom(3); err != nil {
+	if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
 		return err
 	}
 
@@ -233,7 +234,7 @@
 func killCgroupProcesses(m cgroups.Manager) error {
 	var procs []*os.Process
 	if err := m.Freeze(configs.Frozen); err != nil {
-		log.Warn(err)
+		logrus.Warn(err)
 	}
 	pids, err := m.GetPids()
 	if err != nil {
@@ -244,16 +245,16 @@
 		if p, err := os.FindProcess(pid); err == nil {
 			procs = append(procs, p)
 			if err := p.Kill(); err != nil {
-				log.Warn(err)
+				logrus.Warn(err)
 			}
 		}
 	}
 	if err := m.Freeze(configs.Thawed); err != nil {
-		log.Warn(err)
+		logrus.Warn(err)
 	}
 	for _, p := range procs {
 		if _, err := p.Wait(); err != nil {
-			log.Warn(err)
+			logrus.Warn(err)
 		}
 	}
 	return nil
diff --git a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go
index 12457ba..fea5f7e 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/exec_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/exec_test.go
@@ -4,8 +4,10 @@
 	"bytes"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
+	"syscall"
 	"testing"
 
 	"github.com/docker/libcontainer"
@@ -29,9 +31,7 @@
 		return
 	}
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 	config := newTemplateConfig(rootfs)
 	if userns {
@@ -64,21 +64,15 @@
 	}
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	l, err := os.Readlink("/proc/1/ns/ipc")
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	config := newTemplateConfig(rootfs)
 	buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc")
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	if exitCode != 0 {
 		t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr)
@@ -95,22 +89,16 @@
 	}
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	l, err := os.Readlink("/proc/1/ns/ipc")
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	config := newTemplateConfig(rootfs)
 	config.Namespaces.Remove(configs.NEWIPC)
 	buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc")
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	if exitCode != 0 {
 		t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr)
@@ -127,23 +115,17 @@
 	}
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	l, err := os.Readlink("/proc/1/ns/ipc")
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	config := newTemplateConfig(rootfs)
 	config.Namespaces.Add(configs.NEWIPC, "/proc/1/ns/ipc")
 
 	buffers, exitCode, err := runContainer(config, "", "readlink", "/proc/self/ns/ipc")
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	if exitCode != 0 {
 		t.Fatalf("exit code not 0. code %d stderr %q", exitCode, buffers.Stderr)
@@ -160,9 +142,7 @@
 	}
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	config := newTemplateConfig(rootfs)
@@ -180,16 +160,12 @@
 	}
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	config := newTemplateConfig(rootfs)
 	out, _, err := runContainer(config, "", "/bin/sh", "-c", "ulimit -n")
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	if limit := strings.TrimSpace(out.Stdout.String()); limit != "1025" {
 		t.Fatalf("expected rlimit to be 1025, got %s", limit)
 	}
@@ -208,9 +184,7 @@
 
 func waitProcess(p *libcontainer.Process, t *testing.T) {
 	status, err := p.Wait()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	if !status.Success() {
 		t.Fatal(status)
 	}
@@ -221,35 +195,22 @@
 		return
 	}
 	root, err := newTestRoot()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer os.RemoveAll(root)
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	config := newTemplateConfig(rootfs)
 
-	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	container, err := factory.Create("test", config)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer container.Destroy()
 
 	// Execute a first process in the container
 	stdinR, stdinW, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	var stdout, stdout2 bytes.Buffer
 
@@ -262,19 +223,13 @@
 	err = container.Start(&pconfig)
 	stdinR.Close()
 	defer stdinW.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	pid, err := pconfig.Pid()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	// Execute another process in the container
 	stdinR2, stdinW2, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	pconfig2 := libcontainer.Process{
 		Env: standardEnvironment,
 	}
@@ -285,19 +240,13 @@
 	err = container.Start(&pconfig2)
 	stdinR2.Close()
 	defer stdinW2.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	pid2, err := pconfig2.Pid()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	processes, err := container.Processes()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	n := 0
 	for i := range processes {
@@ -318,14 +267,10 @@
 
 	// Check that both processes live in the same pidns
 	pidns := string(stdout.Bytes())
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	pidns2 := string(stdout2.Bytes())
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	if pidns != pidns2 {
 		t.Fatal("The second process isn't in the required pid namespace", pidns, pidns2)
@@ -337,28 +282,17 @@
 		return
 	}
 	root, err := newTestRoot()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer os.RemoveAll(root)
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	config := newTemplateConfig(rootfs)
 
-	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	container, err := factory.Create("test", config)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer container.Destroy()
 
 	var stdout bytes.Buffer
@@ -374,17 +308,12 @@
 		Stdout: &stdout,
 	}
 	err = container.Start(&pconfig)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	// Wait for process
 	waitProcess(&pconfig, t)
 
 	outputEnv := string(stdout.Bytes())
-	if err != nil {
-		t.Fatal(err)
-	}
 
 	// Check that the environment has the key/value pair we added
 	if !strings.Contains(outputEnv, "FOO=BAR") {
@@ -402,28 +331,17 @@
 		return
 	}
 	root, err := newTestRoot()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer os.RemoveAll(root)
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	config := newTemplateConfig(rootfs)
 
-	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	container, err := factory.Create("test", config)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer container.Destroy()
 
 	processCaps := append(config.Capabilities, "NET_ADMIN")
@@ -437,17 +355,12 @@
 		Stdout:       &stdout,
 	}
 	err = container.Start(&pconfig)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	// Wait for process
 	waitProcess(&pconfig, t)
 
 	outputStatus := string(stdout.Bytes())
-	if err != nil {
-		t.Fatal(err)
-	}
 
 	lines := strings.Split(outputStatus, "\n")
 
@@ -497,37 +410,25 @@
 		return
 	}
 	root, err := newTestRoot()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer os.RemoveAll(root)
 
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 
 	config := newTemplateConfig(rootfs)
+	f := factory
 	if systemd {
-		config.Cgroups.Slice = "system.slice"
+		f = systemdFactory
 	}
 
-	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	container, err := factory.Create("test", config)
-	if err != nil {
-		t.Fatal(err)
-	}
+	container, err := f.Create("test", config)
+	ok(t, err)
 	defer container.Destroy()
 
 	stdinR, stdinW, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	pconfig := libcontainer.Process{
 		Args:  []string{"cat"},
@@ -537,44 +438,64 @@
 	err = container.Start(&pconfig)
 	stdinR.Close()
 	defer stdinW.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	pid, err := pconfig.Pid()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	process, err := os.FindProcess(pid)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
-	if err := container.Pause(); err != nil {
-		t.Fatal(err)
-	}
+	err = container.Pause()
+	ok(t, err)
 	state, err := container.Status()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := container.Resume(); err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
+	err = container.Resume()
+	ok(t, err)
 	if state != libcontainer.Paused {
 		t.Fatal("Unexpected state: ", state)
 	}
 
 	stdinW.Close()
 	s, err := process.Wait()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
+
 	if !s.Success() {
 		t.Fatal(s.String())
 	}
 }
 
+func TestCpuShares(t *testing.T) {
+	testCpuShares(t, false)
+}
+
+func TestSystemdCpuShares(t *testing.T) {
+	if !systemd.UseSystemd() {
+		t.Skip("Systemd is unsupported")
+	}
+	testCpuShares(t, true)
+}
+
+func testCpuShares(t *testing.T, systemd bool) {
+	if testing.Short() {
+		return
+	}
+	rootfs, err := newRootfs()
+	ok(t, err)
+	defer remove(rootfs)
+
+	config := newTemplateConfig(rootfs)
+	if systemd {
+		config.Cgroups.Slice = "system.slice"
+	}
+	config.Cgroups.CpuShares = 1
+
+	_, _, err = runContainer(config, "", "ps")
+	if err == nil {
+		t.Fatalf("runContainer should failed with invalid CpuShares")
+	}
+}
+
 func TestContainerState(t *testing.T) {
 	if testing.Short() {
 		return
@@ -606,11 +527,6 @@
 		{Type: configs.NEWNET},
 	})
 
-	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	container, err := factory.Create("test", config)
 	if err != nil {
 		t.Fatal(err)
@@ -648,3 +564,172 @@
 	stdinW.Close()
 	p.Wait()
 }
+
+func TestPassExtraFiles(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+
+	rootfs, err := newRootfs()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer remove(rootfs)
+
+	config := newTemplateConfig(rootfs)
+
+	container, err := factory.Create("test", config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer container.Destroy()
+
+	var stdout bytes.Buffer
+	pipeout1, pipein1, err := os.Pipe()
+	pipeout2, pipein2, err := os.Pipe()
+	process := libcontainer.Process{
+		Args:       []string{"sh", "-c", "cd /proc/$$/fd; echo -n *; echo -n 1 >3; echo -n 2 >4"},
+		Env:        []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
+		ExtraFiles: []*os.File{pipein1, pipein2},
+		Stdin:      nil,
+		Stdout:     &stdout,
+	}
+	err = container.Start(&process)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	waitProcess(&process, t)
+
+	out := string(stdout.Bytes())
+	// fd 5 is the directory handle for /proc/$$/fd
+	if out != "0 1 2 3 4 5" {
+		t.Fatalf("expected to have the file descriptors '0 1 2 3 4 5' passed to init, got '%s'", out)
+	}
+	var buf = []byte{0}
+	_, err = pipeout1.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	out1 := string(buf)
+	if out1 != "1" {
+		t.Fatalf("expected first pipe to receive '1', got '%s'", out1)
+	}
+
+	_, err = pipeout2.Read(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	out2 := string(buf)
+	if out2 != "2" {
+		t.Fatalf("expected second pipe to receive '2', got '%s'", out2)
+	}
+}
+
+func TestMountCmds(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+	root, err := newTestRoot()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(root)
+
+	rootfs, err := newRootfs()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer remove(rootfs)
+
+	tmpDir, err := ioutil.TempDir("", "tmpdir")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	config := newTemplateConfig(rootfs)
+	config.Mounts = append(config.Mounts, &configs.Mount{
+		Source:      tmpDir,
+		Destination: "/tmp",
+		Device:      "bind",
+		Flags:       syscall.MS_BIND | syscall.MS_REC,
+		PremountCmds: []configs.Command{
+			{Path: "touch", Args: []string{filepath.Join(tmpDir, "hello")}},
+			{Path: "touch", Args: []string{filepath.Join(tmpDir, "world")}},
+		},
+		PostmountCmds: []configs.Command{
+			{Path: "cp", Args: []string{filepath.Join(rootfs, "tmp", "hello"), filepath.Join(rootfs, "tmp", "hello-backup")}},
+			{Path: "cp", Args: []string{filepath.Join(rootfs, "tmp", "world"), filepath.Join(rootfs, "tmp", "world-backup")}},
+		},
+	})
+
+	container, err := factory.Create("test", config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer container.Destroy()
+
+	pconfig := libcontainer.Process{
+		Args: []string{"sh", "-c", "env"},
+		Env:  standardEnvironment,
+	}
+	err = container.Start(&pconfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for process
+	waitProcess(&pconfig, t)
+
+	entries, err := ioutil.ReadDir(tmpDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expected := []string{"hello", "hello-backup", "world", "world-backup"}
+	for i, e := range entries {
+		if e.Name() != expected[i] {
+			t.Errorf("Got(%s), expect %s", e.Name(), expected[i])
+		}
+	}
+}
+
+func TestSystemProperties(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+	root, err := newTestRoot()
+	ok(t, err)
+	defer os.RemoveAll(root)
+
+	rootfs, err := newRootfs()
+	ok(t, err)
+	defer remove(rootfs)
+
+	config := newTemplateConfig(rootfs)
+	config.SystemProperties = map[string]string{
+		"kernel.shmmni": "8192",
+	}
+
+	container, err := factory.Create("test", config)
+	ok(t, err)
+	defer container.Destroy()
+
+	var stdout bytes.Buffer
+	pconfig := libcontainer.Process{
+		Args:   []string{"sh", "-c", "cat /proc/sys/kernel/shmmni"},
+		Env:    standardEnvironment,
+		Stdin:  nil,
+		Stdout: &stdout,
+	}
+	err = container.Start(&pconfig)
+	ok(t, err)
+
+	// Wait for process
+	waitProcess(&pconfig, t)
+
+	shmmniOutput := strings.TrimSpace(string(stdout.Bytes()))
+	if shmmniOutput != "8192" {
+		t.Fatalf("kernel.shmmni property expected to be 8192, but is %s", shmmniOutput)
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/integration/execin_test.go b/vendor/src/github.com/docker/libcontainer/integration/execin_test.go
index 252e6e4..f81faf0 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/execin_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/execin_test.go
@@ -16,22 +16,16 @@
 		return
 	}
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 	config := newTemplateConfig(rootfs)
 	container, err := newContainer(config)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer container.Destroy()
 
 	// Execute a first process in the container
 	stdinR, stdinW, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	process := &libcontainer.Process{
 		Args:  []string{"cat"},
 		Env:   standardEnvironment,
@@ -40,9 +34,7 @@
 	err = container.Start(process)
 	stdinR.Close()
 	defer stdinW.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	buffers := newStdBuffers()
 	ps := &libcontainer.Process{
@@ -53,12 +45,9 @@
 		Stderr: buffers.Stderr,
 	}
 	err = container.Start(ps)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := ps.Wait(); err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
+	_, err = ps.Wait()
+	ok(t, err)
 	stdinW.Close()
 	if _, err := process.Wait(); err != nil {
 		t.Log(err)
@@ -74,21 +63,15 @@
 		return
 	}
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 	config := newTemplateConfig(rootfs)
 	container, err := newContainer(config)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer container.Destroy()
 
 	stdinR, stdinW, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	process := &libcontainer.Process{
 		Args:  []string{"cat"},
 		Env:   standardEnvironment,
@@ -97,9 +80,7 @@
 	err = container.Start(process)
 	stdinR.Close()
 	defer stdinW.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	buffers := newStdBuffers()
 	ps := &libcontainer.Process{
@@ -110,12 +91,9 @@
 		Stderr: buffers.Stderr,
 	}
 	err = container.Start(ps)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := ps.Wait(); err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
+	_, err = ps.Wait()
+	ok(t, err)
 	stdinW.Close()
 	if _, err := process.Wait(); err != nil {
 		t.Log(err)
@@ -131,22 +109,16 @@
 		return
 	}
 	rootfs, err := newRootfs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer remove(rootfs)
 	config := newTemplateConfig(rootfs)
 	container, err := newContainer(config)
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	defer container.Destroy()
 
 	// Execute a first process in the container
 	stdinR, stdinW, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 	process := &libcontainer.Process{
 		Args:  []string{"cat"},
 		Env:   standardEnvironment,
@@ -160,9 +132,7 @@
 			t.Log(err)
 		}
 	}()
-	if err != nil {
-		t.Fatal(err)
-	}
+	ok(t, err)
 
 	unexistent := &libcontainer.Process{
 		Args: []string{"unexistent"},
@@ -182,6 +152,121 @@
 		return
 	}
 	rootfs, err := newRootfs()
+	ok(t, err)
+	defer remove(rootfs)
+	config := newTemplateConfig(rootfs)
+	container, err := newContainer(config)
+	ok(t, err)
+	defer container.Destroy()
+
+	// Execute a first process in the container
+	stdinR, stdinW, err := os.Pipe()
+	ok(t, err)
+	process := &libcontainer.Process{
+		Args:  []string{"cat"},
+		Env:   standardEnvironment,
+		Stdin: stdinR,
+	}
+	err = container.Start(process)
+	stdinR.Close()
+	defer stdinW.Close()
+	ok(t, err)
+
+	var stdout bytes.Buffer
+	ps := &libcontainer.Process{
+		Args: []string{"ps"},
+		Env:  standardEnvironment,
+	}
+	console, err := ps.NewConsole(0)
+	copy := make(chan struct{})
+	go func() {
+		io.Copy(&stdout, console)
+		close(copy)
+	}()
+	ok(t, err)
+	err = container.Start(ps)
+	ok(t, err)
+	select {
+	case <-time.After(5 * time.Second):
+		t.Fatal("Waiting for copy timed out")
+	case <-copy:
+	}
+	_, err = ps.Wait()
+	ok(t, err)
+	stdinW.Close()
+	if _, err := process.Wait(); err != nil {
+		t.Log(err)
+	}
+	out := stdout.String()
+	if !strings.Contains(out, "cat") || !strings.Contains(string(out), "ps") {
+		t.Fatalf("unexpected running process, output %q", out)
+	}
+}
+
+func TestExecInEnvironment(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+	rootfs, err := newRootfs()
+	ok(t, err)
+	defer remove(rootfs)
+	config := newTemplateConfig(rootfs)
+	container, err := newContainer(config)
+	ok(t, err)
+	defer container.Destroy()
+
+	// Execute a first process in the container
+	stdinR, stdinW, err := os.Pipe()
+	ok(t, err)
+	process := &libcontainer.Process{
+		Args:  []string{"cat"},
+		Env:   standardEnvironment,
+		Stdin: stdinR,
+	}
+	err = container.Start(process)
+	stdinR.Close()
+	defer stdinW.Close()
+	ok(t, err)
+
+	buffers := newStdBuffers()
+	process2 := &libcontainer.Process{
+		Args: []string{"env"},
+		Env: []string{
+			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+			"DEBUG=true",
+			"DEBUG=false",
+			"ENV=test",
+		},
+		Stdin:  buffers.Stdin,
+		Stdout: buffers.Stdout,
+		Stderr: buffers.Stderr,
+	}
+	err = container.Start(process2)
+	ok(t, err)
+	if _, err := process2.Wait(); err != nil {
+		out := buffers.Stdout.String()
+		t.Fatal(err, out)
+	}
+	stdinW.Close()
+	if _, err := process.Wait(); err != nil {
+		t.Log(err)
+	}
+	out := buffers.Stdout.String()
+	// check execin's process environment
+	if !strings.Contains(out, "DEBUG=false") ||
+		!strings.Contains(out, "ENV=test") ||
+		!strings.Contains(out, "HOME=/root") ||
+		!strings.Contains(out, "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") ||
+		strings.Contains(out, "DEBUG=true") {
+		t.Fatalf("unexpected running process, output %q", out)
+	}
+}
+
+func TestExecinPassExtraFiles(t *testing.T) {
+	if testing.Short() {
+		return
+	}
+	rootfs, err := newRootfs()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -211,106 +296,45 @@
 	}
 
 	var stdout bytes.Buffer
-	ps := &libcontainer.Process{
-		Args: []string{"ps"},
-		Env:  standardEnvironment,
+	pipeout1, pipein1, err := os.Pipe()
+	pipeout2, pipein2, err := os.Pipe()
+	inprocess := &libcontainer.Process{
+		Args:       []string{"sh", "-c", "cd /proc/$$/fd; echo -n *; echo -n 1 >3; echo -n 2 >4"},
+		Env:        []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
+		ExtraFiles: []*os.File{pipein1, pipein2},
+		Stdin:      nil,
+		Stdout:     &stdout,
 	}
-	console, err := ps.NewConsole(0)
-	copy := make(chan struct{})
-	go func() {
-		io.Copy(&stdout, console)
-		close(copy)
-	}()
+	err = container.Start(inprocess)
 	if err != nil {
 		t.Fatal(err)
 	}
-	err = container.Start(ps)
-	if err != nil {
-		t.Fatal(err)
-	}
-	select {
-	case <-time.After(5 * time.Second):
-		t.Fatal("Waiting for copy timed out")
-	case <-copy:
-	}
-	if _, err := ps.Wait(); err != nil {
-		t.Fatal(err)
-	}
+
+	waitProcess(inprocess, t)
 	stdinW.Close()
-	if _, err := process.Wait(); err != nil {
-		t.Log(err)
-	}
-	out := stdout.String()
-	if !strings.Contains(out, "cat") || !strings.Contains(string(out), "ps") {
-		t.Fatalf("unexpected running process, output %q", out)
-	}
-}
+	waitProcess(process, t)
 
-func TestExecInEnvironment(t *testing.T) {
-	if testing.Short() {
-		return
+	out := string(stdout.Bytes())
+	// fd 5 is the directory handle for /proc/$$/fd
+	if out != "0 1 2 3 4 5" {
+		t.Fatalf("expected to have the file descriptors '0 1 2 3 4 5' passed to exec, got '%s'", out)
 	}
-	rootfs, err := newRootfs()
+	var buf = []byte{0}
+	_, err = pipeout1.Read(buf)
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer remove(rootfs)
-	config := newTemplateConfig(rootfs)
-	container, err := newContainer(config)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer container.Destroy()
-
-	// Execute a first process in the container
-	stdinR, stdinW, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
-	process := &libcontainer.Process{
-		Args:  []string{"cat"},
-		Env:   standardEnvironment,
-		Stdin: stdinR,
-	}
-	err = container.Start(process)
-	stdinR.Close()
-	defer stdinW.Close()
-	if err != nil {
-		t.Fatal(err)
+	out1 := string(buf)
+	if out1 != "1" {
+		t.Fatalf("expected first pipe to receive '1', got '%s'", out1)
 	}
 
-	buffers := newStdBuffers()
-	process2 := &libcontainer.Process{
-		Args: []string{"env"},
-		Env: []string{
-			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-			"DEBUG=true",
-			"DEBUG=false",
-			"ENV=test",
-		},
-		Stdin:  buffers.Stdin,
-		Stdout: buffers.Stdout,
-		Stderr: buffers.Stderr,
-	}
-	err = container.Start(process2)
+	_, err = pipeout2.Read(buf)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if _, err := process2.Wait(); err != nil {
-		out := buffers.Stdout.String()
-		t.Fatal(err, out)
-	}
-	stdinW.Close()
-	if _, err := process.Wait(); err != nil {
-		t.Log(err)
-	}
-	out := buffers.Stdout.String()
-	// check execin's process environment
-	if !strings.Contains(out, "DEBUG=false") ||
-		!strings.Contains(out, "ENV=test") ||
-		!strings.Contains(out, "HOME=/root") ||
-		!strings.Contains(out, "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") ||
-		strings.Contains(out, "DEBUG=true") {
-		t.Fatalf("unexpected running process, output %q", out)
+	out2 := string(buf)
+	if out2 != "2" {
+		t.Fatalf("expected second pipe to receive '2', got '%s'", out2)
 	}
 }
diff --git a/vendor/src/github.com/docker/libcontainer/integration/init_test.go b/vendor/src/github.com/docker/libcontainer/integration/init_test.go
index f11834d..2846603 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/init_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/init_test.go
@@ -1,11 +1,13 @@
 package integration
 
 import (
-	"log"
 	"os"
 	"runtime"
+	"testing"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/cgroups/systemd"
 	_ "github.com/docker/libcontainer/nsenter"
 )
 
@@ -19,9 +21,40 @@
 	runtime.LockOSThread()
 	factory, err := libcontainer.New("")
 	if err != nil {
-		log.Fatalf("unable to initialize for container: %s", err)
+		logrus.Fatalf("unable to initialize for container: %s", err)
 	}
-	if err := factory.StartInitialization(3); err != nil {
-		log.Fatal(err)
+	if err := factory.StartInitialization(); err != nil {
+		logrus.Fatal(err)
 	}
 }
+
+var (
+	factory        libcontainer.Factory
+	systemdFactory libcontainer.Factory
+)
+
+func TestMain(m *testing.M) {
+	var (
+		err error
+		ret int = 0
+	)
+
+	logrus.SetOutput(os.Stderr)
+	logrus.SetLevel(logrus.InfoLevel)
+
+	factory, err = libcontainer.New(".", libcontainer.Cgroupfs)
+	if err != nil {
+		logrus.Error(err)
+		os.Exit(1)
+	}
+	if systemd.UseSystemd() {
+		systemdFactory, err = libcontainer.New(".", libcontainer.SystemdCgroups)
+		if err != nil {
+			logrus.Error(err)
+			os.Exit(1)
+		}
+	}
+
+	ret = m.Run()
+	os.Exit(ret)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/integration/utils_test.go b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go
index cf45968..ffd7130 100644
--- a/vendor/src/github.com/docker/libcontainer/integration/utils_test.go
+++ b/vendor/src/github.com/docker/libcontainer/integration/utils_test.go
@@ -6,8 +6,11 @@
 	"io/ioutil"
 	"os"
 	"os/exec"
+	"path/filepath"
+	"runtime"
 	"strings"
 	"syscall"
+	"testing"
 
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/configs"
@@ -38,6 +41,14 @@
 	return strings.Join(s, "|")
 }
 
+// ok fails the test if an err is not nil.
+func ok(t testing.TB, err error) {
+	if err != nil {
+		_, file, line, _ := runtime.Caller(1)
+		t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
+	}
+}
+
 // newRootfs creates a new tmp directory and copies the busybox root filesystem
 func newRootfs() (string, error) {
 	dir, err := ioutil.TempDir("", "")
@@ -68,19 +79,13 @@
 }
 
 func newContainer(config *configs.Config) (libcontainer.Container, error) {
-	cgm := libcontainer.Cgroupfs
+	f := factory
+
 	if config.Cgroups != nil && config.Cgroups.Slice == "system.slice" {
-		cgm = libcontainer.SystemdCgroups
+		f = systemdFactory
 	}
 
-	factory, err := libcontainer.New(".",
-		libcontainer.InitArgs(os.Args[0], "init", "--"),
-		cgm,
-	)
-	if err != nil {
-		return nil, err
-	}
-	return factory.Create("testCT", config)
+	return f.Create("testCT", config)
 }
 
 // runContainer runs the container with the specific config and arguments
diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
index 5983031..7bc40dd 100644
--- a/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
+++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux.go
@@ -101,10 +101,22 @@
 // the MCS label should continue to be used.  SELinux will use this field
 // to make sure the content can not be shared by other containes.
 func Relabel(path string, fileLabel string, relabel string) error {
+	exclude_path := []string{"/", "/usr", "/etc"}
 	if fileLabel == "" {
 		return nil
 	}
-	if relabel == "z" {
+	for _, p := range exclude_path {
+		if path == p {
+			return fmt.Errorf("Relabeling of %s is not allowed", path)
+		}
+	}
+	if !strings.ContainsAny(relabel, "zZ") {
+		return nil
+	}
+	if strings.Contains(relabel, "z") && strings.Contains(relabel, "Z") {
+		return fmt.Errorf("Bad SELinux option z and Z can not be used together")
+	}
+	if strings.Contains(relabel, "z") {
 		c := selinux.NewContext(fileLabel)
 		c["level"] = "s0"
 		fileLabel = c.Get()
diff --git a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
index 8629353..6ab0c67 100644
--- a/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
+++ b/vendor/src/github.com/docker/libcontainer/label/label_selinux_test.go
@@ -87,3 +87,31 @@
 		t.Errorf("DisableSecOpt Failed level incorrect")
 	}
 }
+func TestRelabel(t *testing.T) {
+	testdir := "/tmp/test"
+	label := "system_u:system_r:svirt_sandbox_file_t:s0:c1,c2"
+	if err := Relabel(testdir, "", "z"); err != nil {
+		t.Fatal("Relabel with no label failed: %v", err)
+	}
+	if err := Relabel(testdir, label, ""); err != nil {
+		t.Fatal("Relabel with no relabel field failed: %v", err)
+	}
+	if err := Relabel(testdir, label, "z"); err != nil {
+		t.Fatal("Relabel shared failed: %v", err)
+	}
+	if err := Relabel(testdir, label, "Z"); err != nil {
+		t.Fatal("Relabel unshared failed: %v", err)
+	}
+	if err := Relabel(testdir, label, "zZ"); err == nil {
+		t.Fatal("Relabel with shared and unshared succeeded")
+	}
+	if err := Relabel("/etc", label, "zZ"); err == nil {
+		t.Fatal("Relabel /etc succeeded")
+	}
+	if err := Relabel("/", label, ""); err == nil {
+		t.Fatal("Relabel / succeeded")
+	}
+	if err := Relabel("/usr", label, "Z"); err == nil {
+		t.Fatal("Relabel /usr succeeded")
+	}
+}
diff --git a/vendor/src/github.com/docker/libcontainer/nsenter/README.md b/vendor/src/github.com/docker/libcontainer/nsenter/README.md
index ac94cba..d1a60ef 100644
--- a/vendor/src/github.com/docker/libcontainer/nsenter/README.md
+++ b/vendor/src/github.com/docker/libcontainer/nsenter/README.md
@@ -1,6 +1,25 @@
 ## nsenter
 
-The `nsenter` package registers a special init constructor that is called before the Go runtime has 
-a chance to boot.  This provides us the ability to `setns` on existing namespaces and avoid the issues
-that the Go runtime has with multiple threads.  This constructor is only called if this package is 
-registered, imported, in your go application and the argv 0 is `nsenter`.
+The `nsenter` package registers a special init constructor that is called before 
+the Go runtime has a chance to boot.  This provides us the ability to `setns` on 
+existing namespaces and avoid the issues that the Go runtime has with multiple 
+threads.  This constructor will be called if this package is registered, 
+imported, in your go application.
+
+The `nsenter` package will `import "C"` and it uses [cgo](https://golang.org/cmd/cgo/)
+package. In cgo, if the import of "C" is immediately preceded by a comment, that comment, 
+called the preamble, is used as a header when compiling the C parts of the package.
+So every time we  import package `nsenter`, the C code function `nsexec()` would be 
+called. And package `nsenter` is now only imported in Docker execdriver, so every time 
+before we call `execdriver.Exec()`, that C code would run.
+
+`nsexec()` will first check the environment variable `_LIBCONTAINER_INITPID` 
+which will give the process of the container that should be joined. Namespaces fd will 
+be found from `/proc/[pid]/ns` and set by `setns` syscall.
+
+And then get the pipe number from `_LIBCONTAINER_INITPIPE`, error message could
+be transfered through it. If tty is added, `_LIBCONTAINER_CONSOLE_PATH` will 
+have value and start a console for output.
+
+Finally, `nsexec()` will clone a child process , exit the parent process and let 
+the Go runtime take over.
diff --git a/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go b/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go
index 34e1f52..db27b8a 100644
--- a/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go
+++ b/vendor/src/github.com/docker/libcontainer/nsenter/nsenter_test.go
@@ -24,7 +24,7 @@
 		Path:       os.Args[0],
 		Args:       args,
 		ExtraFiles: []*os.File{w},
-		Env:        []string{fmt.Sprintf("_LIBCONTAINER_INITPID=%d", os.Getpid())},
+		Env:        []string{fmt.Sprintf("_LIBCONTAINER_INITPID=%d", os.Getpid()), "_LIBCONTAINER_INITPIPE=3"},
 	}
 
 	if err := cmd.Start(); err != nil {
diff --git a/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c b/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c
index e7658f3..d8e45f3 100644
--- a/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c
+++ b/vendor/src/github.com/docker/libcontainer/nsenter/nsexec.c
@@ -66,7 +66,7 @@
 	const int num = sizeof(namespaces) / sizeof(char *);
 	jmp_buf env;
 	char buf[PATH_MAX], *val;
-	int i, tfd, child, len, consolefd = -1;
+	int i, tfd, child, len, pipenum, consolefd = -1;
 	pid_t pid;
 	char *console;
 
@@ -81,6 +81,19 @@
 		exit(1);
 	}
 
+	val = getenv("_LIBCONTAINER_INITPIPE");
+	if (val == NULL) {
+		pr_perror("Child pipe not found");
+		exit(1);
+	}
+
+	pipenum = atoi(val);
+	snprintf(buf, sizeof(buf), "%d", pipenum);
+	if (strcmp(val, buf)) {
+		pr_perror("Unable to parse _LIBCONTAINER_INITPIPE");
+		exit(1);
+	}
+
 	console = getenv("_LIBCONTAINER_CONSOLE_PATH");
 	if (console != NULL) {
 		consolefd = open(console, O_RDWR);
@@ -124,6 +137,8 @@
 	}
 
 	if (setjmp(env) == 1) {
+		// Child
+
 		if (setsid() == -1) {
 			pr_perror("setsid failed");
 			exit(1);
@@ -149,7 +164,11 @@
 		// Finish executing, let the Go runtime take over.
 		return;
 	}
+	// Parent
 
+	// We must fork to actually enter the PID namespace, use CLONE_PARENT
+	// so the child can have the right parent, and we don't need to forward
+	// the child's exit code or resend its death signal.
 	child = clone_parent(&env);
 	if (child < 0) {
 		pr_perror("Unable to fork");
@@ -158,7 +177,7 @@
 
 	len = snprintf(buf, sizeof(buf), "{ \"pid\" : %d }\n", child);
 
-	if (write(3, buf, len) != len) {
+	if (write(pipenum, buf, len) != len) {
 		pr_perror("Unable to send a child pid");
 		kill(child, SIGKILL);
 		exit(1);
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/README.md b/vendor/src/github.com/docker/libcontainer/nsinit/README.md
index f2e66a8..98bed0e 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/README.md
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/README.md
@@ -65,3 +65,48 @@
    
 You may also specify an alternate root directory from where the `container.json`
 file is read and where the `state.json` file will be saved.
+
+### How to use?
+
+Currently nsinit has 9 commands. Type `nsinit -h` to list all of them. 
+And for every alternative command, you can also use `--help` to get more 
+detailed help documents. For example, `nsinit config --help`.
+
+`nsinit` cli application is implemented using [cli.go](https://github.com/codegangsta/cli). 
+Lots of details are handled in cli.go, so the implementation of `nsinit` itself 
+is very clean and clear.
+
+*   **config**	
+It will generate a standard configuration file for a container.  By default, it 
+will generate as the template file in [config.go](https://github.com/docker/libcontainer/blob/master/nsinit/config.go#L192). 
+It will modify the template if you have specified some configuration by options.
+*   **exec**	
+Starts a container and execute a new command inside it. Besides common options, it
+has some special options as below.
+	- `--tty,-t`: allocate a TTY to the container.
+	- `--config`: you can specify a configuration file. By default, it will use 
+	template configuration.
+	- `--id`: specify the ID for a container. By default, the id is "nsinit".
+	- `--user,-u`: set the user, uid, and/or gid for the process. By default the 
+	value is "root".
+	- `--cwd`: set the current working dir.
+	- `--env`: set environment variables for the process.
+*   **init**		
+It's an internal command that is called inside the container's namespaces to 
+initialize the namespace and exec the user's process. It should not be called 
+externally.
+*   **oom**		
+Display oom notifications for a container, you should specify container id.
+*   **pause**	
+Pause the container's processes, you should specify container id. It will use 
+cgroup freeze subsystem to help.
+*   **unpause**		
+Unpause the container's processes. Same with `pause`.
+*   **stats**	
+Display statistics for the container, it will mainly show cgroup and network 
+statistics.
+*   **state**	
+Get the container's current state. You can also read the state from `state.json`
+ in your container_id folder.
+*   **help, h**		
+Shows a list of commands or help for one command.
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/config.go b/vendor/src/github.com/docker/libcontainer/nsinit/config.go
index e50bb3c..1eee9dd 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/config.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/config.go
@@ -43,6 +43,7 @@
 	cli.StringFlag{Name: "veth-address", Usage: "veth ip address"},
 	cli.StringFlag{Name: "veth-gateway", Usage: "veth gateway address"},
 	cli.IntFlag{Name: "veth-mtu", Usage: "veth mtu"},
+	cli.BoolFlag{Name: "cgroup", Usage: "mount the cgroup data for the container"},
 }
 
 var configCommand = cli.Command{
@@ -187,6 +188,12 @@
 		}
 		config.Networks = append(config.Networks, network)
 	}
+	if context.Bool("cgroup") {
+		config.Mounts = append(config.Mounts, &configs.Mount{
+			Destination: "/sys/fs/cgroup",
+			Device:      "cgroup",
+		})
+	}
 }
 
 func getTemplate() *configs.Config {
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
index 9d302aa..cf40a59 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
@@ -23,6 +23,7 @@
 	Action: execAction,
 	Flags: append([]cli.Flag{
 		cli.BoolFlag{Name: "tty,t", Usage: "allocate a TTY to the container"},
+		cli.BoolFlag{Name: "systemd", Usage: "Use systemd for managing cgroups, if available"},
 		cli.StringFlag{Name: "id", Value: "nsinit", Usage: "specify the ID for a container"},
 		cli.StringFlag{Name: "config", Value: "", Usage: "path to the configuration file"},
 		cli.StringFlag{Name: "user,u", Value: "root", Usage: "set the user, uid, and/or gid for the process"},
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/init.go b/vendor/src/github.com/docker/libcontainer/nsinit/init.go
index 7b2cf19..24058d4 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/init.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/init.go
@@ -3,7 +3,7 @@
 import (
 	"runtime"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/codegangsta/cli"
 	"github.com/docker/libcontainer"
 	_ "github.com/docker/libcontainer/nsenter"
@@ -13,14 +13,14 @@
 	Name:  "init",
 	Usage: "runs the init process inside the namespace",
 	Action: func(context *cli.Context) {
-		log.SetLevel(log.DebugLevel)
+		logrus.SetLevel(logrus.DebugLevel)
 		runtime.GOMAXPROCS(1)
 		runtime.LockOSThread()
 		factory, err := libcontainer.New("")
 		if err != nil {
 			fatal(err)
 		}
-		if err := factory.StartInitialization(3); err != nil {
+		if err := factory.StartInitialization(); err != nil {
 			fatal(err)
 		}
 		panic("This line should never been executed")
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/main.go b/vendor/src/github.com/docker/libcontainer/nsinit/main.go
index eec064c..0a59c9f 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/main.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/main.go
@@ -3,7 +3,7 @@
 import (
 	"os"
 
-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/codegangsta/cli"
 )
 
@@ -29,18 +29,18 @@
 	}
 	app.Before = func(context *cli.Context) error {
 		if context.GlobalBool("debug") {
-			log.SetLevel(log.DebugLevel)
+			logrus.SetLevel(logrus.DebugLevel)
 		}
 		if path := context.GlobalString("log-file"); path != "" {
 			f, err := os.Create(path)
 			if err != nil {
 				return err
 			}
-			log.SetOutput(f)
+			logrus.SetOutput(f)
 		}
 		return nil
 	}
 	if err := app.Run(os.Args); err != nil {
-		log.Fatal(err)
+		logrus.Fatal(err)
 	}
 }
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/oom.go b/vendor/src/github.com/docker/libcontainer/nsinit/oom.go
index a59b753..e92c558 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/oom.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/oom.go
@@ -1,8 +1,7 @@
 package main
 
 import (
-	"log"
-
+	"github.com/Sirupsen/logrus"
 	"github.com/codegangsta/cli"
 )
 
@@ -15,16 +14,16 @@
 	Action: func(context *cli.Context) {
 		container, err := getContainer(context)
 		if err != nil {
-			log.Fatal(err)
+			logrus.Fatal(err)
 		}
 		n, err := container.NotifyOOM()
 		if err != nil {
-			log.Fatal(err)
+			logrus.Fatal(err)
 		}
 		for x := range n {
 			// hack for calm down go1.4 gofmt
 			_ = x
-			log.Printf("OOM notification received")
+			logrus.Printf("OOM notification received")
 		}
 	},
 }
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/pause.go b/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
index 89af0b6..7b0cc32 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/pause.go
@@ -1,8 +1,7 @@
 package main
 
 import (
-	"log"
-
+	"github.com/Sirupsen/logrus"
 	"github.com/codegangsta/cli"
 )
 
@@ -15,10 +14,10 @@
 	Action: func(context *cli.Context) {
 		container, err := getContainer(context)
 		if err != nil {
-			log.Fatal(err)
+			logrus.Fatal(err)
 		}
 		if err = container.Pause(); err != nil {
-			log.Fatal(err)
+			logrus.Fatal(err)
 		}
 	},
 }
@@ -32,10 +31,10 @@
 	Action: func(context *cli.Context) {
 		container, err := getContainer(context)
 		if err != nil {
-			log.Fatal(err)
+			logrus.Fatal(err)
 		}
 		if err = container.Resume(); err != nil {
-			log.Fatal(err)
+			logrus.Fatal(err)
 		}
 	},
 }
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
index 4deca76..fe9d0ef 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
@@ -3,10 +3,12 @@
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/Sirupsen/logrus"
 	"os"
 
 	"github.com/codegangsta/cli"
 	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/cgroups/systemd"
 	"github.com/docker/libcontainer/configs"
 )
 
@@ -29,7 +31,15 @@
 }
 
 func loadFactory(context *cli.Context) (libcontainer.Factory, error) {
-	return libcontainer.New(context.GlobalString("root"), libcontainer.Cgroupfs)
+	cgm := libcontainer.Cgroupfs
+	if context.Bool("systemd") {
+		if systemd.UseSystemd() {
+			cgm = libcontainer.SystemdCgroups
+		} else {
+			logrus.Warn("systemd cgroup flag passed, but systemd support for managing cgroups is not available.")
+		}
+	}
+	return libcontainer.New(context.GlobalString("root"), cgm)
 }
 
 func getContainer(context *cli.Context) (libcontainer.Container, error) {
diff --git a/vendor/src/github.com/docker/libcontainer/process.go b/vendor/src/github.com/docker/libcontainer/process.go
index 82fcff8..7902d08 100644
--- a/vendor/src/github.com/docker/libcontainer/process.go
+++ b/vendor/src/github.com/docker/libcontainer/process.go
@@ -23,7 +23,7 @@
 	Env []string
 
 	// User will set the uid and gid of the executing process running inside the container
-	// local to the contaienr's user and group configuration.
+	// local to the container's user and group configuration.
 	User string
 
 	// Cwd will change the processes current working directory inside the container's rootfs.
@@ -38,11 +38,14 @@
 	// Stderr is a pointer to a writer which receives the standard error stream.
 	Stderr io.Writer
 
+	// ExtraFiles specifies additional open files to be inherited by the container
+	ExtraFiles []*os.File
+
 	// consolePath is the path to the console allocated to the container.
 	consolePath string
 
 	// Capabilities specify the capabilities to keep when executing the process inside the container
-	// All capbilities not specified will be dropped from the processes capability mask
+	// All capabilities not specified will be dropped from the processes capability mask
 	Capabilities []string
 
 	ops processOperations
diff --git a/vendor/src/github.com/docker/libcontainer/rootfs_linux.go b/vendor/src/github.com/docker/libcontainer/rootfs_linux.go
index 472a4a9..4ddfff1 100644
--- a/vendor/src/github.com/docker/libcontainer/rootfs_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/rootfs_linux.go
@@ -6,12 +6,15 @@
 	"fmt"
 	"io/ioutil"
 	"os"
+	"os/exec"
+	"path"
 	"path/filepath"
 	"strings"
 	"syscall"
 	"time"
 
 	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/configs"
 	"github.com/docker/libcontainer/label"
 )
@@ -25,9 +28,20 @@
 		return newSystemError(err)
 	}
 	for _, m := range config.Mounts {
+		for _, precmd := range m.PremountCmds {
+			if err := mountCmd(precmd); err != nil {
+				return newSystemError(err)
+			}
+		}
 		if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil {
 			return newSystemError(err)
 		}
+
+		for _, postcmd := range m.PostmountCmds {
+			if err := mountCmd(postcmd); err != nil {
+				return newSystemError(err)
+			}
+		}
 	}
 	if err := createDevices(config); err != nil {
 		return newSystemError(err)
@@ -61,6 +75,18 @@
 	return nil
 }
 
+func mountCmd(cmd configs.Command) error {
+
+	command := exec.Command(cmd.Path, cmd.Args[:]...)
+	command.Env = cmd.Env
+	command.Dir = cmd.Dir
+	if out, err := command.CombinedOutput(); err != nil {
+		return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err)
+	}
+
+	return nil
+}
+
 func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
 	var (
 		dest = m.Destination
@@ -71,11 +97,19 @@
 	}
 
 	switch m.Device {
-	case "proc", "mqueue", "sysfs":
+	case "proc", "sysfs":
 		if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) {
 			return err
 		}
 		return syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), "")
+	case "mqueue":
+		if err := os.MkdirAll(dest, 0755); err != nil && !os.IsExist(err) {
+			return err
+		}
+		if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags), ""); err != nil {
+			return err
+		}
+		return label.SetFileLabel(dest, mountLabel)
 	case "tmpfs":
 		stat, err := os.Stat(dest)
 		if err != nil {
@@ -135,6 +169,37 @@
 				return err
 			}
 		}
+	case "cgroup":
+		mounts, err := cgroups.GetCgroupMounts()
+		if err != nil {
+			return err
+		}
+		var binds []*configs.Mount
+		for _, mm := range mounts {
+			dir, err := mm.GetThisCgroupDir()
+			if err != nil {
+				return err
+			}
+			binds = append(binds, &configs.Mount{
+				Device:      "bind",
+				Source:      filepath.Join(mm.Mountpoint, dir),
+				Destination: filepath.Join(m.Destination, strings.Join(mm.Subsystems, ",")),
+				Flags:       syscall.MS_BIND | syscall.MS_REC | syscall.MS_RDONLY,
+			})
+		}
+		tmpfs := &configs.Mount{
+			Device:      "tmpfs",
+			Destination: m.Destination,
+			Flags:       syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV,
+		}
+		if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil {
+			return err
+		}
+		for _, b := range binds {
+			if err := mountToRootfs(b, rootfs, mountLabel); err != nil {
+				return err
+			}
+		}
 	default:
 		return fmt.Errorf("unknown mount device %q to %q", m.Device, m.Destination)
 	}
@@ -273,9 +338,9 @@
 }
 
 func prepareRoot(config *configs.Config) error {
-	flag := syscall.MS_PRIVATE | syscall.MS_REC
-	if config.NoPivotRoot {
-		flag = syscall.MS_SLAVE | syscall.MS_REC
+	flag := syscall.MS_SLAVE | syscall.MS_REC
+	if config.Privatefs {
+		flag = syscall.MS_PRIVATE | syscall.MS_REC
 	}
 	if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil {
 		return err
@@ -388,3 +453,10 @@
 	}
 	return nil
 }
+
+// writeSystemProperty writes the value to a path under /proc/sys as determined from the key.
+// For e.g. net.ipv4.ip_forward translated to /proc/sys/net/ipv4/ip_forward.
+func writeSystemProperty(key, value string) error {
+	keyPath := strings.Replace(key, ".", "/", -1)
+	return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644)
+}
diff --git a/vendor/src/github.com/docker/libcontainer/standard_init_linux.go b/vendor/src/github.com/docker/libcontainer/standard_init_linux.go
index 282832b..251c09f 100644
--- a/vendor/src/github.com/docker/libcontainer/standard_init_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/standard_init_linux.go
@@ -64,6 +64,13 @@
 	if err := label.SetProcessLabel(l.config.Config.ProcessLabel); err != nil {
 		return err
 	}
+
+	for key, value := range l.config.Config.SystemProperties {
+		if err := writeSystemProperty(key, value); err != nil {
+			return err
+		}
+	}
+
 	for _, path := range l.config.Config.ReadonlyPaths {
 		if err := remountReadonly(path); err != nil {
 			return err
diff --git a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
index 228e6cc..a3c4cbb 100644
--- a/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/system/setns_linux.go
@@ -12,8 +12,10 @@
 // We are declaring the macro here because the SETNS syscall does not exist in th stdlib
 var setNsMap = map[string]uintptr{
 	"linux/386":     346,
+	"linux/arm64":   268,
 	"linux/amd64":   308,
-	"linux/arm":     374,
+	"linux/arm":     375,
+	"linux/ppc":     350,
 	"linux/ppc64":   350,
 	"linux/ppc64le": 350,
 	"linux/s390x":   339,
diff --git a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go
index 6840c37..0816bf8 100644
--- a/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go
+++ b/vendor/src/github.com/docker/libcontainer/system/syscall_linux_64.go
@@ -1,4 +1,4 @@
-// +build linux,amd64 linux,ppc64 linux,ppc64le linux,s390x
+// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x
 
 package system
 
diff --git a/vendor/src/github.com/docker/libcontainer/update-vendor.sh b/vendor/src/github.com/docker/libcontainer/update-vendor.sh
index b68f5d4..6d03d77 100755
--- a/vendor/src/github.com/docker/libcontainer/update-vendor.sh
+++ b/vendor/src/github.com/docker/libcontainer/update-vendor.sh
@@ -43,7 +43,7 @@
 clone git github.com/codegangsta/cli 1.1.0
 clone git github.com/coreos/go-systemd v2
 clone git github.com/godbus/dbus v2
-clone git github.com/Sirupsen/logrus v0.6.6
-clone git github.com/syndtr/gocapability 8e4cdcb
+clone git github.com/Sirupsen/logrus v0.7.3
+clone git github.com/syndtr/gocapability 66ef2aa
 
 # intentionally not vendoring Docker itself...  that'd be a circle :)
diff --git a/vendor/src/github.com/docker/libnetwork/.gitignore b/vendor/src/github.com/docker/libnetwork/.gitignore
new file mode 100644
index 0000000..c03c965
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/.gitignore
@@ -0,0 +1,33 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# Coverage
+*.tmp
+*.coverprofile
+
+# IDE files
+.project
+
+libnetwork-build.created
diff --git a/vendor/src/github.com/docker/libnetwork/Godeps/Godeps.json b/vendor/src/github.com/docker/libnetwork/Godeps/Godeps.json
new file mode 100644
index 0000000..c289309
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/Godeps/Godeps.json
@@ -0,0 +1,85 @@
+{
+	"ImportPath": "github.com/docker/libnetwork",
+	"GoVersion": "go1.4.1",
+	"Packages": [
+		"./..."
+	],
+	"Deps": [
+		{
+			"ImportPath": "github.com/Sirupsen/logrus",
+			"Comment": "v0.6.4-12-g467d9d5",
+			"Rev": "467d9d55c2d2c17248441a8fc661561161f40d5e"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/homedir",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/ioutils",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/mflag",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/parsers",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/plugins",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/proxy",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/reexec",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/stringid",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/term",
+			"Comment": "v1.4.1-3479-ga9172f5",
+			"Rev": "a9172f572e13086859c652e2d581950e910d63d4"
+		},
+		{
+			"ImportPath": "github.com/docker/libcontainer/user",
+			"Comment": "v1.4.0-495-g3e66118",
+			"Rev": "3e661186ba24f259d3860f067df052c7f6904bee"
+		},
+		{
+			"ImportPath": "github.com/godbus/dbus",
+			"Comment": "v2-3-g4160802",
+			"Rev": "41608027bdce7bfa8959d653a00b954591220e67"
+		},
+		{
+			"ImportPath": "github.com/gorilla/context",
+			"Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd"
+		},
+		{
+			"ImportPath": "github.com/gorilla/mux",
+			"Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
+		},
+		{
+			"ImportPath": "github.com/vishvananda/netlink",
+			"Rev": "8eb64238879fed52fd51c5b30ad20b928fb4c36c"
+		},
+		{
+			"ImportPath": "github.com/vishvananda/netns",
+			"Rev": "008d17ae001344769b031375bdb38a86219154c6"
+		}
+	]
+}
diff --git a/vendor/src/github.com/docker/libnetwork/Godeps/Readme b/vendor/src/github.com/docker/libnetwork/Godeps/Readme
new file mode 100644
index 0000000..4cdaa53
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/vendor/src/github.com/docker/libnetwork/LICENSE b/vendor/src/github.com/docker/libnetwork/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/src/github.com/docker/libnetwork/MAINTAINERS b/vendor/src/github.com/docker/libnetwork/MAINTAINERS
new file mode 100644
index 0000000..398fd6d
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/MAINTAINERS
@@ -0,0 +1,4 @@
+Alexandr Morozov <lk4d4@docker.com> (@LK4D4)
+Arnaud Porterie <arnaud@docker.com> (@icecrime)
+Madhu Venugopal <madhu@docker.com> (@mavenugo)
+Jana Radhakrishnan <mrjana@docker.com> (@mrjana)
diff --git a/vendor/src/github.com/docker/libnetwork/Makefile b/vendor/src/github.com/docker/libnetwork/Makefile
new file mode 100644
index 0000000..59c181e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/Makefile
@@ -0,0 +1,78 @@
+.PHONY: all all-local build build-local check check-code check-format run-tests check-local install-deps coveralls circle-ci
+SHELL=/bin/bash
+build_image=libnetwork-build
+dockerargs = --privileged -v $(shell pwd):/go/src/github.com/docker/libnetwork -w /go/src/github.com/docker/libnetwork
+container_env = -e "INSIDECONTAINER=-incontainer=true"
+docker = docker run --rm ${dockerargs} ${container_env} ${build_image}
+ciargs = -e "COVERALLS_TOKEN=$$COVERALLS_TOKEN" -e "INSIDECONTAINER=-incontainer=true"
+cidocker = docker run ${ciargs} ${dockerargs} golang:1.4
+
+all: ${build_image}.created
+	${docker} make all-local
+
+all-local: check-local build-local
+
+${build_image}.created:
+	docker run --name=libnetworkbuild -v $(shell pwd):/go/src/github.com/docker/libnetwork -w /go/src/github.com/docker/libnetwork golang:1.4 make install-deps
+	docker commit libnetworkbuild ${build_image}
+	docker rm libnetworkbuild
+	touch ${build_image}.created
+
+build: ${build_image}.created
+	${docker} make build-local
+
+build-local:
+	$(shell which godep) go build -tags experimental ./...
+
+check: ${build_image}.created
+	${docker} make check-local
+
+check-code:
+	@echo "Checking code... "
+	test -z "$$(golint ./... | tee /dev/stderr)"
+	go vet ./...
+	@echo "Done checking code"
+
+check-format:
+	@echo "Checking format... "
+	test -z "$$(goimports -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)"
+	@echo "Done checking format"
+
+run-tests:
+	@echo "Running tests... "
+	@echo "mode: count" > coverage.coverprofile
+	@for dir in $$(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -type d); do \
+	    if ls $$dir/*.go &> /dev/null; then \
+		pushd . &> /dev/null ; \
+		cd $$dir ; \
+		$(shell which godep) go test ${INSIDECONTAINER} -test.parallel 3 -test.v -covermode=count -coverprofile=./profile.tmp ; \
+		ret=$$? ;\
+		if [ $$ret -ne 0 ]; then exit $$ret; fi ;\
+		popd &> /dev/null; \
+	        if [ -f $$dir/profile.tmp ]; then \
+		        cat $$dir/profile.tmp | tail -n +2 >> coverage.coverprofile ; \
+				rm $$dir/profile.tmp ; \
+            fi ; \
+        fi ; \
+	done
+	@echo "Done running tests"
+
+check-local: 	check-format check-code run-tests 
+
+install-deps:
+	apt-get update && apt-get -y install iptables
+	go get github.com/tools/godep
+	go get github.com/golang/lint/golint
+	go get golang.org/x/tools/cmd/vet
+	go get golang.org/x/tools/cmd/goimports
+	go get golang.org/x/tools/cmd/cover
+	go get github.com/mattn/goveralls
+
+coveralls:
+	-@goveralls -service circleci -coverprofile=coverage.coverprofile -repotoken $$COVERALLS_TOKEN
+
+# CircleCI's Docker fails when cleaning up using the --rm flag
+# The following target is a workaround for this
+
+circle-ci:
+	@${cidocker} make install-deps check-local coveralls
diff --git a/vendor/src/github.com/docker/libnetwork/README.md b/vendor/src/github.com/docker/libnetwork/README.md
new file mode 100644
index 0000000..e51eba1
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/README.md
@@ -0,0 +1,86 @@
+# libnetwork - networking for containers
+
+[![Circle CI](https://circleci.com/gh/docker/libnetwork/tree/master.svg?style=svg)](https://circleci.com/gh/docker/libnetwork/tree/master) [![Coverage Status](https://coveralls.io/repos/docker/libnetwork/badge.svg)](https://coveralls.io/r/docker/libnetwork) [![GoDoc](https://godoc.org/github.com/docker/libnetwork?status.svg)](https://godoc.org/github.com/docker/libnetwork)
+
+Libnetwork provides a native Go implementation for connecting containers
+
+The goal of libnetwork is to deliver a robust Container Network Model that provides a consistent programming interface and the required network abstractions for applications.
+
+**NOTE**: libnetwork project is under heavy development and is not ready for general use.
+
+#### Design
+Please refer to the [design](docs/design.md) for more information.
+
+#### Using libnetwork
+
+There are many networking solutions available to suit a broad range of use-cases. libnetwork uses a driver / plugin model to support all of these solutions while abstracting the complexity of the driver implementations by exposing a simple and consistent Network Model to users.
+
+
+```go
+        // Create a new controller instance
+        controller := libnetwork.New()
+
+        // Select and configure the network driver
+        networkType := "bridge"
+
+        driverOptions := options.Generic{}
+        genericOption := make(map[string]interface{})
+        genericOption[netlabel.GenericData] = driverOptions
+        err := controller.ConfigureNetworkDriver(networkType, genericOption)
+        if err != nil {
+                return
+        }
+
+        // Create a network for containers to join.
+        // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make of
+        network, err := controller.NewNetwork(networkType, "network1")
+        if err != nil {
+                return
+        }
+
+        // For each new container: allocate IP and interfaces. The returned network
+        // settings will be used for container infos (inspect and such), as well as
+        // iptables rules for port publishing. This info is contained or accessible
+        // from the returned endpoint.
+        ep, err := network.CreateEndpoint("Endpoint1")
+        if err != nil {
+                return
+        }
+
+        // A container can join the endpoint by providing the container ID to the join
+        // api which returns the sandbox key which can be used to access the sandbox
+        // created for the container during join.
+        // Join acceps Variadic arguments which will be made use of by libnetwork and Drivers
+        _, err = ep.Join("container1",
+                libnetwork.JoinOptionHostname("test"),
+                libnetwork.JoinOptionDomainname("docker.io"))
+        if err != nil {
+                return
+        }
+
+		// libentwork client can check the endpoint's operational data via the Info() API
+		epInfo, err := ep.DriverInfo()
+		mapData, ok := epInfo[netlabel.PortMap]
+		if ok {
+			portMapping, ok := mapData.([]netutils.PortBinding)
+			if ok {
+				fmt.Printf("Current port mapping for endpoint %s: %v", ep.Name(), portMapping)
+			}
+		}
+
+```
+#### Current Status
+Please watch this space for updates on the progress.
+
+Currently libnetwork is nothing more than an attempt to modularize the Docker platform's networking subsystem by moving it into libnetwork as a library.
+
+## Future
+Please refer to [roadmap](ROADMAP.md) for more information.
+
+## Contributing
+
+Want to hack on libnetwork? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply.
+
+## Copyright and license
+Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons.
+
diff --git a/vendor/src/github.com/docker/libnetwork/ROADMAP.md b/vendor/src/github.com/docker/libnetwork/ROADMAP.md
new file mode 100644
index 0000000..af89645
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/ROADMAP.md
@@ -0,0 +1,29 @@
+# Roadmap
+
+Libnetwork is a young project and is still being defined.
+This document defines the high-level goals of the project and defines the release-relationship to the Docker Platform.
+
+* [Goals](#goals)
+* [Project Planning](#project-planning): release-relationship to the Docker Platform.
+
+## Long-term Goal
+
+libnetwork project will follow Docker and Linux philosophy of delivering small, highly modular and composable tools that works well independently. 
+libnetwork aims to satisfy that composable need for Networking in Containers.
+
+## Short-term Goals
+
+- Modularize the networking logic in Docker Engine and libcontainer in to a single, reusable library
+- Replace the networking subsystem of Docker Engine, with libnetwork
+- Define a flexible model that allows local and remote drivers to provide networking to containers
+- Provide a stand-alone tool "dnet" for managing and testing libnetwork
+
+## Project Planning
+
+Libnetwork versions do not map 1:1 with Docker Platform releases.
+Milestones and Project Pages are used to define the set of features that are included in each release.
+
+| Platform Version | Libnetwork Version | Planning |
+|------------------|--------------------|----------|
+| Docker 1.7       | [0.3](https://github.com/docker/libnetwork/milestones/0.3) | [Project Page](https://github.com/docker/libnetwork/wiki/Docker-1.7-Project-Page) |
+| Docker 1.8       | [1.0](https://github.com/docker/libnetwork/milestones/1.0) | [Project Page](https://github.com/docker/libnetwork/wiki/Docker-1.8-Project-Page) |
diff --git a/vendor/src/github.com/docker/libnetwork/api/api.go b/vendor/src/github.com/docker/libnetwork/api/api.go
new file mode 100644
index 0000000..97305d3
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/api/api.go
@@ -0,0 +1,541 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/types"
+	"github.com/gorilla/mux"
+)
+
+var (
+	successResponse  = responseStatus{Status: "Success", StatusCode: http.StatusOK}
+	createdResponse  = responseStatus{Status: "Created", StatusCode: http.StatusCreated}
+	mismatchResponse = responseStatus{Status: "Body/URI parameter mismatch", StatusCode: http.StatusBadRequest}
+	badQueryresponse = responseStatus{Status: "Unsupported query", StatusCode: http.StatusBadRequest}
+)
+
+const (
+	// Resource name regex
+	regex = "[a-zA-Z_0-9-]+"
+	// Router URL variable definition
+	nwName = "{" + urlNwName + ":" + regex + "}"
+	nwID   = "{" + urlNwID + ":" + regex + "}"
+	nwPID  = "{" + urlNwPID + ":" + regex + "}"
+	epName = "{" + urlEpName + ":" + regex + "}"
+	epID   = "{" + urlEpID + ":" + regex + "}"
+	epPID  = "{" + urlEpPID + ":" + regex + "}"
+	cnID   = "{" + urlCnID + ":" + regex + "}"
+
+	// Internal URL variable name, they can be anything
+	urlNwName = "network-name"
+	urlNwID   = "network-id"
+	urlNwPID  = "network-partial-id"
+	urlEpName = "endpoint-name"
+	urlEpID   = "endpoint-id"
+	urlEpPID  = "endpoint-partial-id"
+	urlCnID   = "container-id"
+)
+
+// NewHTTPHandler creates and initialize the HTTP handler to serve the requests for libnetwork
+func NewHTTPHandler(c libnetwork.NetworkController) func(w http.ResponseWriter, req *http.Request) {
+	h := &httpHandler{c: c}
+	h.initRouter()
+	return h.handleRequest
+}
+
+type responseStatus struct {
+	Status     string
+	StatusCode int
+}
+
+func (r *responseStatus) isOK() bool {
+	return r.StatusCode == http.StatusOK || r.StatusCode == http.StatusCreated
+}
+
+type processor func(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus)
+
+type httpHandler struct {
+	c libnetwork.NetworkController
+	r *mux.Router
+}
+
+func (h *httpHandler) handleRequest(w http.ResponseWriter, req *http.Request) {
+	// Make sure the service is there
+	if h.c == nil {
+		http.Error(w, "NetworkController is not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	// Get handler from router and execute it
+	h.r.ServeHTTP(w, req)
+}
+
+func (h *httpHandler) initRouter() {
+	m := map[string][]struct {
+		url string
+		qrs []string
+		fct processor
+	}{
+		"GET": {
+			// Order matters
+			{"/networks", []string{"name", nwName}, procGetNetworks},
+			{"/networks", []string{"partial-id", nwPID}, procGetNetworks},
+			{"/networks", nil, procGetNetworks},
+			{"/networks/" + nwID, nil, procGetNetwork},
+			{"/networks/" + nwID + "/endpoints", []string{"name", epName}, procGetEndpoints},
+			{"/networks/" + nwID + "/endpoints", []string{"partial-id", epPID}, procGetEndpoints},
+			{"/networks/" + nwID + "/endpoints", nil, procGetEndpoints},
+			{"/networks/" + nwID + "/endpoints/" + epID, nil, procGetEndpoint},
+		},
+		"POST": {
+			{"/networks", nil, procCreateNetwork},
+			{"/networks/" + nwID + "/endpoints", nil, procCreateEndpoint},
+			{"/networks/" + nwID + "/endpoints/" + epID + "/containers", nil, procJoinEndpoint},
+		},
+		"DELETE": {
+			{"/networks/" + nwID, nil, procDeleteNetwork},
+			{"/networks/" + nwID + "/endpoints/" + epID, nil, procDeleteEndpoint},
+			{"/networks/id/" + nwID + "/endpoints/" + epID + "/containers/" + cnID, nil, procLeaveEndpoint},
+		},
+	}
+
+	h.r = mux.NewRouter()
+	for method, routes := range m {
+		for _, route := range routes {
+			r := h.r.Path("/{.*}" + route.url).Methods(method).HandlerFunc(makeHandler(h.c, route.fct))
+			if route.qrs != nil {
+				r.Queries(route.qrs...)
+			}
+		}
+	}
+}
+
+func makeHandler(ctrl libnetwork.NetworkController, fct processor) http.HandlerFunc {
+	return func(w http.ResponseWriter, req *http.Request) {
+		var (
+			body []byte
+			err  error
+		)
+		if req.Body != nil {
+			body, err = ioutil.ReadAll(req.Body)
+			if err != nil {
+				http.Error(w, "Invalid body: "+err.Error(), http.StatusBadRequest)
+				return
+			}
+		}
+
+		res, rsp := fct(ctrl, mux.Vars(req), body)
+		if !rsp.isOK() {
+			http.Error(w, rsp.Status, rsp.StatusCode)
+			return
+		}
+		if res != nil {
+			writeJSON(w, rsp.StatusCode, res)
+		}
+	}
+}
+
+/*****************
+ Resource Builders
+******************/
+
+func buildNetworkResource(nw libnetwork.Network) *networkResource {
+	r := &networkResource{}
+	if nw != nil {
+		r.Name = nw.Name()
+		r.ID = nw.ID()
+		r.Type = nw.Type()
+		epl := nw.Endpoints()
+		r.Endpoints = make([]*endpointResource, 0, len(epl))
+		for _, e := range epl {
+			epr := buildEndpointResource(e)
+			r.Endpoints = append(r.Endpoints, epr)
+		}
+	}
+	return r
+}
+
+func buildEndpointResource(ep libnetwork.Endpoint) *endpointResource {
+	r := &endpointResource{}
+	if ep != nil {
+		r.Name = ep.Name()
+		r.ID = ep.ID()
+		r.Network = ep.Network()
+	}
+	return r
+}
+
+/**************
+ Options Parser
+***************/
+
+func (ej *endpointJoin) parseOptions() []libnetwork.EndpointOption {
+	var setFctList []libnetwork.EndpointOption
+	if ej.HostName != "" {
+		setFctList = append(setFctList, libnetwork.JoinOptionHostname(ej.HostName))
+	}
+	if ej.DomainName != "" {
+		setFctList = append(setFctList, libnetwork.JoinOptionDomainname(ej.DomainName))
+	}
+	if ej.HostsPath != "" {
+		setFctList = append(setFctList, libnetwork.JoinOptionHostsPath(ej.HostsPath))
+	}
+	if ej.ResolvConfPath != "" {
+		setFctList = append(setFctList, libnetwork.JoinOptionResolvConfPath(ej.ResolvConfPath))
+	}
+	if ej.UseDefaultSandbox {
+		setFctList = append(setFctList, libnetwork.JoinOptionUseDefaultSandbox())
+	}
+	if ej.DNS != nil {
+		for _, d := range ej.DNS {
+			setFctList = append(setFctList, libnetwork.JoinOptionDNS(d))
+		}
+	}
+	if ej.ExtraHosts != nil {
+		for _, e := range ej.ExtraHosts {
+			setFctList = append(setFctList, libnetwork.JoinOptionExtraHost(e.Name, e.Address))
+		}
+	}
+	if ej.ParentUpdates != nil {
+		for _, p := range ej.ParentUpdates {
+			setFctList = append(setFctList, libnetwork.JoinOptionParentUpdate(p.EndpointID, p.Name, p.Address))
+		}
+	}
+	return setFctList
+}
+
+/******************
+ Process functions
+*******************/
+
+/***************************
+ NetworkController interface
+****************************/
+func procCreateNetwork(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	var create networkCreate
+
+	err := json.Unmarshal(body, &create)
+	if err != nil {
+		return "", &responseStatus{Status: "Invalid body: " + err.Error(), StatusCode: http.StatusBadRequest}
+	}
+
+	nw, err := c.NewNetwork(create.NetworkType, create.Name, nil)
+	if err != nil {
+		return "", convertNetworkError(err)
+	}
+
+	return nw.ID(), &createdResponse
+}
+
+func procGetNetwork(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	t, by := detectNetworkTarget(vars)
+	nw, errRsp := findNetwork(c, t, by)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+	return buildNetworkResource(nw), &successResponse
+}
+
+func procGetNetworks(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	var list []*networkResource
+
+	// Look for query filters and validate
+	name, queryByName := vars[urlNwName]
+	shortID, queryByPid := vars[urlNwPID]
+	if queryByName && queryByPid {
+		return nil, &badQueryresponse
+	}
+
+	if queryByName {
+		if nw, errRsp := findNetwork(c, name, byName); errRsp.isOK() {
+			list = append(list, buildNetworkResource(nw))
+		}
+	} else if queryByPid {
+		// Return all the prefix-matching networks
+		l := func(nw libnetwork.Network) bool {
+			if strings.HasPrefix(nw.ID(), shortID) {
+				list = append(list, buildNetworkResource(nw))
+			}
+			return false
+		}
+		c.WalkNetworks(l)
+	} else {
+		for _, nw := range c.Networks() {
+			list = append(list, buildNetworkResource(nw))
+		}
+	}
+
+	return list, &successResponse
+}
+
+/******************
+ Network interface
+*******************/
+func procCreateEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	var ec endpointCreate
+
+	err := json.Unmarshal(body, &ec)
+	if err != nil {
+		return "", &responseStatus{Status: "Invalid body: " + err.Error(), StatusCode: http.StatusBadRequest}
+	}
+
+	nwT, nwBy := detectNetworkTarget(vars)
+	n, errRsp := findNetwork(c, nwT, nwBy)
+	if !errRsp.isOK() {
+		return "", errRsp
+	}
+
+	var setFctList []libnetwork.EndpointOption
+	if ec.ExposedPorts != nil {
+		setFctList = append(setFctList, libnetwork.CreateOptionExposedPorts(ec.ExposedPorts))
+	}
+	if ec.PortMapping != nil {
+		setFctList = append(setFctList, libnetwork.CreateOptionPortMapping(ec.PortMapping))
+	}
+
+	ep, err := n.CreateEndpoint(ec.Name, setFctList...)
+	if err != nil {
+		return "", convertNetworkError(err)
+	}
+
+	return ep.ID(), &createdResponse
+}
+
+func procGetEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	nwT, nwBy := detectNetworkTarget(vars)
+	epT, epBy := detectEndpointTarget(vars)
+
+	ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+
+	return buildEndpointResource(ep), &successResponse
+}
+
+func procGetEndpoints(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	// Look for query filters and validate
+	name, queryByName := vars[urlEpName]
+	shortID, queryByPid := vars[urlEpPID]
+	if queryByName && queryByPid {
+		return nil, &badQueryresponse
+	}
+
+	nwT, nwBy := detectNetworkTarget(vars)
+	nw, errRsp := findNetwork(c, nwT, nwBy)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+
+	var list []*endpointResource
+
+	// If query parameter is specified, return a filtered collection
+	if queryByName {
+		if ep, errRsp := findEndpoint(c, nwT, name, nwBy, byName); errRsp.isOK() {
+			list = append(list, buildEndpointResource(ep))
+		}
+	} else if queryByPid {
+		// Return all the prefix-matching networks
+		l := func(ep libnetwork.Endpoint) bool {
+			if strings.HasPrefix(ep.ID(), shortID) {
+				list = append(list, buildEndpointResource(ep))
+			}
+			return false
+		}
+		nw.WalkEndpoints(l)
+	} else {
+		for _, ep := range nw.Endpoints() {
+			epr := buildEndpointResource(ep)
+			list = append(list, epr)
+		}
+	}
+
+	return list, &successResponse
+}
+
+func procDeleteNetwork(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	target, by := detectNetworkTarget(vars)
+
+	nw, errRsp := findNetwork(c, target, by)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+
+	err := nw.Delete()
+	if err != nil {
+		return nil, convertNetworkError(err)
+	}
+
+	return nil, &successResponse
+}
+
+/******************
+ Endpoint interface
+*******************/
+func procJoinEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	var ej endpointJoin
+	err := json.Unmarshal(body, &ej)
+	if err != nil {
+		return nil, &responseStatus{Status: "Invalid body: " + err.Error(), StatusCode: http.StatusBadRequest}
+	}
+
+	nwT, nwBy := detectNetworkTarget(vars)
+	epT, epBy := detectEndpointTarget(vars)
+
+	ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+
+	cd, err := ep.Join(ej.ContainerID, ej.parseOptions()...)
+	if err != nil {
+		return nil, convertNetworkError(err)
+	}
+	return cd, &successResponse
+}
+
+func procLeaveEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	nwT, nwBy := detectNetworkTarget(vars)
+	epT, epBy := detectEndpointTarget(vars)
+
+	ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+
+	err := ep.Leave(vars[urlCnID])
+	if err != nil {
+		return nil, convertNetworkError(err)
+	}
+
+	return nil, &successResponse
+}
+
+func procDeleteEndpoint(c libnetwork.NetworkController, vars map[string]string, body []byte) (interface{}, *responseStatus) {
+	nwT, nwBy := detectNetworkTarget(vars)
+	epT, epBy := detectEndpointTarget(vars)
+
+	ep, errRsp := findEndpoint(c, nwT, epT, nwBy, epBy)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+
+	err := ep.Delete()
+	if err != nil {
+		return nil, convertNetworkError(err)
+	}
+
+	return nil, &successResponse
+}
+
+/***********
+  Utilities
+************/
+const (
+	byID = iota
+	byName
+)
+
+func detectNetworkTarget(vars map[string]string) (string, int) {
+	if target, ok := vars[urlNwName]; ok {
+		return target, byName
+	}
+	if target, ok := vars[urlNwID]; ok {
+		return target, byID
+	}
+	// vars are populated from the URL, following cannot happen
+	panic("Missing URL variable parameter for network")
+}
+
+func detectEndpointTarget(vars map[string]string) (string, int) {
+	if target, ok := vars[urlEpName]; ok {
+		return target, byName
+	}
+	if target, ok := vars[urlEpID]; ok {
+		return target, byID
+	}
+	// vars are populated from the URL, following cannot happen
+	panic("Missing URL variable parameter for endpoint")
+}
+
+func findNetwork(c libnetwork.NetworkController, s string, by int) (libnetwork.Network, *responseStatus) {
+	var (
+		nw  libnetwork.Network
+		err error
+	)
+	switch by {
+	case byID:
+		nw, err = c.NetworkByID(s)
+	case byName:
+		nw, err = c.NetworkByName(s)
+	default:
+		panic(fmt.Sprintf("unexpected selector for network search: %d", by))
+	}
+	if err != nil {
+		if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
+			return nil, &responseStatus{Status: "Resource not found: Network", StatusCode: http.StatusNotFound}
+		}
+		return nil, &responseStatus{Status: err.Error(), StatusCode: http.StatusBadRequest}
+	}
+	return nw, &successResponse
+}
+
+func findEndpoint(c libnetwork.NetworkController, ns, es string, nwBy, epBy int) (libnetwork.Endpoint, *responseStatus) {
+	nw, errRsp := findNetwork(c, ns, nwBy)
+	if !errRsp.isOK() {
+		return nil, errRsp
+	}
+	var (
+		err error
+		ep  libnetwork.Endpoint
+	)
+	switch epBy {
+	case byID:
+		ep, err = nw.EndpointByID(es)
+	case byName:
+		ep, err = nw.EndpointByName(es)
+	default:
+		panic(fmt.Sprintf("unexpected selector for endpoint search: %d", epBy))
+	}
+	if err != nil {
+		if _, ok := err.(libnetwork.ErrNoSuchEndpoint); ok {
+			return nil, &responseStatus{Status: "Resource not found: Endpoint", StatusCode: http.StatusNotFound}
+		}
+		return nil, &responseStatus{Status: err.Error(), StatusCode: http.StatusBadRequest}
+	}
+	return ep, &successResponse
+}
+
+func convertNetworkError(err error) *responseStatus {
+	var code int
+	switch err.(type) {
+	case types.BadRequestError:
+		code = http.StatusBadRequest
+	case types.ForbiddenError:
+		code = http.StatusForbidden
+	case types.NotFoundError:
+		code = http.StatusNotFound
+	case types.TimeoutError:
+		code = http.StatusRequestTimeout
+	case types.NotImplementedError:
+		code = http.StatusNotImplemented
+	case types.NoServiceError:
+		code = http.StatusServiceUnavailable
+	case types.InternalError:
+		code = http.StatusInternalServerError
+	default:
+		code = http.StatusInternalServerError
+	}
+	return &responseStatus{Status: err.Error(), StatusCode: code}
+}
+
+func writeJSON(w http.ResponseWriter, code int, v interface{}) error {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(code)
+	return json.NewEncoder(w).Encode(v)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/api/api_test.go b/vendor/src/github.com/docker/libnetwork/api/api_test.go
new file mode 100644
index 0000000..7fbbee5
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/api/api_test.go
@@ -0,0 +1,1566 @@
+package api
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"runtime"
+	"testing"
+
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/docker/libnetwork/options"
+	"github.com/docker/libnetwork/types"
+)
+
+const (
+	bridgeNetType = "bridge"
+	bridgeName    = "docker0"
+)
+
+func getEmptyGenericOption() map[string]interface{} {
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = options.Generic{}
+	return genericOption
+}
+
+func i2s(i interface{}) string {
+	s, ok := i.(string)
+	if !ok {
+		panic(fmt.Sprintf("Failed i2s for %v", i))
+	}
+	return s
+}
+
+func i2e(i interface{}) *endpointResource {
+	s, ok := i.(*endpointResource)
+	if !ok {
+		panic(fmt.Sprintf("Failed i2e for %v", i))
+	}
+	return s
+}
+
+func i2c(i interface{}) *libnetwork.ContainerData {
+	s, ok := i.(*libnetwork.ContainerData)
+	if !ok {
+		panic(fmt.Sprintf("Failed i2c for %v", i))
+	}
+	return s
+}
+
+func i2eL(i interface{}) []*endpointResource {
+	s, ok := i.([]*endpointResource)
+	if !ok {
+		panic(fmt.Sprintf("Failed i2eL for %v", i))
+	}
+	return s
+}
+
+func i2n(i interface{}) *networkResource {
+	s, ok := i.(*networkResource)
+	if !ok {
+		panic(fmt.Sprintf("Failed i2n for %v", i))
+	}
+	return s
+}
+
+func i2nL(i interface{}) []*networkResource {
+	s, ok := i.([]*networkResource)
+	if !ok {
+		panic(fmt.Sprintf("Failed i2nL for %v", i))
+	}
+	return s
+}
+
+func TestMain(m *testing.M) {
+	if reexec.Init() {
+		return
+	}
+	os.Exit(m.Run())
+}
+
+func TestJoinOptionParser(t *testing.T) {
+	hn := "host1"
+	dn := "docker.com"
+	hp := "/etc/hosts"
+	rc := "/etc/resolv.conf"
+	dnss := []string{"8.8.8.8", "172.28.34.5"}
+	ehs := []endpointExtraHost{endpointExtraHost{Name: "extra1", Address: "172.28.9.1"}, endpointExtraHost{Name: "extra2", Address: "172.28.9.2"}}
+	pus := []endpointParentUpdate{endpointParentUpdate{EndpointID: "abc123def456", Name: "serv1", Address: "172.28.30.123"}}
+
+	ej := endpointJoin{
+		HostName:          hn,
+		DomainName:        dn,
+		HostsPath:         hp,
+		ResolvConfPath:    rc,
+		DNS:               dnss,
+		ExtraHosts:        ehs,
+		ParentUpdates:     pus,
+		UseDefaultSandbox: true,
+	}
+
+	if len(ej.parseOptions()) != 10 {
+		t.Fatalf("Failed to generate all libnetwork.EndpointJoinOption methods libnetwork.EndpointJoinOption method")
+	}
+
+}
+
+func TestJson(t *testing.T) {
+	nc := networkCreate{NetworkType: bridgeNetType}
+	b, err := json.Marshal(nc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var ncp networkCreate
+	err = json.Unmarshal(b, &ncp)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if nc.NetworkType != ncp.NetworkType {
+		t.Fatalf("Incorrect networkCreate after json encoding/deconding: %v", ncp)
+	}
+
+	jl := endpointJoin{ContainerID: "abcdef456789"}
+	b, err = json.Marshal(jl)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var jld endpointJoin
+	err = json.Unmarshal(b, &jld)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if jl.ContainerID != jld.ContainerID {
+		t.Fatalf("Incorrect endpointJoin after json encoding/deconding: %v", jld)
+	}
+}
+
+func TestCreateDeleteNetwork(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	badBody, err := json.Marshal("bad body")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars := make(map[string]string)
+	_, errRsp := procCreateNetwork(c, nil, badBody)
+	if errRsp == &createdResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected StatusBadRequest status code, got: %v", errRsp)
+	}
+
+	incompleteBody, err := json.Marshal(networkCreate{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, errRsp = procCreateNetwork(c, vars, incompleteBody)
+	if errRsp == &createdResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected StatusBadRequest status code, got: %v", errRsp)
+	}
+
+	ops := make(map[string]interface{})
+	ops[netlabel.GenericData] = options.Generic{}
+	nc := networkCreate{Name: "network_1", NetworkType: bridgeNetType, Options: ops}
+	goodBody, err := json.Marshal(nc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, errRsp = procCreateNetwork(c, vars, goodBody)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	vars[urlNwName] = ""
+	_, errRsp = procDeleteNetwork(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+
+	vars[urlNwName] = "abc"
+	_, errRsp = procDeleteNetwork(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+
+	vars[urlNwName] = "network_1"
+	_, errRsp = procDeleteNetwork(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+}
+
+func TestGetNetworksAndEndpoints(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nc := networkCreate{Name: "sh", NetworkType: bridgeNetType}
+	body, err := json.Marshal(nc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars := make(map[string]string)
+	inid, errRsp := procCreateNetwork(c, vars, body)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	nid, ok := inid.(string)
+	if !ok {
+		t.FailNow()
+	}
+
+	ec1 := endpointCreate{
+		Name: "ep1",
+		ExposedPorts: []types.TransportPort{
+			types.TransportPort{Proto: types.TCP, Port: uint16(5000)},
+			types.TransportPort{Proto: types.UDP, Port: uint16(400)},
+			types.TransportPort{Proto: types.TCP, Port: uint16(600)},
+		},
+		PortMapping: []types.PortBinding{
+			types.PortBinding{Proto: types.TCP, Port: uint16(230), HostPort: uint16(23000)},
+			types.PortBinding{Proto: types.UDP, Port: uint16(200), HostPort: uint16(22000)},
+			types.PortBinding{Proto: types.TCP, Port: uint16(120), HostPort: uint16(12000)},
+		},
+	}
+	b1, err := json.Marshal(ec1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ec2 := endpointCreate{Name: "ep2"}
+	b2, err := json.Marshal(ec2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars[urlNwName] = "sh"
+	vars[urlEpName] = "ep1"
+	ieid1, errRsp := procCreateEndpoint(c, vars, b1)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	eid1 := i2s(ieid1)
+	vars[urlEpName] = "ep2"
+	ieid2, errRsp := procCreateEndpoint(c, vars, b2)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	eid2 := i2s(ieid2)
+
+	vars[urlNwName] = ""
+	vars[urlEpName] = "ep1"
+	_, errRsp = procGetEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure but succeeded: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected to fail with http.StatusBadRequest, but got: %d", errRsp.StatusCode)
+	}
+
+	vars = make(map[string]string)
+	vars[urlNwName] = "sh"
+	vars[urlEpID] = ""
+	_, errRsp = procGetEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure but succeeded: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected to fail with http.StatusBadRequest, but got: %d", errRsp.StatusCode)
+	}
+
+	vars = make(map[string]string)
+	vars[urlNwID] = ""
+	vars[urlEpID] = eid1
+	_, errRsp = procGetEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure but succeeded: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected to fail with http.StatusBadRequest, but got: %d", errRsp.StatusCode)
+	}
+
+	// nw by name and ep by id
+	vars[urlNwName] = "sh"
+	i1, errRsp := procGetEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	// nw by name and ep by name
+	delete(vars, urlEpID)
+	vars[urlEpName] = "ep1"
+	i2, errRsp := procGetEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	// nw by id and ep by name
+	delete(vars, urlNwName)
+	vars[urlNwID] = nid
+	i3, errRsp := procGetEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	// nw by id and ep by id
+	delete(vars, urlEpName)
+	vars[urlEpID] = eid1
+	i4, errRsp := procGetEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	id1 := i2e(i1).ID
+	if id1 != i2e(i2).ID || id1 != i2e(i3).ID || id1 != i2e(i4).ID {
+		t.Fatalf("Endpoints retireved via different query parameters differ: %v, %v, %v, %v", i1, i2, i3, i4)
+	}
+
+	vars[urlNwName] = ""
+	_, errRsp = procGetEndpoints(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	delete(vars, urlNwName)
+	vars[urlNwID] = "fakeID"
+	_, errRsp = procGetEndpoints(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlNwID] = nid
+	_, errRsp = procGetEndpoints(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	vars[urlNwName] = "sh"
+	iepList, errRsp := procGetEndpoints(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	epList := i2eL(iepList)
+	if len(epList) != 2 {
+		t.Fatalf("Did not return the expected number (2) of endpoint resources: %d", len(epList))
+	}
+	if "sh" != epList[0].Network || "sh" != epList[1].Network {
+		t.Fatalf("Did not find expected network name in endpoint resources")
+	}
+
+	vars = make(map[string]string)
+	vars[urlNwName] = ""
+	_, errRsp = procGetNetwork(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Exepected failure, got: %v", errRsp)
+	}
+	vars[urlNwName] = "shhhhh"
+	_, errRsp = procGetNetwork(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Exepected failure, got: %v", errRsp)
+	}
+	vars[urlNwName] = "sh"
+	inr1, errRsp := procGetNetwork(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	nr1 := i2n(inr1)
+
+	delete(vars, urlNwName)
+	vars[urlNwID] = "cacca"
+	_, errRsp = procGetNetwork(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	vars[urlNwID] = nid
+	inr2, errRsp := procGetNetwork(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("procgetNetworkByName() != procgetNetworkById(), %v vs %v", inr1, inr2)
+	}
+	nr2 := i2n(inr2)
+	if nr1.Name != nr2.Name || nr1.Type != nr2.Type || nr1.ID != nr2.ID || len(nr1.Endpoints) != len(nr2.Endpoints) {
+		t.Fatalf("Get by name and Get failure: %v", errRsp)
+	}
+
+	if len(nr1.Endpoints) != 2 {
+		t.Fatalf("Did not find the expected number (2) of endpoint resources in the network resource: %d", len(nr1.Endpoints))
+	}
+	for _, er := range nr1.Endpoints {
+		if er.ID != eid1 && er.ID != eid2 {
+			t.Fatalf("Did not find the expected endpoint resources in the network resource: %v", nr1.Endpoints)
+		}
+	}
+
+	iList, errRsp := procGetNetworks(c, nil, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	netList := i2nL(iList)
+	if len(netList) != 1 {
+		t.Fatalf("Did not return the expected number of network resources")
+	}
+	if nid != netList[0].ID {
+		t.Fatalf("Did not find expected network %s: %v", nid, netList)
+	}
+
+	_, errRsp = procDeleteNetwork(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Exepected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "ep1"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	delete(vars, urlEpName)
+	iepList, errRsp = procGetEndpoints(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	epList = i2eL(iepList)
+	if len(epList) != 1 {
+		t.Fatalf("Did not return the expected number (1) of endpoint resources: %d", len(epList))
+	}
+
+	vars[urlEpName] = "ep2"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	iepList, errRsp = procGetEndpoints(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	epList = i2eL(iepList)
+	if len(epList) != 0 {
+		t.Fatalf("Did not return the expected number (0) of endpoint resources: %d", len(epList))
+	}
+
+	_, errRsp = procDeleteNetwork(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	iList, errRsp = procGetNetworks(c, nil, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	netList = i2nL(iList)
+	if len(netList) != 0 {
+		t.Fatalf("Did not return the expected number of network resources")
+	}
+}
+
+func TestDetectGetNetworksInvalidQueryComposition(t *testing.T) {
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars := map[string]string{urlNwName: "x", urlNwPID: "y"}
+	_, errRsp := procGetNetworks(c, vars, nil)
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected %d. Got: %v", http.StatusBadRequest, errRsp)
+	}
+}
+
+func TestDetectGetEndpointsInvalidQueryComposition(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.NewNetwork(bridgeNetType, "network", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars := map[string]string{urlNwName: "network", urlEpName: "x", urlEpPID: "y"}
+	_, errRsp := procGetEndpoints(c, vars, nil)
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected %d. Got: %v", http.StatusBadRequest, errRsp)
+	}
+}
+
+func TestFindNetworkUtil(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nw, err := c.NewNetwork(bridgeNetType, "network", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nid := nw.ID()
+
+	defer checkPanic(t)
+	findNetwork(c, "", -1)
+
+	_, errRsp := findNetwork(c, "", byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected %d, but got: %d", http.StatusBadRequest, errRsp.StatusCode)
+	}
+
+	n, errRsp := findNetwork(c, nid, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+	if n == nil {
+		t.Fatalf("Unexpected nil libnetwork.Network")
+	}
+	if nid != n.ID() {
+		t.Fatalf("Incorrect libnetwork.Network resource. It has different id: %v", n)
+	}
+	if "network" != n.Name() {
+		t.Fatalf("Incorrect libnetwork.Network resource. It has different name: %v", n)
+	}
+
+	n, errRsp = findNetwork(c, "network", byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexpected failure: %v", errRsp)
+	}
+	if n == nil {
+		t.Fatalf("Unexpected nil libnetwork.Network")
+	}
+	if nid != n.ID() {
+		t.Fatalf("Incorrect libnetwork.Network resource. It has different id: %v", n)
+	}
+	if "network" != n.Name() {
+		t.Fatalf("Incorrect libnetwork.Network resource. It has different name: %v", n)
+	}
+
+	n.Delete()
+
+	_, errRsp = findNetwork(c, nid, byID)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+
+	_, errRsp = findNetwork(c, "network", byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+}
+
+func TestCreateDeleteEndpoints(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nc := networkCreate{Name: "firstNet", NetworkType: bridgeNetType}
+	body, err := json.Marshal(nc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars := make(map[string]string)
+	i, errRsp := procCreateNetwork(c, vars, body)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	nid := i2s(i)
+
+	vbad, err := json.Marshal("bad endppoint create data")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars[urlNwName] = "firstNet"
+	_, errRsp = procCreateEndpoint(c, vars, vbad)
+	if errRsp == &createdResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+
+	b, err := json.Marshal(endpointCreate{Name: ""})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars[urlNwName] = "secondNet"
+	_, errRsp = procCreateEndpoint(c, vars, b)
+	if errRsp == &createdResponse {
+		t.Fatalf("Expected to fail but succeeded")
+	}
+
+	vars[urlNwName] = "firstNet"
+	_, errRsp = procCreateEndpoint(c, vars, b)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure but succeeded: %v", errRsp)
+	}
+
+	b, err = json.Marshal(endpointCreate{Name: "firstEp"})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	i, errRsp = procCreateEndpoint(c, vars, b)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+	eid := i2s(i)
+
+	_, errRsp = findEndpoint(c, "myNet", "firstEp", byName, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure but succeeded: %v", errRsp)
+	}
+
+	ep0, errRsp := findEndpoint(c, nid, "firstEp", byID, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	ep1, errRsp := findEndpoint(c, "firstNet", "firstEp", byName, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	ep2, errRsp := findEndpoint(c, nid, eid, byID, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	ep3, errRsp := findEndpoint(c, "firstNet", eid, byName, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	if ep0.ID() != ep1.ID() || ep0.ID() != ep2.ID() || ep0.ID() != ep3.ID() {
+		t.Fatalf("Diffenrent queries returned different endpoints: \nep0: %v\nep1: %v\nep2: %v\nep3: %v", ep0, ep1, ep2, ep3)
+	}
+
+	vars = make(map[string]string)
+	vars[urlNwName] = ""
+	vars[urlEpName] = "ep1"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlNwName] = "firstNet"
+	vars[urlEpName] = ""
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "ep2"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "firstEp"
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	_, errRsp = findEndpoint(c, "firstNet", "firstEp", byName, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+}
+
+func TestJoinLeave(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nb, err := json.Marshal(networkCreate{Name: "network", NetworkType: bridgeNetType})
+	if err != nil {
+		t.Fatal(err)
+	}
+	vars := make(map[string]string)
+	_, errRsp := procCreateNetwork(c, vars, nb)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	eb, err := json.Marshal(endpointCreate{Name: "endpoint"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	vars[urlNwName] = "network"
+	_, errRsp = procCreateEndpoint(c, vars, eb)
+	if errRsp != &createdResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	vbad, err := json.Marshal("bad data")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, errRsp = procJoinEndpoint(c, vars, vbad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "endpoint"
+	bad, err := json.Marshal(endpointJoin{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, errRsp = procJoinEndpoint(c, vars, bad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	cid := "abcdefghi"
+	jl := endpointJoin{ContainerID: cid}
+	jlb, err := json.Marshal(jl)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	vars = make(map[string]string)
+	vars[urlNwName] = ""
+	vars[urlEpName] = ""
+	_, errRsp = procJoinEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlNwName] = "network"
+	vars[urlEpName] = ""
+	_, errRsp = procJoinEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "epoint"
+	_, errRsp = procJoinEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlEpName] = "endpoint"
+	cdi, errRsp := procJoinEndpoint(c, vars, jlb)
+	if errRsp != &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	cd := i2c(cdi)
+	if cd.SandboxKey == "" {
+		t.Fatalf("Empty sandbox key")
+	}
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlNwName] = "network2"
+	_, errRsp = procLeaveEndpoint(c, vars, vbad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	_, errRsp = procLeaveEndpoint(c, vars, bad)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars = make(map[string]string)
+	vars[urlNwName] = ""
+	vars[urlEpName] = ""
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars[urlNwName] = "network"
+	vars[urlEpName] = ""
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars[urlEpName] = "2epoint"
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+	vars[urlEpName] = "epoint"
+	vars[urlCnID] = "who"
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	delete(vars, urlCnID)
+	vars[urlEpName] = "endpoint"
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	vars[urlCnID] = cid
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	_, errRsp = procLeaveEndpoint(c, vars, jlb)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, got: %v", errRsp)
+	}
+
+	_, errRsp = procDeleteEndpoint(c, vars, nil)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+}
+
+func TestFindEndpointUtil(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.ConfigureNetworkDriver(bridgeNetType, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	nw, err := c.NewNetwork(bridgeNetType, "second", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nid := nw.ID()
+
+	ep, err := nw.CreateEndpoint("secondEp", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	eid := ep.ID()
+
+	defer checkPanic(t)
+	findEndpoint(c, nid, "", byID, -1)
+
+	_, errRsp := findEndpoint(c, nid, "", byID, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("Expected %d, but got: %d", http.StatusBadRequest, errRsp.StatusCode)
+	}
+
+	ep0, errRsp := findEndpoint(c, nid, "secondEp", byID, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	ep1, errRsp := findEndpoint(c, "second", "secondEp", byName, byName)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	ep2, errRsp := findEndpoint(c, nid, eid, byID, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	ep3, errRsp := findEndpoint(c, "second", eid, byName, byID)
+	if errRsp != &successResponse {
+		t.Fatalf("Unexepected failure: %v", errRsp)
+	}
+
+	if ep0 != ep1 || ep0 != ep2 || ep0 != ep3 {
+		t.Fatalf("Diffenrent queries returned different endpoints")
+	}
+
+	ep.Delete()
+
+	_, errRsp = findEndpoint(c, nid, "secondEp", byID, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+
+	_, errRsp = findEndpoint(c, "second", "secondEp", byName, byName)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+
+	_, errRsp = findEndpoint(c, nid, eid, byID, byID)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+
+	_, errRsp = findEndpoint(c, "second", eid, byName, byID)
+	if errRsp == &successResponse {
+		t.Fatalf("Expected failure, but got: %v", errRsp)
+	}
+	if errRsp.StatusCode != http.StatusNotFound {
+		t.Fatalf("Expected %d, but got: %d", http.StatusNotFound, errRsp.StatusCode)
+	}
+}
+
+func checkPanic(t *testing.T) {
+	if r := recover(); r != nil {
+		if _, ok := r.(runtime.Error); ok {
+			panic(r)
+		}
+	} else {
+		t.Fatalf("Expected to panic, but suceeded")
+	}
+}
+
+func TestDetectNetworkTargetPanic(t *testing.T) {
+	defer checkPanic(t)
+	vars := make(map[string]string)
+	detectNetworkTarget(vars)
+}
+
+func TestDetectEndpointTargetPanic(t *testing.T) {
+	defer checkPanic(t)
+	vars := make(map[string]string)
+	detectEndpointTarget(vars)
+}
+
+func TestResponseStatus(t *testing.T) {
+	list := []int{
+		http.StatusBadGateway,
+		http.StatusBadRequest,
+		http.StatusConflict,
+		http.StatusContinue,
+		http.StatusExpectationFailed,
+		http.StatusForbidden,
+		http.StatusFound,
+		http.StatusGatewayTimeout,
+		http.StatusGone,
+		http.StatusHTTPVersionNotSupported,
+		http.StatusInternalServerError,
+		http.StatusLengthRequired,
+		http.StatusMethodNotAllowed,
+		http.StatusMovedPermanently,
+		http.StatusMultipleChoices,
+		http.StatusNoContent,
+		http.StatusNonAuthoritativeInfo,
+		http.StatusNotAcceptable,
+		http.StatusNotFound,
+		http.StatusNotModified,
+		http.StatusPartialContent,
+		http.StatusPaymentRequired,
+		http.StatusPreconditionFailed,
+		http.StatusProxyAuthRequired,
+		http.StatusRequestEntityTooLarge,
+		http.StatusRequestTimeout,
+		http.StatusRequestURITooLong,
+		http.StatusRequestedRangeNotSatisfiable,
+		http.StatusResetContent,
+		http.StatusServiceUnavailable,
+		http.StatusSwitchingProtocols,
+		http.StatusTemporaryRedirect,
+		http.StatusUnauthorized,
+		http.StatusUnsupportedMediaType,
+		http.StatusUseProxy,
+	}
+	for _, c := range list {
+		r := responseStatus{StatusCode: c}
+		if r.isOK() {
+			t.Fatalf("isOK() returned true for code% d", c)
+		}
+	}
+
+	r := responseStatus{StatusCode: http.StatusOK}
+	if !r.isOK() {
+		t.Fatalf("isOK() failed")
+	}
+
+	r = responseStatus{StatusCode: http.StatusCreated}
+	if !r.isOK() {
+		t.Fatalf("isOK() failed")
+	}
+}
+
+// Local structs for end to end testing of api.go
+type localReader struct {
+	data  []byte
+	beBad bool
+}
+
+func newLocalReader(data []byte) *localReader {
+	lr := &localReader{data: make([]byte, len(data))}
+	copy(lr.data, data)
+	return lr
+}
+
+func (l *localReader) Read(p []byte) (n int, err error) {
+	if l.beBad {
+		return 0, errors.New("I am a bad reader")
+	}
+	if p == nil {
+		return -1, fmt.Errorf("nil buffer passed")
+	}
+	if l.data == nil || len(l.data) == 0 {
+		return 0, io.EOF
+	}
+	copy(p[:], l.data[:])
+	return len(l.data), io.EOF
+}
+
+type localResponseWriter struct {
+	body       []byte
+	statusCode int
+}
+
+func newWriter() *localResponseWriter {
+	return &localResponseWriter{}
+}
+
+func (f *localResponseWriter) Header() http.Header {
+	return make(map[string][]string, 0)
+}
+
+func (f *localResponseWriter) Write(data []byte) (int, error) {
+	if data == nil {
+		return -1, fmt.Errorf("nil data passed")
+	}
+
+	f.body = make([]byte, len(data))
+	copy(f.body, data)
+
+	return len(f.body), nil
+}
+
+func (f *localResponseWriter) WriteHeader(c int) {
+	f.statusCode = c
+}
+
+func TestwriteJSON(t *testing.T) {
+	testCode := 55
+	testData, err := json.Marshal("test data")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rsp := newWriter()
+	writeJSON(rsp, testCode, testData)
+	if rsp.statusCode != testCode {
+		t.Fatalf("writeJSON() failed to set the status code. Expected %d. Got %d", testCode, rsp.statusCode)
+	}
+	if !bytes.Equal(testData, rsp.body) {
+		t.Fatalf("writeJSON() failed to set the body. Expected %s. Got %s", testData, rsp.body)
+	}
+
+}
+
+func TestHttpHandlerUninit(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	h := &httpHandler{c: c}
+	h.initRouter()
+	if h.r == nil {
+		t.Fatalf("initRouter() did not initialize the router")
+	}
+
+	rsp := newWriter()
+	req, err := http.NewRequest("GET", "/v1.19/networks", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	handleRequest := NewHTTPHandler(nil)
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusServiceUnavailable {
+		t.Fatalf("Expected (%d). Got (%d): %s", http.StatusServiceUnavailable, rsp.statusCode, rsp.body)
+	}
+
+	handleRequest = NewHTTPHandler(c)
+
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Expected (%d). Got: (%d): %s", http.StatusOK, rsp.statusCode, rsp.body)
+	}
+
+	var list []*networkResource
+	err = json.Unmarshal(rsp.body, &list)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(list) != 0 {
+		t.Fatalf("Expected empty list. Got %v", list)
+	}
+
+	n, err := c.NewNetwork(bridgeNetType, "didietro", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nwr := buildNetworkResource(n)
+	expected, err := json.Marshal([]*networkResource{nwr})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+	if len(rsp.body) == 0 {
+		t.Fatalf("Empty list of networks")
+	}
+	if bytes.Equal(rsp.body, expected) {
+		t.Fatalf("Incorrect list of networks in response's body")
+	}
+}
+
+func TestHttpHandlerBadBody(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	rsp := newWriter()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest := NewHTTPHandler(c)
+
+	req, err := http.NewRequest("POST", "/v1.19/networks", &localReader{beBad: true})
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusBadRequest {
+		t.Fatalf("Unexpected status code. Expected (%d). Got (%d): %s.", http.StatusBadRequest, rsp.statusCode, string(rsp.body))
+	}
+
+	body := []byte{}
+	lr := newLocalReader(body)
+	req, err = http.NewRequest("POST", "/v1.19/networks", lr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusBadRequest {
+		t.Fatalf("Unexpected status code. Expected (%d). Got (%d): %s.", http.StatusBadRequest, rsp.statusCode, string(rsp.body))
+	}
+}
+
+func TestEndToEnd(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	rsp := newWriter()
+
+	c, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest := NewHTTPHandler(c)
+
+	// Create network
+	nc := networkCreate{Name: "network-fiftyfive", NetworkType: bridgeNetType}
+	body, err := json.Marshal(nc)
+	if err != nil {
+		t.Fatal(err)
+	}
+	lr := newLocalReader(body)
+	req, err := http.NewRequest("POST", "/v1.19/networks", lr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusCreated {
+		t.Fatalf("Unexpectded status code. Expected (%d). Got (%d): %s.", http.StatusCreated, rsp.statusCode, string(rsp.body))
+	}
+	if len(rsp.body) == 0 {
+		t.Fatalf("Empty response body")
+	}
+
+	var nid string
+	err = json.Unmarshal(rsp.body, &nid)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Query networks collection
+	req, err = http.NewRequest("GET", "/v1.19/networks", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	b0 := make([]byte, len(rsp.body))
+	copy(b0, rsp.body)
+
+	req, err = http.NewRequest("GET", "/v1.19/networks?name=network-fiftyfive", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	if !bytes.Equal(b0, rsp.body) {
+		t.Fatalf("Expected same body from GET /networks and GET /networks?name=<nw> when only network <nw> exist.")
+	}
+
+	// Query network by name
+	req, err = http.NewRequest("GET", "/v1.19/networks?name=culo", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	var list []*networkResource
+	err = json.Unmarshal(rsp.body, &list)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(list) != 0 {
+		t.Fatalf("Expected empty list. Got %v", list)
+	}
+
+	req, err = http.NewRequest("GET", "/v1.19/networks?name=network-fiftyfive", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	err = json.Unmarshal(rsp.body, &list)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(list) == 0 {
+		t.Fatalf("Expected non empty list")
+	}
+	if list[0].Name != "network-fiftyfive" || nid != list[0].ID {
+		t.Fatalf("Incongruent resource found: %v", list[0])
+	}
+
+	// Query network by partial id
+	chars := []byte(nid)
+	partial := string(chars[0 : len(chars)/2])
+	req, err = http.NewRequest("GET", "/v1.19/networks?partial-id="+partial, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	err = json.Unmarshal(rsp.body, &list)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(list) == 0 {
+		t.Fatalf("Expected non empty list")
+	}
+	if list[0].Name != "network-fiftyfive" || nid != list[0].ID {
+		t.Fatalf("Incongruent resource found: %v", list[0])
+	}
+
+	// Get network by id
+	req, err = http.NewRequest("GET", "/v1.19/networks/"+nid, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	var nwr networkResource
+	err = json.Unmarshal(rsp.body, &nwr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if nwr.Name != "network-fiftyfive" || nid != nwr.ID {
+		t.Fatalf("Incongruent resource found: %v", nwr)
+	}
+
+	// Create endpoint
+	eb, err := json.Marshal(endpointCreate{Name: "ep-TwentyTwo"})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	lr = newLocalReader(eb)
+	req, err = http.NewRequest("POST", "/v1.19/networks/"+nid+"/endpoints", lr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusCreated {
+		t.Fatalf("Unexpectded status code. Expected (%d). Got (%d): %s.", http.StatusCreated, rsp.statusCode, string(rsp.body))
+	}
+	if len(rsp.body) == 0 {
+		t.Fatalf("Empty response body")
+	}
+
+	var eid string
+	err = json.Unmarshal(rsp.body, &eid)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Query endpoint(s)
+	req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Expected StatusOK. Got (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints?name=bla", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+	var epList []*endpointResource
+	err = json.Unmarshal(rsp.body, &epList)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(epList) != 0 {
+		t.Fatalf("Expected empty list. Got %v", epList)
+	}
+
+	// Query endpoint by name
+	req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints?name=ep-TwentyTwo", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	err = json.Unmarshal(rsp.body, &epList)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(epList) == 0 {
+		t.Fatalf("Empty response body")
+	}
+	if epList[0].Name != "ep-TwentyTwo" || eid != epList[0].ID {
+		t.Fatalf("Incongruent resource found: %v", epList[0])
+	}
+
+	// Query endpoint by partial id
+	chars = []byte(eid)
+	partial = string(chars[0 : len(chars)/2])
+	req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints?partial-id="+partial, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	err = json.Unmarshal(rsp.body, &epList)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(epList) == 0 {
+		t.Fatalf("Empty response body")
+	}
+	if epList[0].Name != "ep-TwentyTwo" || eid != epList[0].ID {
+		t.Fatalf("Incongruent resource found: %v", epList[0])
+	}
+
+	// Get endpoint by id
+	req, err = http.NewRequest("GET", "/v1.19/networks/"+nid+"/endpoints/"+eid, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	handleRequest(rsp, req)
+	if rsp.statusCode != http.StatusOK {
+		t.Fatalf("Unexpectded failure: (%d): %s", rsp.statusCode, rsp.body)
+	}
+
+	var epr endpointResource
+	err = json.Unmarshal(rsp.body, &epr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if epr.Name != "ep-TwentyTwo" || epr.ID != eid {
+		t.Fatalf("Incongruent resource found: %v", epr)
+	}
+}
+
+type bre struct{}
+
+func (b *bre) Error() string {
+	return "I am a bad request error"
+}
+func (b *bre) BadRequest() {}
+
+type nfe struct{}
+
+func (n *nfe) Error() string {
+	return "I am a not found error"
+}
+func (n *nfe) NotFound() {}
+
+type forb struct{}
+
+func (f *forb) Error() string {
+	return "I am a bad request error"
+}
+func (f *forb) Forbidden() {}
+
+type notimpl struct{}
+
+func (nip *notimpl) Error() string {
+	return "I am a not implemented error"
+}
+func (nip *notimpl) NotImplemented() {}
+
+type inter struct{}
+
+func (it *inter) Error() string {
+	return "I am a internal error"
+}
+func (it *inter) Internal() {}
+
+type tout struct{}
+
+func (to *tout) Error() string {
+	return "I am a timeout error"
+}
+func (to *tout) Timeout() {}
+
+type noserv struct{}
+
+func (nos *noserv) Error() string {
+	return "I am a no service error"
+}
+func (nos *noserv) NoService() {}
+
+type notclassified struct{}
+
+func (noc *notclassified) Error() string {
+	return "I am a non classified error"
+}
+
+func TestErrorConversion(t *testing.T) {
+	if convertNetworkError(new(bre)).StatusCode != http.StatusBadRequest {
+		t.Fatalf("Failed to recognize BadRequest error")
+	}
+
+	if convertNetworkError(new(nfe)).StatusCode != http.StatusNotFound {
+		t.Fatalf("Failed to recognize NotFound error")
+	}
+
+	if convertNetworkError(new(forb)).StatusCode != http.StatusForbidden {
+		t.Fatalf("Failed to recognize Forbidden error")
+	}
+
+	if convertNetworkError(new(notimpl)).StatusCode != http.StatusNotImplemented {
+		t.Fatalf("Failed to recognize NotImplemented error")
+	}
+
+	if convertNetworkError(new(inter)).StatusCode != http.StatusInternalServerError {
+		t.Fatalf("Failed to recognize Internal error")
+	}
+
+	if convertNetworkError(new(tout)).StatusCode != http.StatusRequestTimeout {
+		t.Fatalf("Failed to recognize Timeout error")
+	}
+
+	if convertNetworkError(new(noserv)).StatusCode != http.StatusServiceUnavailable {
+		t.Fatalf("Failed to recognize No Service error")
+	}
+
+	if convertNetworkError(new(notclassified)).StatusCode != http.StatusInternalServerError {
+		t.Fatalf("Failed to recognize not classified error as Internal error")
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/api/types.go b/vendor/src/github.com/docker/libnetwork/api/types.go
new file mode 100644
index 0000000..2490a84
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/api/types.go
@@ -0,0 +1,67 @@
+package api
+
+import "github.com/docker/libnetwork/types"
+
+/***********
+ Resources
+************/
+
+// networkResource is the body of the "get network" http response message
+type networkResource struct {
+	Name      string
+	ID        string
+	Type      string
+	Endpoints []*endpointResource
+}
+
+// endpointResource is the body of the "get endpoint" http response message
+type endpointResource struct {
+	Name    string
+	ID      string
+	Network string
+}
+
+/***********
+  Body types
+  ************/
+
+// networkCreate is the expected body of the "create network" http request message
+type networkCreate struct {
+	Name        string
+	NetworkType string
+	Options     map[string]interface{}
+}
+
+// endpointCreate represents the body of the "create endpoint" http request message
+type endpointCreate struct {
+	Name         string
+	ExposedPorts []types.TransportPort
+	PortMapping  []types.PortBinding
+}
+
+// endpointJoin represents the expected body of the "join endpoint" or "leave endpoint" http request messages
+type endpointJoin struct {
+	ContainerID       string
+	HostName          string
+	DomainName        string
+	HostsPath         string
+	ResolvConfPath    string
+	DNS               []string
+	ExtraHosts        []endpointExtraHost
+	ParentUpdates     []endpointParentUpdate
+	UseDefaultSandbox bool
+}
+
+// EndpointExtraHost represents the extra host object
+type endpointExtraHost struct {
+	Name    string
+	Address string
+}
+
+// EndpointParentUpdate is the object carrying the information about the
+// endpoint parent that needs to be updated
+type endpointParentUpdate struct {
+	EndpointID string
+	Name       string
+	Address    string
+}
diff --git a/vendor/src/github.com/docker/libnetwork/circle.yml b/vendor/src/github.com/docker/libnetwork/circle.yml
new file mode 100644
index 0000000..d02f6a9
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/circle.yml
@@ -0,0 +1,12 @@
+machine:
+    services:
+        - docker
+
+dependencies:
+    override:
+        - echo "Nothing to install"
+
+test:
+    override:
+        - make circle-ci
+
diff --git a/vendor/src/github.com/docker/libnetwork/client/client.go b/vendor/src/github.com/docker/libnetwork/client/client.go
new file mode 100644
index 0000000..4bc86da
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/client.go
@@ -0,0 +1,111 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"reflect"
+	"strings"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CallFunc provides environment specific call utility to invoke backend functions from UI
+type CallFunc func(string, string, interface{}, map[string][]string) (io.ReadCloser, int, error)
+
+// NetworkCli is the UI object for network subcmds
+type NetworkCli struct {
+	out  io.Writer
+	err  io.Writer
+	call CallFunc
+}
+
+// NewNetworkCli is a convenient function to create a NetworkCli object
+func NewNetworkCli(out, err io.Writer, call CallFunc) *NetworkCli {
+	return &NetworkCli{
+		out:  out,
+		err:  err,
+		call: call,
+	}
+}
+
+// getMethod is Borrowed from Docker UI which uses reflection to identify the UI Handler
+func (cli *NetworkCli) getMethod(args ...string) (func(string, ...string) error, bool) {
+	camelArgs := make([]string, len(args))
+	for i, s := range args {
+		if len(s) == 0 {
+			return nil, false
+		}
+		camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:])
+	}
+	methodName := "Cmd" + strings.Join(camelArgs, "")
+	method := reflect.ValueOf(cli).MethodByName(methodName)
+	if !method.IsValid() {
+		return nil, false
+	}
+	return method.Interface().(func(string, ...string) error), true
+}
+
+// Cmd is borrowed from Docker UI and acts as the entry point for network UI commands.
+// network UI commands are designed to be invoked from multiple parent chains
+func (cli *NetworkCli) Cmd(chain string, args ...string) error {
+	if len(args) > 2 {
+		method, exists := cli.getMethod(args[:3]...)
+		if exists {
+			return method(chain+" "+args[0]+" "+args[1], args[3:]...)
+		}
+	}
+	if len(args) > 1 {
+		method, exists := cli.getMethod(args[:2]...)
+		if exists {
+			return method(chain+" "+args[0], args[2:]...)
+		}
+	}
+	if len(args) > 0 {
+		method, exists := cli.getMethod(args[0])
+		if !exists {
+			return fmt.Errorf("%s: '%s' is not a %s command. See '%s --help'.\n", chain, args[0], chain, chain)
+		}
+		return method(chain, args[1:]...)
+	}
+	flag.Usage()
+	return nil
+}
+
+// Subcmd is borrowed from Docker UI and performs the same function of configuring the subCmds
+func (cli *NetworkCli) Subcmd(chain, name, signature, description string, exitOnError bool) *flag.FlagSet {
+	var errorHandling flag.ErrorHandling
+	if exitOnError {
+		errorHandling = flag.ExitOnError
+	} else {
+		errorHandling = flag.ContinueOnError
+	}
+	flags := flag.NewFlagSet(name, errorHandling)
+	flags.Usage = func() {
+		options := ""
+		if signature != "" {
+			signature = " " + signature
+		}
+		if flags.FlagCountUndeprecated() > 0 {
+			options = " [OPTIONS]"
+		}
+		fmt.Fprintf(cli.out, "\nUsage: %s %s%s%s\n\n%s\n\n", chain, name, options, signature, description)
+		flags.SetOutput(cli.out)
+		flags.PrintDefaults()
+	}
+	return flags
+}
+
+func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
+	if stream != nil {
+		defer stream.Close()
+	}
+	if err != nil {
+		return nil, statusCode, err
+	}
+	body, err := ioutil.ReadAll(stream)
+	if err != nil {
+		return nil, -1, err
+	}
+	return body, statusCode, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/client/client_experimental_test.go b/vendor/src/github.com/docker/libnetwork/client/client_experimental_test.go
new file mode 100644
index 0000000..9592b3c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/client_experimental_test.go
@@ -0,0 +1,124 @@
+// +build experimental
+
+package client
+
+import (
+	"bytes"
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+func TestClientNetworkServiceInvalidCommand(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "invalid")
+	if err == nil {
+		t.Fatalf("Passing invalid commands must fail")
+	}
+}
+
+func TestClientNetworkServiceCreate(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "create", mockServiceName, mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkServiceRm(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "rm", mockServiceName, mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkServiceLs(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "ls", mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkServiceInfo(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "info", mockServiceName, mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkServiceInfoById(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "info", mockServiceID, mockNwID)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkServiceJoin(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "join", mockContainerID, mockServiceName, mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkServiceLeave(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "service", "leave", mockContainerID, mockServiceName, mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+// Docker Flag processing in flag.go uses os.Exit() frequently, even for --help
+// TODO : Handle the --help test-case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateHelp(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+		return nil, 0, nil
+	}
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "create", "--help")
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+}
+*/
+
+// Docker flag processing in flag.go uses os.Exit(1) for incorrect parameter case.
+// TODO : Handle the missing argument case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateMissingArgument(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+		return nil, 0, nil
+	}
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "create")
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+*/
diff --git a/vendor/src/github.com/docker/libnetwork/client/client_test.go b/vendor/src/github.com/docker/libnetwork/client/client_test.go
new file mode 100644
index 0000000..3b2f3a8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/client_test.go
@@ -0,0 +1,212 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+// nopCloser is used to provide a dummy CallFunc for Cmd()
+type nopCloser struct {
+	io.Reader
+}
+
+func (nopCloser) Close() error { return nil }
+
+func TestMain(m *testing.M) {
+	setupMockHTTPCallback()
+	os.Exit(m.Run())
+}
+
+var callbackFunc func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error)
+var mockNwJSON, mockNwListJSON, mockServiceJSON, mockServiceListJSON []byte
+var mockNwName = "test"
+var mockNwID = "2a3456789"
+var mockServiceName = "testSrv"
+var mockServiceID = "2a3456789"
+var mockContainerID = "2a3456789"
+
+func setupMockHTTPCallback() {
+	var list []networkResource
+	nw := networkResource{Name: mockNwName, ID: mockNwID}
+	mockNwJSON, _ = json.Marshal(nw)
+	list = append(list, nw)
+	mockNwListJSON, _ = json.Marshal(list)
+
+	var srvList []endpointResource
+	ep := endpointResource{Name: mockServiceName, ID: mockServiceID, Network: mockNwName}
+	mockServiceJSON, _ = json.Marshal(ep)
+	srvList = append(srvList, ep)
+	mockServiceListJSON, _ = json.Marshal(srvList)
+
+	callbackFunc = func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+		var rsp string
+		switch method {
+		case "GET":
+			if strings.Contains(path, fmt.Sprintf("networks?name=%s", mockNwName)) {
+				rsp = string(mockNwListJSON)
+			} else if strings.Contains(path, "networks?name=") {
+				rsp = "[]"
+			} else if strings.Contains(path, fmt.Sprintf("networks?partial-id=%s", mockNwID)) {
+				rsp = string(mockNwListJSON)
+			} else if strings.Contains(path, "networks?partial-id=") {
+				rsp = "[]"
+			} else if strings.HasSuffix(path, "networks") {
+				rsp = string(mockNwListJSON)
+			} else if strings.HasSuffix(path, "networks/"+mockNwID) {
+				rsp = string(mockNwJSON)
+			} else if strings.Contains(path, fmt.Sprintf("endpoints?name=%s", mockServiceName)) {
+				rsp = string(mockServiceListJSON)
+			} else if strings.Contains(path, "endpoints?name=") {
+				rsp = "[]"
+			} else if strings.Contains(path, fmt.Sprintf("endpoints?partial-id=%s", mockServiceID)) {
+				rsp = string(mockServiceListJSON)
+			} else if strings.Contains(path, "endpoints?partial-id=") {
+				rsp = "[]"
+			} else if strings.HasSuffix(path, "endpoints") {
+				rsp = string(mockServiceListJSON)
+			} else if strings.HasSuffix(path, "endpoints/"+mockServiceID) {
+				rsp = string(mockServiceJSON)
+			}
+		case "POST":
+			var data []byte
+			if strings.HasSuffix(path, "networks") {
+				data, _ = json.Marshal(mockNwID)
+			} else if strings.HasSuffix(path, "endpoints") {
+				data, _ = json.Marshal(mockServiceID)
+			} else if strings.HasSuffix(path, "containers") {
+				data, _ = json.Marshal(mockContainerID)
+			}
+			rsp = string(data)
+		case "PUT":
+		case "DELETE":
+			rsp = ""
+		}
+		return nopCloser{bytes.NewBufferString(rsp)}, 200, nil
+	}
+}
+
+func TestClientDummyCommand(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "dummy")
+	if err == nil {
+		t.Fatalf("Incorrect Command must fail")
+	}
+}
+
+func TestClientNetworkInvalidCommand(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "invalid")
+	if err == nil {
+		t.Fatalf("Passing invalid commands must fail")
+	}
+}
+
+func TestClientNetworkCreate(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "create", mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkCreateWithDriver(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "create", "-f=dummy", mockNwName)
+	if err == nil {
+		t.Fatalf("Passing incorrect flags to the create command must fail")
+	}
+
+	err = cli.Cmd("docker", "network", "create", "-d=dummy", mockNwName)
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+}
+
+func TestClientNetworkRm(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "rm", mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkLs(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "ls")
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkInfo(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "info", mockNwName)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+func TestClientNetworkInfoById(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "info", mockNwID)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+
+// Docker Flag processing in flag.go uses os.Exit() frequently, even for --help
+// TODO : Handle the --help test-case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateHelp(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+		return nil, 0, nil
+	}
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "create", "--help")
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+}
+*/
+
+// Docker flag processing in flag.go uses os.Exit(1) for incorrect parameter case.
+// TODO : Handle the missing argument case in the IT when CLI is available
+/*
+func TestClientNetworkServiceCreateMissingArgument(t *testing.T) {
+	var out, errOut bytes.Buffer
+	cFunc := func(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+		return nil, 0, nil
+	}
+	cli := NewNetworkCli(&out, &errOut, callbackFunc)
+
+	err := cli.Cmd("docker", "network", "create")
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+}
+*/
diff --git a/vendor/src/github.com/docker/libnetwork/client/network.go b/vendor/src/github.com/docker/libnetwork/client/network.go
new file mode 100644
index 0000000..4e02329
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/network.go
@@ -0,0 +1,241 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"text/tabwriter"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+const (
+	nullNetType = "null"
+)
+
+type command struct {
+	name        string
+	description string
+}
+
+var (
+	networkCommands = []command{
+		{"create", "Create a network"},
+		{"rm", "Remove a network"},
+		{"ls", "List all networks"},
+		{"info", "Display information of a network"},
+	}
+)
+
+// CmdNetwork handles the root Network UI
+func (cli *NetworkCli) CmdNetwork(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "network", "COMMAND [OPTIONS] [arg...]", networkUsage(chain), false)
+	cmd.Require(flag.Min, 1)
+	err := cmd.ParseFlags(args, true)
+	if err == nil {
+		cmd.Usage()
+		return fmt.Errorf("invalid command : %v", args)
+	}
+	return err
+}
+
+// CmdNetworkCreate handles Network Create UI
+func (cli *NetworkCli) CmdNetworkCreate(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "create", "NETWORK-NAME", "Creates a new network with a name specified by the user", false)
+	flDriver := cmd.String([]string{"d", "-driver"}, "null", "Driver to manage the Network")
+	cmd.Require(flag.Min, 1)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+	if *flDriver == "" {
+		*flDriver = nullNetType
+	}
+
+	nc := networkCreate{Name: cmd.Arg(0), NetworkType: *flDriver}
+
+	obj, _, err := readBody(cli.call("POST", "/networks", nc, nil))
+	if err != nil {
+		return err
+	}
+	var replyID string
+	err = json.Unmarshal(obj, &replyID)
+	if err != nil {
+		return err
+	}
+	fmt.Fprintf(cli.out, "%s\n", replyID)
+	return nil
+}
+
+// CmdNetworkRm handles Network Delete UI
+func (cli *NetworkCli) CmdNetworkRm(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "rm", "NETWORK", "Deletes a network", false)
+	cmd.Require(flag.Min, 1)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+	id, err := lookupNetworkID(cli, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+	_, _, err = readBody(cli.call("DELETE", "/networks/"+id, nil, nil))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// CmdNetworkLs handles Network List UI
+func (cli *NetworkCli) CmdNetworkLs(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "ls", "", "Lists all the networks created by the user", false)
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
+	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Do not truncate the output")
+	nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show the latest network created")
+	last := cmd.Int([]string{"n"}, -1, "Show n last created networks")
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+	obj, _, err := readBody(cli.call("GET", "/networks", nil, nil))
+	if err != nil {
+		return err
+	}
+	if *last == -1 && *nLatest {
+		*last = 1
+	}
+
+	var networkResources []networkResource
+	err = json.Unmarshal(obj, &networkResources)
+	if err != nil {
+		return err
+	}
+
+	wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+
+	// unless quiet (-q) is specified, print field titles
+	if !*quiet {
+		fmt.Fprintln(wr, "NETWORK ID\tNAME\tTYPE")
+	}
+
+	for _, networkResource := range networkResources {
+		ID := networkResource.ID
+		netName := networkResource.Name
+		if !*noTrunc {
+			ID = stringid.TruncateID(ID)
+		}
+		if *quiet {
+			fmt.Fprintln(wr, ID)
+			continue
+		}
+		netType := networkResource.Type
+		fmt.Fprintf(wr, "%s\t%s\t%s\t",
+			ID,
+			netName,
+			netType)
+		fmt.Fprint(wr, "\n")
+	}
+	wr.Flush()
+	return nil
+}
+
+// CmdNetworkInfo handles Network Info UI
+func (cli *NetworkCli) CmdNetworkInfo(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "info", "NETWORK", "Displays detailed information on a network", false)
+	cmd.Require(flag.Min, 1)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	id, err := lookupNetworkID(cli, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	obj, _, err := readBody(cli.call("GET", "/networks/"+id, nil, nil))
+	if err != nil {
+		return err
+	}
+	networkResource := &networkResource{}
+	if err := json.NewDecoder(bytes.NewReader(obj)).Decode(networkResource); err != nil {
+		return err
+	}
+	fmt.Fprintf(cli.out, "Network Id: %s\n", networkResource.ID)
+	fmt.Fprintf(cli.out, "Name: %s\n", networkResource.Name)
+	fmt.Fprintf(cli.out, "Type: %s\n", networkResource.Type)
+	if networkResource.Endpoints != nil {
+		for _, endpointResource := range networkResource.Endpoints {
+			fmt.Fprintf(cli.out, "  Service Id: %s\n", endpointResource.ID)
+			fmt.Fprintf(cli.out, "\tName: %s\n", endpointResource.Name)
+		}
+	}
+
+	return nil
+}
+
+// Helper function to predict if a string is a name or id or partial-id
+// This provides a best-effort mechanism to identify a id with the help of GET Filter APIs
+// Being a UI, its most likely that name will be used by the user, which is used to lookup
+// the corresponding ID. If ID is not found, this function will assume that the passed string
+// is an ID by itself.
+
+func lookupNetworkID(cli *NetworkCli, nameID string) (string, error) {
+	obj, statusCode, err := readBody(cli.call("GET", "/networks?name="+nameID, nil, nil))
+	if err != nil {
+		return "", err
+	}
+
+	if statusCode != http.StatusOK {
+		return "", fmt.Errorf("name query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+	}
+
+	var list []*networkResource
+	err = json.Unmarshal(obj, &list)
+	if err != nil {
+		return "", err
+	}
+	if len(list) > 0 {
+		// name query filter will always return a single-element collection
+		return list[0].ID, nil
+	}
+
+	// Check for Partial-id
+	obj, statusCode, err = readBody(cli.call("GET", "/networks?partial-id="+nameID, nil, nil))
+	if err != nil {
+		return "", err
+	}
+
+	if statusCode != http.StatusOK {
+		return "", fmt.Errorf("partial-id match query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+	}
+
+	err = json.Unmarshal(obj, &list)
+	if err != nil {
+		return "", err
+	}
+	if len(list) == 0 {
+		return "", fmt.Errorf("resource not found %s", nameID)
+	}
+	if len(list) > 1 {
+		return "", fmt.Errorf("multiple Networks matching the partial identifier (%s). Please use full identifier", nameID)
+	}
+	return list[0].ID, nil
+}
+
+func networkUsage(chain string) string {
+	help := "Commands:\n"
+
+	for _, cmd := range networkCommands {
+		help += fmt.Sprintf("    %-25.25s%s\n", cmd.name, cmd.description)
+	}
+
+	for _, cmd := range serviceCommands {
+		help += fmt.Sprintf("    %-25.25s%s\n", "service "+cmd.name, cmd.description)
+	}
+
+	help += fmt.Sprintf("\nRun '%s network COMMAND --help' for more information on a command.", chain)
+	return help
+}
diff --git a/vendor/src/github.com/docker/libnetwork/client/service.go b/vendor/src/github.com/docker/libnetwork/client/service.go
new file mode 100644
index 0000000..afdbb7f
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/service.go
@@ -0,0 +1,7 @@
+// +build !experimental
+
+package client
+
+var (
+	serviceCommands = []command{}
+)
diff --git a/vendor/src/github.com/docker/libnetwork/client/service_experimental.go b/vendor/src/github.com/docker/libnetwork/client/service_experimental.go
new file mode 100644
index 0000000..02555fc
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/service_experimental.go
@@ -0,0 +1,317 @@
+// +build experimental
+
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"text/tabwriter"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+var (
+	serviceCommands = []command{
+		{"create", "Create a service endpoint"},
+		{"rm", "Remove a service endpoint"},
+		{"join", "Join a container to a service endpoint"},
+		{"leave", "Leave a container from a service endpoint"},
+		{"ls", "Lists all service endpoints on a network"},
+		{"info", "Display information of a service endpoint"},
+	}
+)
+
+func lookupServiceID(cli *NetworkCli, networkID string, nameID string) (string, error) {
+	obj, statusCode, err := readBody(cli.call("GET", fmt.Sprintf("/networks/%s/endpoints?name=%s", networkID, nameID), nil, nil))
+	if err != nil {
+		return "", err
+	}
+
+	if statusCode != http.StatusOK {
+		return "", fmt.Errorf("name query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+	}
+
+	var list []*networkResource
+	err = json.Unmarshal(obj, &list)
+	if err != nil {
+		return "", err
+	}
+	if len(list) > 0 {
+		// name query filter will always return a single-element collection
+		return list[0].ID, nil
+	}
+
+	// Check for Partial-id
+	obj, statusCode, err = readBody(cli.call("GET", fmt.Sprintf("/networks/%s/endpoints?partial-id=%s", networkID, nameID), nil, nil))
+	if err != nil {
+		return "", err
+	}
+
+	if statusCode != http.StatusOK {
+		return "", fmt.Errorf("partial-id match query failed for %s due to : statuscode(%d) %v", nameID, statusCode, string(obj))
+	}
+
+	err = json.Unmarshal(obj, &list)
+	if err != nil {
+		return "", err
+	}
+	if len(list) == 0 {
+		return "", fmt.Errorf("resource not found %s", nameID)
+	}
+	if len(list) > 1 {
+		return "", fmt.Errorf("multiple services matching the partial identifier (%s). Please use full identifier", nameID)
+	}
+	return list[0].ID, nil
+}
+
+func lookupContainerID(cli *NetworkCli, nameID string) (string, error) {
+	// TODO : containerID to sandbox-key ?
+	return nameID, nil
+}
+
+// CmdNetworkService handles the network service UI
+func (cli *NetworkCli) CmdNetworkService(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "service", "COMMAND [OPTIONS] [arg...]", serviceUsage(chain), false)
+	cmd.Require(flag.Min, 1)
+	err := cmd.ParseFlags(args, true)
+	if err == nil {
+		cmd.Usage()
+		return fmt.Errorf("Invalid command : %v", args)
+	}
+	return err
+}
+
+// CmdNetworkServiceCreate handles service create UI
+func (cli *NetworkCli) CmdNetworkServiceCreate(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "create", "SERVICE NETWORK", "Creates a new service on a network", false)
+	cmd.Require(flag.Min, 2)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	networkID, err := lookupNetworkID(cli, cmd.Arg(1))
+	if err != nil {
+		return err
+	}
+
+	ec := endpointCreate{Name: cmd.Arg(0), NetworkID: networkID}
+
+	obj, _, err := readBody(cli.call("POST", "/networks/"+networkID+"/endpoints", ec, nil))
+	if err != nil {
+		return err
+	}
+
+	var replyID string
+	err = json.Unmarshal(obj, &replyID)
+	if err != nil {
+		return err
+	}
+
+	fmt.Fprintf(cli.out, "%s\n", replyID)
+	return nil
+}
+
+// CmdNetworkServiceRm handles service delete UI
+func (cli *NetworkCli) CmdNetworkServiceRm(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "rm", "SERVICE NETWORK", "Deletes a service", false)
+	cmd.Require(flag.Min, 2)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	networkID, err := lookupNetworkID(cli, cmd.Arg(1))
+	if err != nil {
+		return err
+	}
+
+	serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	_, _, err = readBody(cli.call("DELETE", "/networks/"+networkID+"/endpoints/"+serviceID, nil, nil))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// CmdNetworkServiceLs handles service list UI
+func (cli *NetworkCli) CmdNetworkServiceLs(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "ls", "NETWORK", "Lists all the services on a network", false)
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
+	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Do not truncate the output")
+	nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show the latest network created")
+	last := cmd.Int([]string{"n"}, -1, "Show n last created networks")
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	cmd.Require(flag.Min, 1)
+
+	networkID, err := lookupNetworkID(cli, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	obj, _, err := readBody(cli.call("GET", "/networks/"+networkID+"/endpoints", nil, nil))
+	if err != nil {
+		fmt.Fprintf(cli.err, "%s", err.Error())
+		return err
+	}
+	if *last == -1 && *nLatest {
+		*last = 1
+	}
+
+	var endpointResources []endpointResource
+	err = json.Unmarshal(obj, &endpointResources)
+	if err != nil {
+		return err
+	}
+
+	wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+	// unless quiet (-q) is specified, print field titles
+	if !*quiet {
+		fmt.Fprintln(wr, "NETWORK SERVICE ID\tNAME\tNETWORK")
+	}
+
+	for _, networkResource := range endpointResources {
+		ID := networkResource.ID
+		netName := networkResource.Name
+		if !*noTrunc {
+			ID = stringid.TruncateID(ID)
+		}
+		if *quiet {
+			fmt.Fprintln(wr, ID)
+			continue
+		}
+		network := networkResource.Network
+		fmt.Fprintf(wr, "%s\t%s\t%s",
+			ID,
+			netName,
+			network)
+		fmt.Fprint(wr, "\n")
+	}
+	wr.Flush()
+
+	return nil
+}
+
+// CmdNetworkServiceInfo handles service info UI
+func (cli *NetworkCli) CmdNetworkServiceInfo(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "info", "SERVICE NETWORK", "Displays detailed information on a service", false)
+	cmd.Require(flag.Min, 2)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	networkID, err := lookupNetworkID(cli, cmd.Arg(1))
+	if err != nil {
+		return err
+	}
+
+	serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	obj, _, err := readBody(cli.call("GET", "/networks/"+networkID+"/endpoints/"+serviceID, nil, nil))
+	if err != nil {
+		fmt.Fprintf(cli.err, "%s", err.Error())
+		return err
+	}
+
+	endpointResource := &endpointResource{}
+	if err := json.NewDecoder(bytes.NewReader(obj)).Decode(endpointResource); err != nil {
+		return err
+	}
+	fmt.Fprintf(cli.out, "Service Id: %s\n", endpointResource.ID)
+	fmt.Fprintf(cli.out, "\tName: %s\n", endpointResource.Name)
+	fmt.Fprintf(cli.out, "\tNetwork: %s\n", endpointResource.Network)
+
+	return nil
+}
+
+// CmdNetworkServiceJoin handles service join UI
+func (cli *NetworkCli) CmdNetworkServiceJoin(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "join", "CONTAINER SERVICE NETWORK", "Sets a container as a service backend", false)
+	cmd.Require(flag.Min, 3)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	containerID, err := lookupContainerID(cli, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	networkID, err := lookupNetworkID(cli, cmd.Arg(2))
+	if err != nil {
+		return err
+	}
+
+	serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(1))
+	if err != nil {
+		return err
+	}
+
+	nc := endpointJoin{ContainerID: containerID}
+
+	_, _, err = readBody(cli.call("POST", "/networks/"+networkID+"/endpoints/"+serviceID+"/containers", nc, nil))
+	if err != nil {
+		fmt.Fprintf(cli.err, "%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// CmdNetworkServiceLeave handles service leave UI
+func (cli *NetworkCli) CmdNetworkServiceLeave(chain string, args ...string) error {
+	cmd := cli.Subcmd(chain, "leave", "CONTAINER SERVICE NETWORK", "Removes a container from service backend", false)
+	cmd.Require(flag.Min, 3)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	containerID, err := lookupContainerID(cli, cmd.Arg(0))
+	if err != nil {
+		return err
+	}
+
+	networkID, err := lookupNetworkID(cli, cmd.Arg(2))
+	if err != nil {
+		return err
+	}
+
+	serviceID, err := lookupServiceID(cli, networkID, cmd.Arg(1))
+	if err != nil {
+		return err
+	}
+
+	_, _, err = readBody(cli.call("DELETE", "/networks/"+networkID+"/endpoints/"+serviceID+"/containers/"+containerID, nil, nil))
+	if err != nil {
+		fmt.Fprintf(cli.err, "%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+func serviceUsage(chain string) string {
+	help := "Commands:\n"
+
+	for _, cmd := range serviceCommands {
+		help += fmt.Sprintf("    %-10.10s%s\n", cmd, cmd.description)
+	}
+
+	help += fmt.Sprintf("\nRun '%s service COMMAND --help' for more information on a command.", chain)
+	return help
+}
diff --git a/vendor/src/github.com/docker/libnetwork/client/types.go b/vendor/src/github.com/docker/libnetwork/client/types.go
new file mode 100644
index 0000000..972ed43
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/client/types.go
@@ -0,0 +1,68 @@
+package client
+
+import "github.com/docker/libnetwork/types"
+
+/***********
+ Resources
+************/
+
+// networkResource is the body of the "get network" http response message
+type networkResource struct {
+	Name      string
+	ID        string
+	Type      string
+	Endpoints []*endpointResource
+}
+
+// endpointResource is the body of the "get endpoint" http response message
+type endpointResource struct {
+	Name    string
+	ID      string
+	Network string
+}
+
+/***********
+  Body types
+  ************/
+
+// networkCreate is the expected body of the "create network" http request message
+type networkCreate struct {
+	Name        string
+	NetworkType string
+	Options     map[string]interface{}
+}
+
+// endpointCreate represents the body of the "create endpoint" http request message
+type endpointCreate struct {
+	Name         string
+	NetworkID    string
+	ExposedPorts []types.TransportPort
+	PortMapping  []types.PortBinding
+}
+
+// endpointJoin represents the expected body of the "join endpoint" or "leave endpoint" http request messages
+type endpointJoin struct {
+	ContainerID       string
+	HostName          string
+	DomainName        string
+	HostsPath         string
+	ResolvConfPath    string
+	DNS               []string
+	ExtraHosts        []endpointExtraHost
+	ParentUpdates     []endpointParentUpdate
+	UseDefaultSandbox bool
+}
+
+// EndpointExtraHost represents the extra host object
+type endpointExtraHost struct {
+	Name    string
+	Address string
+}
+
+// EndpointParentUpdate is the object carrying the information about the
+// endpoint parent that needs to be updated
+type endpointParentUpdate struct {
+	EndpointID string
+	Name       string
+	Address    string
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet.go b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet.go
new file mode 100644
index 0000000..8c59924
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet.go
@@ -0,0 +1,204 @@
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/parsers"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/api"
+	"github.com/docker/libnetwork/client"
+	"github.com/gorilla/mux"
+)
+
+var (
+	// DefaultHTTPHost is used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
+	DefaultHTTPHost = "127.0.0.1"
+	// DefaultHTTPPort is the default http port used by dnet
+	DefaultHTTPPort = 2385
+	// DefaultUnixSocket exported
+	DefaultUnixSocket = "/var/run/dnet.sock"
+)
+
+func main() {
+	_, stdout, stderr := term.StdStreams()
+	logrus.SetOutput(stderr)
+
+	err := dnetCommand(stdout, stderr)
+	if err != nil {
+		os.Exit(1)
+	}
+}
+
+func dnetCommand(stdout, stderr io.Writer) error {
+	flag.Parse()
+
+	if *flHelp {
+		flag.Usage()
+		return nil
+	}
+
+	if *flLogLevel != "" {
+		lvl, err := logrus.ParseLevel(*flLogLevel)
+		if err != nil {
+			fmt.Fprintf(stderr, "Unable to parse logging level: %s\n", *flLogLevel)
+			return err
+		}
+		logrus.SetLevel(lvl)
+	} else {
+		logrus.SetLevel(logrus.InfoLevel)
+	}
+
+	if *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+
+	if *flHost == "" {
+		defaultHost := os.Getenv("DNET_HOST")
+		if defaultHost == "" {
+			// TODO : Add UDS support
+			defaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+		}
+		*flHost = defaultHost
+	}
+
+	dc, err := newDnetConnection(*flHost)
+	if err != nil {
+		if *flDaemon {
+			logrus.Error(err)
+		} else {
+			fmt.Fprint(stderr, err)
+		}
+		return err
+	}
+
+	if *flDaemon {
+		err := dc.dnetDaemon()
+		if err != nil {
+			logrus.Errorf("dnet Daemon exited with an error : %v", err)
+		}
+		return err
+	}
+
+	cli := client.NewNetworkCli(stdout, stderr, dc.httpCall)
+	if err := cli.Cmd("dnet", flag.Args()...); err != nil {
+		fmt.Fprintln(stderr, err)
+		return err
+	}
+	return nil
+}
+
+type dnetConnection struct {
+	// proto holds the client protocol i.e. unix.
+	proto string
+	// addr holds the client address.
+	addr string
+}
+
+func (d *dnetConnection) dnetDaemon() error {
+	controller, err := libnetwork.New()
+	if err != nil {
+		fmt.Println("Error starting dnetDaemon :", err)
+		return err
+	}
+	httpHandler := api.NewHTTPHandler(controller)
+	r := mux.NewRouter().StrictSlash(false)
+	post := r.PathPrefix("/{.*}/networks").Subrouter()
+	post.Methods("GET", "PUT", "POST", "DELETE").HandlerFunc(httpHandler)
+	return http.ListenAndServe(d.addr, r)
+}
+
+func newDnetConnection(val string) (*dnetConnection, error) {
+	url, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
+	if err != nil {
+		return nil, err
+	}
+	protoAddrParts := strings.SplitN(url, "://", 2)
+	if len(protoAddrParts) != 2 {
+		return nil, fmt.Errorf("bad format, expected tcp://ADDR")
+	}
+	if strings.ToLower(protoAddrParts[0]) != "tcp" {
+		return nil, fmt.Errorf("dnet currently only supports tcp transport")
+	}
+
+	return &dnetConnection{protoAddrParts[0], protoAddrParts[1]}, nil
+}
+
+func (d *dnetConnection) httpCall(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) {
+	var in io.Reader
+	in, err := encodeData(data)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	req, err := http.NewRequest(method, fmt.Sprintf("/dnet%s", path), in)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	setupRequestHeaders(method, data, req, headers)
+
+	req.URL.Host = d.addr
+	req.URL.Scheme = "http"
+
+	httpClient := &http.Client{}
+	resp, err := httpClient.Do(req)
+	statusCode := -1
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+	if err != nil {
+		return nil, statusCode, fmt.Errorf("error when trying to connect: %v", err)
+	}
+
+	if statusCode < 200 || statusCode >= 400 {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return nil, statusCode, err
+		}
+		return nil, statusCode, fmt.Errorf("error : %s", bytes.TrimSpace(body))
+	}
+
+	return resp.Body, statusCode, nil
+}
+
+func setupRequestHeaders(method string, data interface{}, req *http.Request, headers map[string][]string) {
+	if data != nil {
+		if headers == nil {
+			headers = make(map[string][]string)
+		}
+		headers["Content-Type"] = []string{"application/json"}
+	}
+
+	expectedPayload := (method == "POST" || method == "PUT")
+
+	if expectedPayload && req.Header.Get("Content-Type") == "" {
+		req.Header.Set("Content-Type", "text/plain")
+	}
+
+	if headers != nil {
+		for k, v := range headers {
+			req.Header[k] = v
+		}
+	}
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+	params := bytes.NewBuffer(nil)
+	if data != nil {
+		if err := json.NewEncoder(params).Encode(data); err != nil {
+			return nil, err
+		}
+	}
+	return params, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet_test.go b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet_test.go
new file mode 100644
index 0000000..b8466f1
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/dnet/dnet_test.go
@@ -0,0 +1,132 @@
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/docker/libnetwork/netutils"
+)
+
+const dnetCommandName = "dnet"
+
+var origStdOut = os.Stdout
+
+func TestDnetDaemonCustom(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		t.Skip("This test must run inside a container ")
+	}
+	customPort := 4567
+	doneChan := make(chan bool)
+	go func() {
+		args := []string{dnetCommandName, "-d", fmt.Sprintf("-H=:%d", customPort)}
+		executeDnetCommand(t, args, true)
+		doneChan <- true
+	}()
+
+	select {
+	case <-doneChan:
+		t.Fatal("dnet Daemon is not supposed to exit")
+	case <-time.After(3 * time.Second):
+		args := []string{dnetCommandName, "-d=false", fmt.Sprintf("-H=:%d", customPort), "-D", "network", "ls"}
+		executeDnetCommand(t, args, true)
+	}
+}
+
+func TestDnetDaemonInvalidCustom(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		t.Skip("This test must run inside a container ")
+	}
+	customPort := 4668
+	doneChan := make(chan bool)
+	go func() {
+		args := []string{dnetCommandName, "-d=true", fmt.Sprintf("-H=:%d", customPort)}
+		executeDnetCommand(t, args, true)
+		doneChan <- true
+	}()
+
+	select {
+	case <-doneChan:
+		t.Fatal("dnet Daemon is not supposed to exit")
+	case <-time.After(3 * time.Second):
+		args := []string{dnetCommandName, "-d=false", "-H=:6669", "-D", "network", "ls"}
+		executeDnetCommand(t, args, false)
+	}
+}
+
+func TestDnetDaemonInvalidParams(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		t.Skip("This test must run inside a container ")
+	}
+	args := []string{dnetCommandName, "-d=false", "-H=tcp:/127.0.0.1:8080"}
+	executeDnetCommand(t, args, false)
+
+	args = []string{dnetCommandName, "-d=false", "-H=unix://var/run/dnet.sock"}
+	executeDnetCommand(t, args, false)
+
+	args = []string{dnetCommandName, "-d=false", "-H=", "-l=invalid"}
+	executeDnetCommand(t, args, false)
+
+	args = []string{dnetCommandName, "-d=false", "-H=", "-l=error", "invalid"}
+	executeDnetCommand(t, args, false)
+}
+
+func TestDnetDefaultsWithFlags(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		t.Skip("This test must run inside a container ")
+	}
+	doneChan := make(chan bool)
+	go func() {
+		args := []string{dnetCommandName, "-d=true", "-H=", "-l=error"}
+		executeDnetCommand(t, args, true)
+		doneChan <- true
+	}()
+
+	select {
+	case <-doneChan:
+		t.Fatal("dnet Daemon is not supposed to exit")
+	case <-time.After(3 * time.Second):
+		args := []string{dnetCommandName, "-d=false", "network", "create", "-d=null", "test"}
+		executeDnetCommand(t, args, true)
+
+		args = []string{dnetCommandName, "-d=false", "-D", "network", "ls"}
+		executeDnetCommand(t, args, true)
+	}
+}
+
+func TestDnetMain(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		t.Skip("This test must run inside a container ")
+	}
+	customPort := 4568
+	doneChan := make(chan bool)
+	go func() {
+		args := []string{dnetCommandName, "-d=true", "-h=false", fmt.Sprintf("-H=:%d", customPort)}
+		os.Args = args
+		main()
+		doneChan <- true
+	}()
+	select {
+	case <-doneChan:
+		t.Fatal("dnet Daemon is not supposed to exit")
+	case <-time.After(2 * time.Second):
+	}
+}
+
+func executeDnetCommand(t *testing.T, args []string, shouldSucced bool) {
+	_, w, _ := os.Pipe()
+	os.Stdout = w
+
+	os.Args = args
+	err := dnetCommand(ioutil.Discard, ioutil.Discard)
+	if shouldSucced && err != nil {
+		os.Stdout = origStdOut
+		t.Fatalf("cli [%v] must succeed, but failed with an error :  %v", args, err)
+	} else if !shouldSucced && err == nil {
+		os.Stdout = origStdOut
+		t.Fatalf("cli [%v] must fail, but succeeded with an error :  %v", args, err)
+	}
+	os.Stdout = origStdOut
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/dnet/flags.go b/vendor/src/github.com/docker/libnetwork/cmd/dnet/flags.go
new file mode 100644
index 0000000..2e77e18
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/dnet/flags.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	flag "github.com/docker/docker/pkg/mflag"
+)
+
+type command struct {
+	name        string
+	description string
+}
+
+type byName []command
+
+var (
+	flDaemon   = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode")
+	flHost     = flag.String([]string{"H", "-host"}, "", "Daemon socket to connect to")
+	flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level")
+	flDebug    = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode")
+	flHelp     = flag.Bool([]string{"h", "-help"}, false, "Print usage")
+
+	dnetCommands = []command{
+		{"network", "Network management commands"},
+	}
+)
+
+func init() {
+	flag.Usage = func() {
+		fmt.Fprint(os.Stdout, "Usage: dnet [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for container networking.\n\nOptions:\n")
+
+		flag.CommandLine.SetOutput(os.Stdout)
+		flag.PrintDefaults()
+
+		help := "\nCommands:\n"
+
+		for _, cmd := range dnetCommands {
+			help += fmt.Sprintf("    %-10.10s%s\n", cmd.name, cmd.description)
+		}
+
+		help += "\nRun 'dnet COMMAND --help' for more information on a command."
+		fmt.Fprintf(os.Stdout, "%s\n", help)
+	}
+}
+
+func printUsage() {
+	fmt.Println("Usage: dnet network <subcommand> <OPTIONS>")
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/readme_test/readme.go b/vendor/src/github.com/docker/libnetwork/cmd/readme_test/readme.go
new file mode 100644
index 0000000..a15fda0
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/readme_test/readme.go
@@ -0,0 +1,66 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/options"
+	"github.com/docker/libnetwork/types"
+)
+
+func main() {
+	// Create a new controller instance
+	controller, err := libnetwork.New()
+	if err != nil {
+		return
+	}
+
+	// Select and configure the network driver
+	networkType := "bridge"
+
+	driverOptions := options.Generic{}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = driverOptions
+	err = controller.ConfigureNetworkDriver(networkType, genericOption)
+	if err != nil {
+		return
+	}
+
+	// Create a network for containers to join.
+	// NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make of
+	network, err := controller.NewNetwork(networkType, "network1")
+	if err != nil {
+		return
+	}
+
+	// For each new container: allocate IP and interfaces. The returned network
+	// settings will be used for container infos (inspect and such), as well as
+	// iptables rules for port publishing. This info is contained or accessible
+	// from the returned endpoint.
+	ep, err := network.CreateEndpoint("Endpoint1")
+	if err != nil {
+		return
+	}
+
+	// A container can join the endpoint by providing the container ID to the join
+	// api which returns the sandbox key which can be used to access the sandbox
+	// created for the container during join.
+	// Join acceps Variadic arguments which will be made use of by libnetwork and Drivers
+	_, err = ep.Join("container1",
+		libnetwork.JoinOptionHostname("test"),
+		libnetwork.JoinOptionDomainname("docker.io"))
+	if err != nil {
+		return
+	}
+
+	// libentwork client can check the endpoint's operational data via the Info() API
+	epInfo, err := ep.DriverInfo()
+	mapData, ok := epInfo[netlabel.PortMap]
+	if ok {
+		portMapping, ok := mapData.([]types.PortBinding)
+		if ok {
+			fmt.Printf("Current port mapping for endpoint %s: %v", ep.Name(), portMapping)
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/cmd/test/main.go b/vendor/src/github.com/docker/libnetwork/cmd/test/main.go
new file mode 100644
index 0000000..d944654
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/cmd/test/main.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/options"
+)
+
+func main() {
+	ip, net, _ := net.ParseCIDR("192.168.100.1/24")
+	net.IP = ip
+
+	options := options.Generic{"AddressIPv4": net}
+	controller, err := libnetwork.New()
+	if err != nil {
+		log.Fatal(err)
+	}
+	netType := "bridge"
+	err = controller.ConfigureNetworkDriver(netType, options)
+	netw, err := controller.NewNetwork(netType, "dummy")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("Network=%#v\n", netw)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/controller.go b/vendor/src/github.com/docker/libnetwork/controller.go
new file mode 100644
index 0000000..442473e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/controller.go
@@ -0,0 +1,301 @@
+/*
+Package libnetwork provides the basic functionality and extension points to
+create network namespaces and allocate interfaces for containers to use.
+
+        // Create a new controller instance
+        controller, _err := libnetwork.New()
+
+        // Select and configure the network driver
+        networkType := "bridge"
+
+        driverOptions := options.Generic{}
+        genericOption := make(map[string]interface{})
+        genericOption[netlabel.GenericData] = driverOptions
+        err := controller.ConfigureNetworkDriver(networkType, genericOption)
+        if err != nil {
+                return
+        }
+
+        // Create a network for containers to join.
+        // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make of
+        network, err := controller.NewNetwork(networkType, "network1")
+        if err != nil {
+                return
+        }
+
+        // For each new container: allocate IP and interfaces. The returned network
+        // settings will be used for container infos (inspect and such), as well as
+        // iptables rules for port publishing. This info is contained or accessible
+        // from the returned endpoint.
+        ep, err := network.CreateEndpoint("Endpoint1")
+        if err != nil {
+                return
+        }
+
+        // A container can join the endpoint by providing the container ID to the join
+        // api which returns the sandbox key which can be used to access the sandbox
+        // created for the container during join.
+        // Join acceps Variadic arguments which will be made use of by libnetwork and Drivers
+        _, err = ep.Join("container1",
+                libnetwork.JoinOptionHostname("test"),
+                libnetwork.JoinOptionDomainname("docker.io"))
+        if err != nil {
+                return
+        }
+*/
+package libnetwork
+
+import (
+	"sync"
+
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/sandbox"
+	"github.com/docker/libnetwork/types"
+)
+
+// NetworkController provides the interface for controller instance which manages
+// networks.
+type NetworkController interface {
+	// ConfigureNetworkDriver applies the passed options to the driver instance for the specified network type
+	ConfigureNetworkDriver(networkType string, options map[string]interface{}) error
+
+	// Create a new network. The options parameter carries network specific options.
+	// Labels support will be added in the near future.
+	NewNetwork(networkType, name string, options ...NetworkOption) (Network, error)
+
+	// Networks returns the list of Network(s) managed by this controller.
+	Networks() []Network
+
+	// WalkNetworks uses the provided function to walk the Network(s) managed by this controller.
+	WalkNetworks(walker NetworkWalker)
+
+	// NetworkByName returns the Network which has the passed name. If not found, the error ErrNoSuchNetwork is returned.
+	NetworkByName(name string) (Network, error)
+
+	// NetworkByID returns the Network which has the passed id. If not found, the error ErrNoSuchNetwork is returned.
+	NetworkByID(id string) (Network, error)
+}
+
+// NetworkWalker is a client provided function which will be used to walk the Networks.
+// When the function returns true, the walk will stop.
+type NetworkWalker func(nw Network) bool
+
+type sandboxData struct {
+	sandbox sandbox.Sandbox
+	refCnt  int
+}
+
+type networkTable map[types.UUID]*network
+type endpointTable map[types.UUID]*endpoint
+type sandboxTable map[string]*sandboxData
+
+type controller struct {
+	networks  networkTable
+	drivers   driverTable
+	sandboxes sandboxTable
+	sync.Mutex
+}
+
+// New creates a new instance of network controller.
+func New() (NetworkController, error) {
+	c := &controller{
+		networks:  networkTable{},
+		sandboxes: sandboxTable{},
+		drivers:   driverTable{}}
+	if err := initDrivers(c); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+func (c *controller) ConfigureNetworkDriver(networkType string, options map[string]interface{}) error {
+	c.Lock()
+	d, ok := c.drivers[networkType]
+	c.Unlock()
+	if !ok {
+		return NetworkTypeError(networkType)
+	}
+	return d.Config(options)
+}
+
+func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver) error {
+	c.Lock()
+	defer c.Unlock()
+	if _, ok := c.drivers[networkType]; ok {
+		return driverapi.ErrActiveRegistration(networkType)
+	}
+	c.drivers[networkType] = driver
+	return nil
+}
+
+// NewNetwork creates a new network of the specified network type. The options
+// are network specific and modeled in a generic way.
+func (c *controller) NewNetwork(networkType, name string, options ...NetworkOption) (Network, error) {
+	if name == "" {
+		return nil, ErrInvalidName(name)
+	}
+	// Check if a driver for the specified network type is available
+	c.Lock()
+	d, ok := c.drivers[networkType]
+	c.Unlock()
+	if !ok {
+		var err error
+		d, err = c.loadDriver(networkType)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Check if a network already exists with the specified network name
+	c.Lock()
+	for _, n := range c.networks {
+		if n.name == name {
+			c.Unlock()
+			return nil, NetworkNameError(name)
+		}
+	}
+	c.Unlock()
+
+	// Construct the network object
+	network := &network{
+		name:      name,
+		id:        types.UUID(stringid.GenerateRandomID()),
+		ctrlr:     c,
+		driver:    d,
+		endpoints: endpointTable{},
+	}
+
+	network.processOptions(options...)
+	// Create the network
+	if err := d.CreateNetwork(network.id, network.generic); err != nil {
+		return nil, err
+	}
+
+	// Store the network handler in controller
+	c.Lock()
+	c.networks[network.id] = network
+	c.Unlock()
+
+	return network, nil
+}
+
+func (c *controller) Networks() []Network {
+	c.Lock()
+	defer c.Unlock()
+
+	list := make([]Network, 0, len(c.networks))
+	for _, n := range c.networks {
+		list = append(list, n)
+	}
+
+	return list
+}
+
+func (c *controller) WalkNetworks(walker NetworkWalker) {
+	for _, n := range c.Networks() {
+		if walker(n) {
+			return
+		}
+	}
+}
+
+func (c *controller) NetworkByName(name string) (Network, error) {
+	if name == "" {
+		return nil, ErrInvalidName(name)
+	}
+	var n Network
+
+	s := func(current Network) bool {
+		if current.Name() == name {
+			n = current
+			return true
+		}
+		return false
+	}
+
+	c.WalkNetworks(s)
+
+	if n == nil {
+		return nil, ErrNoSuchNetwork(name)
+	}
+
+	return n, nil
+}
+
+func (c *controller) NetworkByID(id string) (Network, error) {
+	if id == "" {
+		return nil, ErrInvalidID(id)
+	}
+	c.Lock()
+	defer c.Unlock()
+	if n, ok := c.networks[types.UUID(id)]; ok {
+		return n, nil
+	}
+	return nil, ErrNoSuchNetwork(id)
+}
+
+func (c *controller) sandboxAdd(key string, create bool) (sandbox.Sandbox, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	sData, ok := c.sandboxes[key]
+	if !ok {
+		sb, err := sandbox.NewSandbox(key, create)
+		if err != nil {
+			return nil, err
+		}
+
+		sData = &sandboxData{sandbox: sb, refCnt: 1}
+		c.sandboxes[key] = sData
+		return sData.sandbox, nil
+	}
+
+	sData.refCnt++
+	return sData.sandbox, nil
+}
+
+func (c *controller) sandboxRm(key string) {
+	c.Lock()
+	defer c.Unlock()
+
+	sData := c.sandboxes[key]
+	sData.refCnt--
+
+	if sData.refCnt == 0 {
+		sData.sandbox.Destroy()
+		delete(c.sandboxes, key)
+	}
+}
+
+func (c *controller) sandboxGet(key string) sandbox.Sandbox {
+	c.Lock()
+	defer c.Unlock()
+
+	sData, ok := c.sandboxes[key]
+	if !ok {
+		return nil
+	}
+
+	return sData.sandbox
+}
+
+func (c *controller) loadDriver(networkType string) (driverapi.Driver, error) {
+	// Plugins pkg performs lazy loading of plugins that acts as remote drivers.
+	// As per the design, this Get call will result in remote driver discovery if there is a corresponding plugin available.
+	_, err := plugins.Get(networkType, driverapi.NetworkPluginEndpointType)
+	if err != nil {
+		if err == plugins.ErrNotFound {
+			return nil, types.NotFoundErrorf(err.Error())
+		}
+		return nil, err
+	}
+	c.Lock()
+	defer c.Unlock()
+	d, ok := c.drivers[networkType]
+	if !ok {
+		return nil, ErrInvalidNetworkDriver(networkType)
+	}
+	return d, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/docs/bridge.md b/vendor/src/github.com/docker/libnetwork/docs/bridge.md
new file mode 100644
index 0000000..4633ce8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/bridge.md
@@ -0,0 +1,13 @@
+Bridge Driver
+=============
+
+The bridge driver is an implementation that uses Linux Bridging and iptables to provide connectvity for containers
+It creates a single bridge, called `docker0` by default, and attaches a `veth pair` between the bridge and every endpoint.
+
+## Configuration
+
+The bridge driver supports configuration through the Docker Daemon flags. 
+
+## Usage
+
+This driver is supported for the default "bridge" network only and it cannot be used for any other networks.
diff --git a/vendor/src/github.com/docker/libnetwork/docs/design.md b/vendor/src/github.com/docker/libnetwork/docs/design.md
new file mode 100644
index 0000000..b3112da
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/design.md
@@ -0,0 +1,148 @@
+Design
+======
+
+The vision and goals of libnetwork are highlighted in [roadmap](../ROADMAP.md).
+This document describes how libnetwork has been designed in order to acheive this.
+Requirements for individual releases can be found on the [Project Page](https://github.com/docker/libnetwork/wiki)
+
+Many of the design decisions are inspired by the learnings from the Docker networking design as of Docker v1.6.
+Please refer to this [Docker v1.6 Design](legacy.md) document for more information on networking design as of Docker v1.6.
+
+## Goal
+
+libnetwork project will follow Docker and Linux philosophy of developing small, highly modular and composable tools that works well independently.
+Libnetwork aims to satisfy that composable need for Networking in Containers.
+
+## The Container Network Model
+
+Libnetwork implements Container Network Model (CNM) which formalizes the steps required to provide networking for containers while providing an abstraction that can be used to support multiple network drivers. The CNM is built on 3 main components.
+
+**Sandbox**
+
+A Sandbox contains the configuration of a container's network stack.
+This includes management of the container's interfaces, routing table and DNS settings. 
+An implementation of a Sandbox could be a Linux Network Namespace, a FreeBSD Jail or other similar concept.
+A Sandbox may contain *many* endpoints from *multiple* networks
+
+**Endpoint**
+
+An Endpoint joins a Sandbox to a Network.
+An implementation of an Endpoint could be a `veth` pair, an Open vSwitch internal port or similar.
+An Endpoint can belong to *only one* network but may only belong to *one* Sandbox
+
+**Network**
+
+A Network is a group of Endpoints that are able to communicate with each-other directly.
+An implementation of a Network could be a Linux bridge, a VLAN etc...
+Networks consist of *many* endpoints
+
+## CNM Objects
+
+**NetworkController**
+`NetworkController` object provides the entry-point into libnetwork that exposes simple APIs for the users (such as Docker Engine) to allocate and manage Networks. libnetwork supports multiple active drivers (both inbuilt and remote). `NetworkController` allows user to bind a particular driver to a given network.
+
+**Driver**
+`Driver` is not an user visible object, but drivers provides the actual implementation that makes network work. `NetworkController` however provides an API to configure any specific driver with driver-specific options/labels that is transparent to libnetwork, but can be handled by the drivers directly. Drivers can be both inbuilt (such as Bridge, Host, None & overlay) and remote (from plugin providers) to satisfy various usecases & deployment scenarios. At this point, the Driver owns a network and is responsible for managing the network (including IPAM, etc.). This can be improved in the future by having multiple drivers participating in handling various network management functionalities.
+
+**Network**
+`Network` object is an implementation of the `CNM : Network` as defined above. `NetworkController` provides APIs to create and manage `Network` object. Whenever a `Network` is created or updated, the corresponding `Driver` will be notified of the event. LibNetwork treats `Network` object at an abstract level to provide connectivity between a group of end-points that belong to the same network and isolate from the rest. The Driver performs the actual work of providing the required connectivity and isolation. The connectivity can be within the same host or across multiple-hosts. Hence `Network` has a global scope within a cluster.
+
+**Endpoint**
+`Endpoint` represents a Service Endpoint. It provides the connectivity for services exposed by a container in a network with other services provided by other containers in the network. `Network` object provides APIs to create and manage endpoint. An endpoint can be attached to only one network. `Endpoint` creation calls are made to the corresponding `Driver` which is responsible for allocating resources for the corresponding `Sandbox`. Since Endpoint represents a Service and not necessarily a particular container, `Endpoint` has a global scope within a cluster as well.
+
+**Sandbox**
+`Sandbox` object represents container's network configuration such as ip-address, mac-address, routes, DNS entries. A `Sandbox` object is created when the user requests to create an endpoint on a network. The `Driver` that handles the `Network` is responsible to allocate the required network resources (such as ip-address) and pass the info called `SandboxInfo` back to libnetwork. libnetwork will make use of OS specific constructs (example: netns for Linux) to populate the network configuration into the containers that is represented by the `Sandbox`. A `Sandbox` can have multiple endpoints attached to different networks. Since `Sandbox` is associated with a particular container in a given host, it has a local scope that represents the Host that the Container belong to.
+
+**CNM Attributes**
+
+***Options***
+`Options` provides a generic and flexible mechanism to pass `Driver` specific configuration option from the user to the `Driver` directly. `Options` are just key-value pairs of data with `key` represented by a string and `value` represented by a generic object (such as golang `interface{}`). Libnetwork will operate on the `Options` ONLY if the  `key` matches any of the well-known `Label` defined in the `net-labels` package. `Options` also encompasses `Labels` as explained below. `Options` are generally NOT end-user visible (in UI), while `Labels` are.
+
+***Labels***
+`Labels` are very similar to `Options` & infact they  are just a subset of `Options`. `Labels` are typically end-user visible and are represented in the UI explicitely using the `--labels` option. They are passed from the UI to the `Driver` so that `Driver` can make use of it and perform any `Driver` specific operation (such as a subnet to allocate IP-Addresses from in a Network).
+
+## CNM Lifecycle
+
+Consumers of the CNM, like Docker for example, interact through the CNM Objects and its APIs to network the containers that they manage.
+
+0. `Drivers` registers with `NetworkController`. Build-in drivers registers inside of LibNetwork, while remote Drivers registers with LibNetwork via Plugin mechanism. (*plugin-mechanism is WIP*). Each `driver` handles a particular `networkType`.
+
+1. `NetworkController` object is created using `libnetwork.New()` API to manage the allocation of Networks and optionally configure a `Driver` with driver specific `Options`.
+
+2. `Network` is created using the controller's `NewNetwork()` API by providing a `name` and `networkType`. `networkType` parameter helps to choose a corresponding `Driver` and binds the created `Network` to that `Driver`. From this point, any operation on `Network` will be handled by that `Driver`.
+
+3. `controller.NewNetwork()` API also takes in optional `options` parameter which carries Driver-specific options and `Labels`, which the Drivers can make use for its purpose.
+
+4. `network.CreateEndpoint()` can be called to create a new Endpoint in a given network. This API also accepts optional `options` parameter which drivers can make use of. These 'options' carry both well-known labels and driver-specific labels. Drivers will in turn be called with `driver.CreateEndpoint` and it can choose to reserve IPv4/IPv6 addresses when an `Endpoint` is created in a `Network`. The `Driver` will assign these addresses using `InterfaceInfo` interface defined in the `driverapi`. The IP/IPv6 are needed to complete the endpoint as service definition along with the ports the endpoint exposes since essentially a service endpoint is nothing but a network address and the port number that the application container is listening on.
+
+5. `endpoint.Join()` can be used to attach a container to a `Endpoint`. The Join operation will create a `Sandbox` if it doesnt exist already for that container. The Drivers can make use of the Sandbox Key to identify multiple endpoints attached to a same container. This API also accepts optional `options` parameter which drivers can make use of.
+  * Though it is not a direct design issue of LibNetwork, it is highly encouraged to have users like `Docker` to call the endpoint.Join() during Container's `Start()` lifecycle that is invoked *before* the container is made operational. As part of Docker integration, this will be taken care of.
+  * one of a FAQ on endpoint join() API is that, why do we need an API to create an Endpoint and another to join the endpoint.
+    - The answer is based on the fact that Endpoint represents a Service which may or may not be backed by a Container. When an Endpoint is created, it will have its resources reserved so that any container can get attached to the endpoint later and get a consistent networking behaviour.
+
+6. `endpoint.Leave()` can be invoked when a container is stopped. The `Driver` can cleanup the states that it allocated during the `Join()` call. LibNetwork will delete the `Sandbox` when the last referencing endpoint leaves the network. But LibNetwork keeps hold of the IP addresses as long as the endpoint is still present and will be reused when the container(or any container) joins again. This ensures that the container's resources are reused when they are Stopped and Started again.
+
+7. `endpoint.Delete()` is used to delete an endpoint from a network. This results in deleting an endpoint and cleaning up the cached `sandbox.Info`.
+
+8. `network.Delete()` is used to delete a network. LibNetwork will not allow the delete to proceed if there are any existing endpoints attached to the Network. 
+
+
+## Implementation Details
+
+### Networks & Endpoints
+
+LibNetwork's Network and Endpoint APIs are primiarly for managing the corresponding Objects and book-keeping them to provide a level of abstraction as required by the CNM. It delegates the actual implementation to the drivers which  realizes the functionality as promised in the CNM. For more information on these details, please see [the drivers section](#Drivers)
+
+### Sandbox
+
+Libnetwork provides a framework to implement of a Sandbox in multiple Operating Systems. Currently we have implemented Sandbox for Linux using `namespace_linux.go` and `configure_linux.go` in `sandbox` package 
+This creates a Network Namespace for each sandbox which is uniquely identified by a path on the host filesystem.
+Netlink calls are used to move interfaces from the global namespace to the Sandbox namespace.
+Netlink is also used to manage the routing table in the namespace.
+
+## Drivers
+
+## API
+
+Drivers are essentially an extension of libnetwork and provides the actual implementation for all of the LibNetwork APIs defined above. Hence there is an 1-1 correspondance for all the `Network` and `Endpoint` APIs, which includes :
+* `driver.Config`
+* `driver.CreateNetwork`
+* `driver.DeleteNetwork`
+* `driver.CreateEndpoint`
+* `driver.DeleteEndpoint`
+* `driver.Join`
+* `driver.Leave` 
+
+These Driver facing APIs makes use of unique identifiers (`networkid`,`endpointid`,...) instead of names (as seen in user-facing APIs). 
+
+The APIs are still work in progress and there can be changes to these based on the driver requirements especially when it comes to Multi-host networking.
+
+## Implementations
+
+Libnetwork includes the following driver packages:
+
+- null
+- bridge
+- overlay
+- remote
+
+### Null
+
+The null driver is a `noop` implementation of the driver API, used only in cases where no networking is desired. This is to provide backward compatibility to the Docker's `--net=none` option.
+
+### Bridge
+
+The `bridge` driver provides a Linux-specific bridging implementation based on the Linux Bridge.
+For more details, please [see the Bridge Driver documentation](bridge.md)
+
+### Overlay
+
+The `overlay` driver implements networking that can span multiple hosts using overlay network encapsulations such as VXLAN.
+For more details on its design, please see the [Overlay Driver Design](overlay.md)
+
+### Remote
+
+The `remote` package does not provide a driver, but provides a means of supporting drivers over a remote transport.
+This allows a driver to be written in a language of your choice.
+For further details, please see the [Remote Driver Design](remote.md)
+
diff --git a/vendor/src/github.com/docker/libnetwork/docs/legacy.md b/vendor/src/github.com/docker/libnetwork/docs/legacy.md
new file mode 100644
index 0000000..7a19dcd
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/legacy.md
@@ -0,0 +1,15 @@
+
+This document provides a TLD&R version of https://docs.docker.com/v1.6/articles/networking/.
+If more interested in detailed operational design, please refer to this link.
+
+## Docker Networking design as of Docker v1.6
+
+Prior to libnetwork, Docker Networking was handled in both Docker Engine and libcontainer.
+Docker Engine makes use of the Bridge Driver to provide single-host networking solution with the help of linux bridge and IPTables.
+Docker Engine provides simple configurations such as `--link`, `--expose`,... to enable container connectivity within the same host by abstracting away networking configuration completely from the Containers.
+For external connectivity, it relied upon NAT & Port-mapping 
+
+Docker Engine was responsible for providing the configuration for the container's networking stack.
+
+Libcontainer would then use this information to create the necessary networking devices and move them in to a network namespace.
+This namespace would then be used when the container is started.
diff --git a/vendor/src/github.com/docker/libnetwork/docs/overlay.md b/vendor/src/github.com/docker/libnetwork/docs/overlay.md
new file mode 100644
index 0000000..ec48618
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/overlay.md
@@ -0,0 +1,6 @@
+Overlay Driver
+==============
+
+## Configuration
+
+## Usage
diff --git a/vendor/src/github.com/docker/libnetwork/docs/remote.md b/vendor/src/github.com/docker/libnetwork/docs/remote.md
new file mode 100644
index 0000000..c34a1cd
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/docs/remote.md
@@ -0,0 +1,18 @@
+Remote Drivers
+==============
+
+The remote driver package provides the integration point for dynamically-registered drivers.
+
+## LibNetwork Integration
+
+When LibNetwork initialises the `Remote` package with the `Init()` function, it passes a `DriverCallback` as a parameter, which implements the `RegisterDriver()`. The Remote Driver package can use this interface to register any of the `Dynamic` Drivers/Plugins with LibNetwork's `NetworkController`.
+
+This design ensures that the implementation details (TBD) of Dynamic Driver Registration mechanism is completely owned by the inbuilt-Remote driver, and it doesn't expose any of the driver layer to the North of LibNetwork (none of the LibNetwork client APIs are impacted).
+
+## Implementation
+
+The actual implementation of how the Inbuilt Remote Driver registers with the Dynamic Driver is Work-In-Progress. But, the Design Goal is to Honor the bigger goals of LibNetwork by keeping it Highly modular and make sure that LibNetwork is fully composable in nature. 
+
+## Usage
+
+The In-Built Remote Driver follows all the rules of any other In-Built Driver and has exactly the same Driver APIs exposed. LibNetwork will also support driver-specific `options` and User-supplied `Labels` which the Dynamic Drivers can make use for its operations.
diff --git a/vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go b/vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go
new file mode 100644
index 0000000..9fb41ff
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go
@@ -0,0 +1,118 @@
+package driverapi
+
+import (
+	"net"
+
+	"github.com/docker/libnetwork/types"
+)
+
+// NetworkPluginEndpointType represents the Endpoint Type used by Plugin system
+const NetworkPluginEndpointType = "NetworkDriver"
+
+// Driver is an interface that every plugin driver needs to implement.
+type Driver interface {
+	// Push driver specific config to the driver
+	Config(options map[string]interface{}) error
+
+	// CreateNetwork invokes the driver method to create a network passing
+	// the network id and network specific config. The config mechanism will
+	// eventually be replaced with labels which are yet to be introduced.
+	CreateNetwork(nid types.UUID, options map[string]interface{}) error
+
+	// DeleteNetwork invokes the driver method to delete network passing
+	// the network id.
+	DeleteNetwork(nid types.UUID) error
+
+	// CreateEndpoint invokes the driver method to create an endpoint
+	// passing the network id, endpoint id endpoint information and driver
+	// specific config. The endpoint information can be either consumed by
+	// the driver or populated by the driver. The config mechanism will
+	// eventually be replaced with labels which are yet to be introduced.
+	CreateEndpoint(nid, eid types.UUID, epInfo EndpointInfo, options map[string]interface{}) error
+
+	// DeleteEndpoint invokes the driver method to delete an endpoint
+	// passing the network id and endpoint id.
+	DeleteEndpoint(nid, eid types.UUID) error
+
+	// EndpointOperInfo retrieves from the driver the operational data related to the specified endpoint
+	EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error)
+
+	// Join method is invoked when a Sandbox is attached to an endpoint.
+	Join(nid, eid types.UUID, sboxKey string, jinfo JoinInfo, options map[string]interface{}) error
+
+	// Leave method is invoked when a Sandbox detaches from an endpoint.
+	Leave(nid, eid types.UUID) error
+
+	// Type returns the the type of this driver, the network type this driver manages
+	Type() string
+}
+
+// EndpointInfo provides a go interface to fetch or populate endpoint assigned network resources.
+type EndpointInfo interface {
+	// Interfaces returns a list of interfaces bound to the endpoint.
+	// If the list is not empty the driver is only expected to consume the interfaces.
+	// It is an error to try to add interfaces to a non-empty list.
+	// If the list is empty the driver is expected to populate with 0 or more interfaces.
+	Interfaces() []InterfaceInfo
+
+	// AddInterface is used by the driver to add an interface to the interface list.
+	// This method will return an error if the driver attempts to add interfaces
+	// if the Interfaces() method returned a non-empty list.
+	// ID field need only have significance within the endpoint so it can be a simple
+	// monotonically increasing number
+	AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error
+}
+
+// InterfaceInfo provides a go interface for drivers to retrive
+// network information to interface resources.
+type InterfaceInfo interface {
+	// MacAddress returns the MAC address.
+	MacAddress() net.HardwareAddr
+
+	// Address returns the IPv4 address.
+	Address() net.IPNet
+
+	// AddressIPv6 returns the IPv6 address.
+	AddressIPv6() net.IPNet
+
+	// ID returns the numerical id of the interface and has significance only within
+	// the endpoint.
+	ID() int
+}
+
+// InterfaceNameInfo provides a go interface for the drivers to assign names
+// to interfaces.
+type InterfaceNameInfo interface {
+	// SetNames method assigns the srcName and dstPrefix for the interface.
+	SetNames(srcName, dstPrefix string) error
+
+	// ID returns the numerical id that was assigned to the interface by the driver
+	// CreateEndpoint.
+	ID() int
+}
+
+// JoinInfo represents a set of resources that the driver has the ability to provide during
+// join time.
+type JoinInfo interface {
+	// InterfaceNames returns a list of InterfaceNameInfo go interface to facilitate
+	// setting the names for the interfaces.
+	InterfaceNames() []InterfaceNameInfo
+
+	// SetGateway sets the default IPv4 gateway when a container joins the endpoint.
+	SetGateway(net.IP) error
+
+	// SetGatewayIPv6 sets the default IPv6 gateway when a container joins the endpoint.
+	SetGatewayIPv6(net.IP) error
+
+	// SetHostsPath sets the overriding /etc/hosts path to use for the container.
+	SetHostsPath(string) error
+
+	// SetResolvConfPath sets the overriding /etc/resolv.conf path to use for the container.
+	SetResolvConfPath(string) error
+}
+
+// DriverCallback provides a Callback interface for Drivers into LibNetwork
+type DriverCallback interface {
+	// RegisterDriver provides a way for Remote drivers to dynamically register new NetworkType and associate with a driver instance
+	RegisterDriver(name string, driver Driver) error
+}
diff --git a/vendor/src/github.com/docker/libnetwork/driverapi/errors.go b/vendor/src/github.com/docker/libnetwork/driverapi/errors.go
new file mode 100644
index 0000000..041ef41
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/driverapi/errors.go
@@ -0,0 +1,56 @@
+package driverapi
+
+import (
+	"fmt"
+)
+
+// ErrNoNetwork is returned if no network with the specified id exists
+type ErrNoNetwork string
+
+func (enn ErrNoNetwork) Error() string {
+	return fmt.Sprintf("No network (%s) exists", string(enn))
+}
+
+// NotFound denotes the type of this error
+func (enn ErrNoNetwork) NotFound() {}
+
+// ErrEndpointExists is returned if more than one endpoint is added to the network
+type ErrEndpointExists string
+
+func (ee ErrEndpointExists) Error() string {
+	return fmt.Sprintf("Endpoint (%s) already exists (Only one endpoint allowed)", string(ee))
+}
+
+// Forbidden denotes the type of this error
+func (ee ErrEndpointExists) Forbidden() {}
+
+// ErrNotImplemented is returned when a Driver has not implemented an API yet
+type ErrNotImplemented struct{}
+
+func (eni *ErrNotImplemented) Error() string {
+	return "The API is not implemented yet"
+}
+
+// NotImplemented denotes the type of this error
+func (eni *ErrNotImplemented) NotImplemented() {}
+
+// ErrNoEndpoint is returned if no endpoint with the specified id exists
+type ErrNoEndpoint string
+
+func (ene ErrNoEndpoint) Error() string {
+	return fmt.Sprintf("No endpoint (%s) exists", string(ene))
+}
+
+// NotFound denotes the type of this error
+func (ene ErrNoEndpoint) NotFound() {}
+
+// ErrActiveRegistration represents an error when a driver is registered to a networkType that is previously registered
+type ErrActiveRegistration string
+
+// Error interface for ErrActiveRegistration
+func (ar ErrActiveRegistration) Error() string {
+	return fmt.Sprintf("Driver already registered for type %q", string(ar))
+}
+
+// Forbidden denotes the type of this error
+func (ar ErrActiveRegistration) Forbidden() {}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers.go b/vendor/src/github.com/docker/libnetwork/drivers.go
new file mode 100644
index 0000000..130f7ab
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers.go
@@ -0,0 +1,25 @@
+package libnetwork
+
+import (
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/drivers/bridge"
+	"github.com/docker/libnetwork/drivers/host"
+	"github.com/docker/libnetwork/drivers/null"
+	"github.com/docker/libnetwork/drivers/remote"
+)
+
+type driverTable map[string]driverapi.Driver
+
+func initDrivers(dc driverapi.DriverCallback) error {
+	for _, fn := range [](func(driverapi.DriverCallback) error){
+		bridge.Init,
+		host.Init,
+		null.Init,
+		remote.Init,
+	} {
+		if err := fn(dc); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
new file mode 100644
index 0000000..b1cfe74
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -0,0 +1,928 @@
+package bridge
+
+import (
+	"errors"
+	"net"
+	"strings"
+	"sync"
+
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/ipallocator"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/docker/libnetwork/options"
+	"github.com/docker/libnetwork/portmapper"
+	"github.com/docker/libnetwork/sandbox"
+	"github.com/docker/libnetwork/types"
+	"github.com/vishvananda/netlink"
+)
+
+const (
+	networkType             = "bridge"
+	vethPrefix              = "veth"
+	vethLen                 = 7
+	containerVethPrefix     = "eth"
+	maxAllocatePortAttempts = 10
+	ifaceID                 = 1
+)
+
+var (
+	ipAllocator *ipallocator.IPAllocator
+	portMapper  *portmapper.PortMapper
+)
+
+// Configuration info for the "bridge" driver.
+type Configuration struct {
+	EnableIPForwarding bool
+}
+
+// NetworkConfiguration for network specific configuration
+type NetworkConfiguration struct {
+	BridgeName            string
+	AddressIPv4           *net.IPNet
+	FixedCIDR             *net.IPNet
+	FixedCIDRv6           *net.IPNet
+	EnableIPv6            bool
+	EnableIPTables        bool
+	EnableIPMasquerade    bool
+	EnableICC             bool
+	Mtu                   int
+	DefaultGatewayIPv4    net.IP
+	DefaultGatewayIPv6    net.IP
+	DefaultBindingIP      net.IP
+	AllowNonDefaultBridge bool
+	EnableUserlandProxy   bool
+}
+
+// EndpointConfiguration represents the user specified configuration for the sandbox endpoint
+type EndpointConfiguration struct {
+	MacAddress   net.HardwareAddr
+	PortBindings []types.PortBinding
+	ExposedPorts []types.TransportPort
+}
+
+// ContainerConfiguration represents the user specified configuration for a container
+type ContainerConfiguration struct {
+	ParentEndpoints []string
+	ChildEndpoints  []string
+}
+
+type bridgeEndpoint struct {
+	id              types.UUID
+	intf            *sandbox.Interface
+	macAddress      net.HardwareAddr
+	config          *EndpointConfiguration // User specified parameters
+	containerConfig *ContainerConfiguration
+	portMapping     []types.PortBinding // Operation port bindings
+}
+
+type bridgeNetwork struct {
+	id        types.UUID
+	bridge    *bridgeInterface // The bridge's L3 interface
+	config    *NetworkConfiguration
+	endpoints map[types.UUID]*bridgeEndpoint // key: endpoint id
+	sync.Mutex
+}
+
+type driver struct {
+	config  *Configuration
+	network *bridgeNetwork
+	sync.Mutex
+}
+
+func init() {
+	ipAllocator = ipallocator.New()
+	portMapper = portmapper.New()
+}
+
+// New constructs a new bridge driver
+func newDriver() driverapi.Driver {
+	return &driver{}
+}
+
+// Init registers a new instance of bridge driver
+func Init(dc driverapi.DriverCallback) error {
+	return dc.RegisterDriver(networkType, newDriver())
+}
+
+// Validate performs a static validation on the network configuration parameters.
+// Whatever can be assessed a priori before attempting any programming.
+func (c *NetworkConfiguration) Validate() error {
+	if c.Mtu < 0 {
+		return ErrInvalidMtu(c.Mtu)
+	}
+
+	// If bridge v4 subnet is specified
+	if c.AddressIPv4 != nil {
+		// If Container restricted subnet is specified, it must be a subset of bridge subnet
+		if c.FixedCIDR != nil {
+			// Check Network address
+			if !c.AddressIPv4.Contains(c.FixedCIDR.IP) {
+				return &ErrInvalidContainerSubnet{}
+			}
+			// Check it is effectively a subset
+			brNetLen, _ := c.AddressIPv4.Mask.Size()
+			cnNetLen, _ := c.FixedCIDR.Mask.Size()
+			if brNetLen > cnNetLen {
+				return &ErrInvalidContainerSubnet{}
+			}
+		}
+		// If default gw is specified, it must be part of bridge subnet
+		if c.DefaultGatewayIPv4 != nil {
+			if !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) {
+				return &ErrInvalidGateway{}
+			}
+		}
+	}
+
+	// If default v6 gw is specified, FixedCIDRv6 must be specified and gw must belong to FixedCIDRv6 subnet
+	if c.EnableIPv6 && c.DefaultGatewayIPv6 != nil {
+		if c.FixedCIDRv6 == nil || !c.FixedCIDRv6.Contains(c.DefaultGatewayIPv6) {
+			return &ErrInvalidGateway{}
+		}
+	}
+
+	return nil
+}
+
+func (n *bridgeNetwork) getEndpoint(eid types.UUID) (*bridgeEndpoint, error) {
+	n.Lock()
+	defer n.Unlock()
+
+	if eid == "" {
+		return nil, InvalidEndpointIDError(eid)
+	}
+
+	if ep, ok := n.endpoints[eid]; ok {
+		return ep, nil
+	}
+
+	return nil, nil
+}
+
+func (d *driver) Config(option map[string]interface{}) error {
+	var config *Configuration
+
+	d.Lock()
+	defer d.Unlock()
+
+	if d.config != nil {
+		return &ErrConfigExists{}
+	}
+
+	genericData, ok := option[netlabel.GenericData]
+	if ok && genericData != nil {
+		switch opt := genericData.(type) {
+		case options.Generic:
+			opaqueConfig, err := options.GenerateFromModel(opt, &Configuration{})
+			if err != nil {
+				return err
+			}
+			config = opaqueConfig.(*Configuration)
+		case *Configuration:
+			config = opt
+		default:
+			return &ErrInvalidDriverConfig{}
+		}
+
+		d.config = config
+	} else {
+		config = &Configuration{}
+	}
+
+	if config.EnableIPForwarding {
+		return setupIPForwarding(config)
+	}
+
+	return nil
+}
+
+func (d *driver) getNetwork(id types.UUID) (*bridgeNetwork, error) {
+	// Just a dummy function to return the only network managed by Bridge driver.
+	// But this API makes the caller code unchanged when we move to support multiple networks.
+	d.Lock()
+	defer d.Unlock()
+	return d.network, nil
+}
+
+func parseNetworkOptions(option options.Generic) (*NetworkConfiguration, error) {
+	var config *NetworkConfiguration
+
+	genericData, ok := option[netlabel.GenericData]
+	if ok && genericData != nil {
+		switch opt := genericData.(type) {
+		case options.Generic:
+			opaqueConfig, err := options.GenerateFromModel(opt, &NetworkConfiguration{})
+			if err != nil {
+				return nil, err
+			}
+			config = opaqueConfig.(*NetworkConfiguration)
+		case *NetworkConfiguration:
+			config = opt
+		default:
+			return nil, &ErrInvalidNetworkConfig{}
+		}
+
+		if err := config.Validate(); err != nil {
+			return nil, err
+		}
+	} else {
+		config = &NetworkConfiguration{}
+	}
+
+	if _, ok := option[netlabel.EnableIPv6]; ok {
+		config.EnableIPv6 = option[netlabel.EnableIPv6].(bool)
+	}
+
+	return config, nil
+}
+
+// Create a new network using bridge plugin
+func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) error {
+	var err error
+
+	// Driver must be configured
+	d.Lock()
+
+	// Sanity checks
+	if d.network != nil {
+		d.Unlock()
+		return &ErrNetworkExists{}
+	}
+
+	// Create and set network handler in driver
+	d.network = &bridgeNetwork{id: id, endpoints: make(map[types.UUID]*bridgeEndpoint)}
+	network := d.network
+	d.Unlock()
+
+	// On failure make sure to reset driver network handler to nil
+	defer func() {
+		if err != nil {
+			d.Lock()
+			d.network = nil
+			d.Unlock()
+		}
+	}()
+
+	config, err := parseNetworkOptions(option)
+	if err != nil {
+		return err
+	}
+	network.config = config
+
+	// Create or retrieve the bridge L3 interface
+	bridgeIface := newInterface(config)
+	network.bridge = bridgeIface
+
+	// Prepare the bridge setup configuration
+	bridgeSetup := newBridgeSetup(config, bridgeIface)
+
+	// If the bridge interface doesn't exist, we need to start the setup steps
+	// by creating a new device and assigning it an IPv4 address.
+	bridgeAlreadyExists := bridgeIface.exists()
+	if !bridgeAlreadyExists {
+		bridgeSetup.queueStep(setupDevice)
+	}
+
+	// Even if a bridge exists try to setup IPv4.
+	bridgeSetup.queueStep(setupBridgeIPv4)
+
+	// Conditionally queue setup steps depending on configuration values.
+	for _, step := range []struct {
+		Condition bool
+		Fn        setupStep
+	}{
+		// Enable IPv6 on the bridge if required. We do this even for a
+		// previously  existing bridge, as it may be here from a previous
+		// installation where IPv6 wasn't supported yet and needs to be
+		// assigned an IPv6 link-local address.
+		{config.EnableIPv6, setupBridgeIPv6},
+
+		// We ensure that the bridge has the expectedIPv4 and IPv6 addresses in
+		// the case of a previously existing device.
+		{bridgeAlreadyExists, setupVerifyAndReconcile},
+
+		// Setup the bridge to allocate containers IPv4 addresses in the
+		// specified subnet.
+		{config.FixedCIDR != nil, setupFixedCIDRv4},
+
+		// Setup the bridge to allocate containers global IPv6 addresses in the
+		// specified subnet.
+		{config.FixedCIDRv6 != nil, setupFixedCIDRv6},
+
+		// Setup Loopback Adresses Routing
+		{!config.EnableUserlandProxy, setupLoopbackAdressesRouting},
+
+		// Setup IPTables.
+		{config.EnableIPTables, setupIPTables},
+
+		// Setup DefaultGatewayIPv4
+		{config.DefaultGatewayIPv4 != nil, setupGatewayIPv4},
+
+		// Setup DefaultGatewayIPv6
+		{config.DefaultGatewayIPv6 != nil, setupGatewayIPv6},
+	} {
+		if step.Condition {
+			bridgeSetup.queueStep(step.Fn)
+		}
+	}
+
+	// Block bridge IP from being allocated.
+	bridgeSetup.queueStep(allocateBridgeIP)
+	// Apply the prepared list of steps, and abort at the first error.
+	bridgeSetup.queueStep(setupDeviceUp)
+	if err = bridgeSetup.apply(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d *driver) DeleteNetwork(nid types.UUID) error {
+	var err error
+
+	// Get network handler and remove it from driver
+	d.Lock()
+	n := d.network
+	d.network = nil
+	d.Unlock()
+
+	// On failure set network handler back in driver, but
+	// only if is not already taken over by some other thread
+	defer func() {
+		if err != nil {
+			d.Lock()
+			if d.network == nil {
+				d.network = n
+			}
+			d.Unlock()
+		}
+	}()
+
+	// Sanity check
+	if n == nil {
+		err = driverapi.ErrNoNetwork(nid)
+		return err
+	}
+
+	// Cannot remove network if endpoints are still present
+	if len(n.endpoints) != 0 {
+		err = ActiveEndpointsError(n.id)
+		return err
+	}
+
+	// Programming
+	err = netlink.LinkDel(n.bridge.Link)
+
+	return err
+}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+	var (
+		ipv6Addr *net.IPNet
+		err      error
+	)
+
+	if epInfo == nil {
+		return errors.New("invalid endpoint info passed")
+	}
+
+	if len(epInfo.Interfaces()) != 0 {
+		return errors.New("non empty interface list passed to bridge(local) driver")
+	}
+
+	// Get the network handler and make sure it exists
+	d.Lock()
+	n := d.network
+	config := n.config
+	d.Unlock()
+	if n == nil {
+		return driverapi.ErrNoNetwork(nid)
+	}
+
+	// Sanity check
+	n.Lock()
+	if n.id != nid {
+		n.Unlock()
+		return InvalidNetworkIDError(nid)
+	}
+	n.Unlock()
+
+	// Check if endpoint id is good and retrieve correspondent endpoint
+	ep, err := n.getEndpoint(eid)
+	if err != nil {
+		return err
+	}
+
+	// Endpoint with that id exists either on desired or other sandbox
+	if ep != nil {
+		return driverapi.ErrEndpointExists(eid)
+	}
+
+	// Try to convert the options to endpoint configuration
+	epConfig, err := parseEndpointOptions(epOptions)
+	if err != nil {
+		return err
+	}
+
+	// Create and add the endpoint
+	n.Lock()
+	endpoint := &bridgeEndpoint{id: eid, config: epConfig}
+	n.endpoints[eid] = endpoint
+	n.Unlock()
+
+	// On failure make sure to remove the endpoint
+	defer func() {
+		if err != nil {
+			n.Lock()
+			delete(n.endpoints, eid)
+			n.Unlock()
+		}
+	}()
+
+	// Generate a name for what will be the host side pipe interface
+	name1, err := generateIfaceName()
+	if err != nil {
+		return err
+	}
+
+	// Generate a name for what will be the sandbox side pipe interface
+	name2, err := generateIfaceName()
+	if err != nil {
+		return err
+	}
+
+	// Generate and add the interface pipe host <-> sandbox
+	veth := &netlink.Veth{
+		LinkAttrs: netlink.LinkAttrs{Name: name1, TxQLen: 0},
+		PeerName:  name2}
+	if err = netlink.LinkAdd(veth); err != nil {
+		return err
+	}
+
+	// Get the host side pipe interface handler
+	host, err := netlink.LinkByName(name1)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			netlink.LinkDel(host)
+		}
+	}()
+
+	// Get the sandbox side pipe interface handler
+	sbox, err := netlink.LinkByName(name2)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			netlink.LinkDel(sbox)
+		}
+	}()
+
+	// Set the sbox's MAC. If specified, use the one configured by user, otherwise use a random one
+	mac := electMacAddress(epConfig)
+	err = netlink.LinkSetHardwareAddr(sbox, mac)
+	if err != nil {
+		return err
+	}
+	endpoint.macAddress = mac
+
+	// Add bridge inherited attributes to pipe interfaces
+	if config.Mtu != 0 {
+		err = netlink.LinkSetMTU(host, config.Mtu)
+		if err != nil {
+			return err
+		}
+		err = netlink.LinkSetMTU(sbox, config.Mtu)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Attach host side pipe interface into the bridge
+	if err = netlink.LinkSetMaster(host,
+		&netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: config.BridgeName}}); err != nil {
+		return err
+	}
+
+	// v4 address for the sandbox side pipe interface
+	ip4, err := ipAllocator.RequestIP(n.bridge.bridgeIPv4, nil)
+	if err != nil {
+		return err
+	}
+	ipv4Addr := &net.IPNet{IP: ip4, Mask: n.bridge.bridgeIPv4.Mask}
+
+	// v6 address for the sandbox side pipe interface
+	ipv6Addr = &net.IPNet{}
+	if config.EnableIPv6 {
+		var ip6 net.IP
+
+		network := n.bridge.bridgeIPv6
+		if config.FixedCIDRv6 != nil {
+			network = config.FixedCIDRv6
+		}
+
+		ones, _ := network.Mask.Size()
+		if ones <= 80 {
+			ip6 = make(net.IP, len(network.IP))
+			copy(ip6, network.IP)
+			for i, h := range mac {
+				ip6[i+10] = h
+			}
+		}
+
+		ip6, err := ipAllocator.RequestIP(network, ip6)
+		if err != nil {
+			return err
+		}
+
+		ipv6Addr = &net.IPNet{IP: ip6, Mask: network.Mask}
+	}
+
+	// Create the sandbox side pipe interface
+	intf := &sandbox.Interface{}
+	intf.SrcName = name2
+	intf.DstName = containerVethPrefix
+	intf.Address = ipv4Addr
+
+	if config.EnableIPv6 {
+		intf.AddressIPv6 = ipv6Addr
+	}
+
+	// Store the interface in endpoint, this is needed for cleanup on DeleteEndpoint()
+	endpoint.intf = intf
+
+	err = epInfo.AddInterface(ifaceID, endpoint.macAddress, *ipv4Addr, *ipv6Addr)
+	if err != nil {
+		return err
+	}
+
+	// Program any required port mapping and store them in the endpoint
+	endpoint.portMapping, err = allocatePorts(epConfig, intf, config.DefaultBindingIP, config.EnableUserlandProxy)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+	var err error
+
+	// Get the network handler and make sure it exists
+	d.Lock()
+	n := d.network
+	config := n.config
+	d.Unlock()
+	if n == nil {
+		return driverapi.ErrNoNetwork(nid)
+	}
+
+	// Sanity Check
+	n.Lock()
+	if n.id != nid {
+		n.Unlock()
+		return InvalidNetworkIDError(nid)
+	}
+	n.Unlock()
+
+	// Check endpoint id and if an endpoint is actually there
+	ep, err := n.getEndpoint(eid)
+	if err != nil {
+		return err
+	}
+	if ep == nil {
+		return EndpointNotFoundError(eid)
+	}
+
+	// Remove it
+	n.Lock()
+	delete(n.endpoints, eid)
+	n.Unlock()
+
+	// On failure make sure to set back ep in n.endpoints, but only
+	// if it hasn't been taken over already by some other thread.
+	defer func() {
+		if err != nil {
+			n.Lock()
+			if _, ok := n.endpoints[eid]; !ok {
+				n.endpoints[eid] = ep
+			}
+			n.Unlock()
+		}
+	}()
+
+	// Remove port mappings. Do not stop endpoint delete on unmap failure
+	releasePorts(ep)
+
+	// Release the v4 address allocated to this endpoint's sandbox interface
+	err = ipAllocator.ReleaseIP(n.bridge.bridgeIPv4, ep.intf.Address.IP)
+	if err != nil {
+		return err
+	}
+
+	// Release the v6 address allocated to this endpoint's sandbox interface
+	if config.EnableIPv6 {
+		err := ipAllocator.ReleaseIP(n.bridge.bridgeIPv6, ep.intf.AddressIPv6.IP)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Try removal of link. Discard error: link pair might have
+	// already been deleted by sandbox delete.
+	link, err := netlink.LinkByName(ep.intf.SrcName)
+	if err == nil {
+		netlink.LinkDel(link)
+	}
+
+	return nil
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+	// Get the network handler and make sure it exists
+	d.Lock()
+	n := d.network
+	d.Unlock()
+	if n == nil {
+		return nil, driverapi.ErrNoNetwork(nid)
+	}
+
+	// Sanity check
+	n.Lock()
+	if n.id != nid {
+		n.Unlock()
+		return nil, InvalidNetworkIDError(nid)
+	}
+	n.Unlock()
+
+	// Check if endpoint id is good and retrieve correspondent endpoint
+	ep, err := n.getEndpoint(eid)
+	if err != nil {
+		return nil, err
+	}
+	if ep == nil {
+		return nil, driverapi.ErrNoEndpoint(eid)
+	}
+
+	m := make(map[string]interface{})
+
+	if ep.portMapping != nil {
+		// Return a copy of the operational data
+		pmc := make([]types.PortBinding, 0, len(ep.portMapping))
+		for _, pm := range ep.portMapping {
+			pmc = append(pmc, pm.GetCopy())
+		}
+		m[netlabel.PortMap] = pmc
+	}
+
+	if len(ep.macAddress) != 0 {
+		m[netlabel.MacAddress] = ep.macAddress
+	}
+
+	return m, nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+	network, err := d.getNetwork(nid)
+	if err != nil {
+		return err
+	}
+
+	endpoint, err := network.getEndpoint(eid)
+	if err != nil {
+		return err
+	}
+
+	if endpoint == nil {
+		return EndpointNotFoundError(eid)
+	}
+
+	for _, iNames := range jinfo.InterfaceNames() {
+		// Make sure to set names on the correct interface ID.
+		if iNames.ID() == ifaceID {
+			err = iNames.SetNames(endpoint.intf.SrcName, endpoint.intf.DstName)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	err = jinfo.SetGateway(network.bridge.gatewayIPv4)
+	if err != nil {
+		return err
+	}
+
+	err = jinfo.SetGatewayIPv6(network.bridge.gatewayIPv6)
+	if err != nil {
+		return err
+	}
+
+	if !network.config.EnableICC {
+		return d.link(network, endpoint, options, true)
+	}
+
+	return nil
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+	network, err := d.getNetwork(nid)
+	if err != nil {
+		return err
+	}
+
+	endpoint, err := network.getEndpoint(eid)
+	if err != nil {
+		return err
+	}
+
+	if endpoint == nil {
+		return EndpointNotFoundError(eid)
+	}
+
+	if !network.config.EnableICC {
+		return d.link(network, endpoint, nil, false)
+	}
+
+	return nil
+}
+
+func (d *driver) link(network *bridgeNetwork, endpoint *bridgeEndpoint, options map[string]interface{}, enable bool) error {
+	var (
+		cc  *ContainerConfiguration
+		err error
+	)
+
+	if enable {
+		cc, err = parseContainerOptions(options)
+		if err != nil {
+			return err
+		}
+	} else {
+		cc = endpoint.containerConfig
+	}
+
+	if cc == nil {
+		return nil
+	}
+
+	if endpoint.config != nil && endpoint.config.ExposedPorts != nil {
+		for _, p := range cc.ParentEndpoints {
+			var parentEndpoint *bridgeEndpoint
+			parentEndpoint, err = network.getEndpoint(types.UUID(p))
+			if err != nil {
+				return err
+			}
+			if parentEndpoint == nil {
+				err = InvalidEndpointIDError(p)
+				return err
+			}
+
+			l := newLink(parentEndpoint.intf.Address.IP.String(),
+				endpoint.intf.Address.IP.String(),
+				endpoint.config.ExposedPorts, network.config.BridgeName)
+			if enable {
+				err = l.Enable()
+				if err != nil {
+					return err
+				}
+				defer func() {
+					if err != nil {
+						l.Disable()
+					}
+				}()
+			} else {
+				l.Disable()
+			}
+		}
+	}
+
+	for _, c := range cc.ChildEndpoints {
+		var childEndpoint *bridgeEndpoint
+		childEndpoint, err = network.getEndpoint(types.UUID(c))
+		if err != nil {
+			return err
+		}
+		if childEndpoint == nil {
+			err = InvalidEndpointIDError(c)
+			return err
+		}
+		if childEndpoint.config == nil || childEndpoint.config.ExposedPorts == nil {
+			continue
+		}
+
+		l := newLink(endpoint.intf.Address.IP.String(),
+			childEndpoint.intf.Address.IP.String(),
+			childEndpoint.config.ExposedPorts, network.config.BridgeName)
+		if enable {
+			err = l.Enable()
+			if err != nil {
+				return err
+			}
+			defer func() {
+				if err != nil {
+					l.Disable()
+				}
+			}()
+		} else {
+			l.Disable()
+		}
+	}
+
+	if enable {
+		endpoint.containerConfig = cc
+	}
+
+	return nil
+}
+
+func (d *driver) Type() string {
+	return networkType
+}
+
+func parseEndpointOptions(epOptions map[string]interface{}) (*EndpointConfiguration, error) {
+	if epOptions == nil {
+		return nil, nil
+	}
+
+	ec := &EndpointConfiguration{}
+
+	if opt, ok := epOptions[netlabel.MacAddress]; ok {
+		if mac, ok := opt.(net.HardwareAddr); ok {
+			ec.MacAddress = mac
+		} else {
+			return nil, &ErrInvalidEndpointConfig{}
+		}
+	}
+
+	if opt, ok := epOptions[netlabel.PortMap]; ok {
+		if bs, ok := opt.([]types.PortBinding); ok {
+			ec.PortBindings = bs
+		} else {
+			return nil, &ErrInvalidEndpointConfig{}
+		}
+	}
+
+	if opt, ok := epOptions[netlabel.ExposedPorts]; ok {
+		if ports, ok := opt.([]types.TransportPort); ok {
+			ec.ExposedPorts = ports
+		} else {
+			return nil, &ErrInvalidEndpointConfig{}
+		}
+	}
+
+	return ec, nil
+}
+
+func parseContainerOptions(cOptions map[string]interface{}) (*ContainerConfiguration, error) {
+	if cOptions == nil {
+		return nil, nil
+	}
+	genericData := cOptions[netlabel.GenericData]
+	if genericData == nil {
+		return nil, nil
+	}
+	switch opt := genericData.(type) {
+	case options.Generic:
+		opaqueConfig, err := options.GenerateFromModel(opt, &ContainerConfiguration{})
+		if err != nil {
+			return nil, err
+		}
+		return opaqueConfig.(*ContainerConfiguration), nil
+	case *ContainerConfiguration:
+		return opt, nil
+	default:
+		return nil, nil
+	}
+}
+
+func electMacAddress(epConfig *EndpointConfiguration) net.HardwareAddr {
+	if epConfig != nil && epConfig.MacAddress != nil {
+		return epConfig.MacAddress
+	}
+	return netutils.GenerateRandomMAC()
+}
+
+// Generates a name to be used for a virtual ethernet
+// interface. The name is constructed by 'veth' appended
+// by a randomly generated hex value. (example: veth0f60e2c)
+func generateIfaceName() (string, error) {
+	for i := 0; i < 3; i++ {
+		name, err := netutils.GenerateRandomName(vethPrefix, vethLen)
+		if err != nil {
+			continue
+		}
+		if _, err := net.InterfaceByName(name); err != nil {
+			if strings.Contains(err.Error(), "no such") {
+				return name, nil
+			}
+			return "", err
+		}
+	}
+	return "", &ErrIfaceName{}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_test.go
new file mode 100644
index 0000000..f896755
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_test.go
@@ -0,0 +1,532 @@
+package bridge
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"regexp"
+	"testing"
+
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/iptables"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/docker/libnetwork/types"
+	"github.com/vishvananda/netlink"
+)
+
+func TestCreateFullOptions(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	config := &Configuration{
+		EnableIPForwarding: true,
+	}
+
+	netConfig := &NetworkConfiguration{
+		BridgeName:     DefaultBridgeName,
+		EnableIPv6:     true,
+		FixedCIDR:      bridgeNetworks[0],
+		EnableIPTables: true,
+	}
+	_, netConfig.FixedCIDRv6, _ = net.ParseCIDR("2001:db8::/48")
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	if err := d.Config(genericOption); err != nil {
+		t.Fatalf("Failed to setup driver config: %v", err)
+	}
+
+	netOption := make(map[string]interface{})
+	netOption[netlabel.GenericData] = netConfig
+
+	err := d.CreateNetwork("dummy", netOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+}
+
+func TestCreate(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	if err := d.CreateNetwork("dummy", genericOption); err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+}
+
+func TestCreateFail(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	config := &NetworkConfiguration{BridgeName: "dummy0"}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	if err := d.CreateNetwork("dummy", genericOption); err == nil {
+		t.Fatal("Bridge creation was expected to fail")
+	}
+}
+
+type testInterface struct {
+	id      int
+	mac     net.HardwareAddr
+	addr    net.IPNet
+	addrv6  net.IPNet
+	srcName string
+	dstName string
+}
+
+type testEndpoint struct {
+	ifaces         []*testInterface
+	gw             net.IP
+	gw6            net.IP
+	hostsPath      string
+	resolvConfPath string
+}
+
+func (te *testEndpoint) Interfaces() []driverapi.InterfaceInfo {
+	iList := make([]driverapi.InterfaceInfo, len(te.ifaces))
+
+	for i, iface := range te.ifaces {
+		iList[i] = iface
+	}
+
+	return iList
+}
+
+func (te *testEndpoint) AddInterface(id int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
+	iface := &testInterface{id: id, addr: ipv4, addrv6: ipv6}
+	te.ifaces = append(te.ifaces, iface)
+	return nil
+}
+
+func (i *testInterface) ID() int {
+	return i.id
+}
+
+func (i *testInterface) MacAddress() net.HardwareAddr {
+	return i.mac
+}
+
+func (i *testInterface) Address() net.IPNet {
+	return i.addr
+}
+
+func (i *testInterface) AddressIPv6() net.IPNet {
+	return i.addrv6
+}
+
+func (i *testInterface) SetNames(srcName string, dstName string) error {
+	i.srcName = srcName
+	i.dstName = dstName
+	return nil
+}
+
+func (te *testEndpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
+	iList := make([]driverapi.InterfaceNameInfo, len(te.ifaces))
+
+	for i, iface := range te.ifaces {
+		iList[i] = iface
+	}
+
+	return iList
+}
+
+func (te *testEndpoint) SetGateway(gw net.IP) error {
+	te.gw = gw
+	return nil
+}
+
+func (te *testEndpoint) SetGatewayIPv6(gw6 net.IP) error {
+	te.gw6 = gw6
+	return nil
+}
+
+func (te *testEndpoint) SetHostsPath(path string) error {
+	te.hostsPath = path
+	return nil
+}
+
+func (te *testEndpoint) SetResolvConfPath(path string) error {
+	te.resolvConfPath = path
+	return nil
+}
+
+func TestQueryEndpointInfo(t *testing.T) {
+	testQueryEndpointInfo(t, true)
+}
+
+func TestQueryEndpointInfoHairpin(t *testing.T) {
+	testQueryEndpointInfo(t, false)
+}
+
+func testQueryEndpointInfo(t *testing.T, ulPxyEnabled bool) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+	dd, _ := d.(*driver)
+
+	config := &NetworkConfiguration{
+		BridgeName:          DefaultBridgeName,
+		EnableIPTables:      true,
+		EnableICC:           false,
+		EnableUserlandProxy: ulPxyEnabled,
+	}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("net1", genericOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	portMappings := getPortMapping()
+	epOptions := make(map[string]interface{})
+	epOptions[netlabel.PortMap] = portMappings
+
+	te := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("net1", "ep1", te, epOptions)
+	if err != nil {
+		t.Fatalf("Failed to create an endpoint : %s", err.Error())
+	}
+
+	ep, _ := dd.network.endpoints["ep1"]
+	data, err := d.EndpointOperInfo(dd.network.id, ep.id)
+	if err != nil {
+		t.Fatalf("Failed to ask for endpoint operational data:  %v", err)
+	}
+	pmd, ok := data[netlabel.PortMap]
+	if !ok {
+		t.Fatalf("Endpoint operational data does not contain port mapping data")
+	}
+	pm, ok := pmd.([]types.PortBinding)
+	if !ok {
+		t.Fatalf("Unexpected format for port mapping in endpoint operational data")
+	}
+	if len(ep.portMapping) != len(pm) {
+		t.Fatalf("Incomplete data for port mapping in endpoint operational data")
+	}
+	for i, pb := range ep.portMapping {
+		if !pb.Equal(&pm[i]) {
+			t.Fatalf("Unexpected data for port mapping in endpoint operational data")
+		}
+	}
+
+	// Cleanup as host ports are there
+	err = releasePorts(ep)
+	if err != nil {
+		t.Fatalf("Failed to release mapped ports: %v", err)
+	}
+}
+
+func TestCreateLinkWithOptions(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+	netOptions := make(map[string]interface{})
+	netOptions[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("net1", netOptions)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	mac := net.HardwareAddr([]byte{0x1e, 0x67, 0x66, 0x44, 0x55, 0x66})
+	epOptions := make(map[string]interface{})
+	epOptions[netlabel.MacAddress] = mac
+
+	te := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("net1", "ep", te, epOptions)
+	if err != nil {
+		t.Fatalf("Failed to create an endpoint: %s", err.Error())
+	}
+
+	err = d.Join("net1", "ep", "sbox", te, nil)
+	if err != nil {
+		t.Fatalf("Failed to join the endpoint: %v", err)
+	}
+
+	ifaceName := te.ifaces[0].srcName
+	veth, err := netlink.LinkByName(ifaceName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(mac, veth.Attrs().HardwareAddr) {
+		t.Fatalf("Failed to parse and program endpoint configuration")
+	}
+}
+
+func getExposedPorts() []types.TransportPort {
+	return []types.TransportPort{
+		types.TransportPort{Proto: types.TCP, Port: uint16(5000)},
+		types.TransportPort{Proto: types.UDP, Port: uint16(400)},
+		types.TransportPort{Proto: types.TCP, Port: uint16(600)},
+	}
+}
+
+func getPortMapping() []types.PortBinding {
+	return []types.PortBinding{
+		types.PortBinding{Proto: types.TCP, Port: uint16(230), HostPort: uint16(23000)},
+		types.PortBinding{Proto: types.UDP, Port: uint16(200), HostPort: uint16(22000)},
+		types.PortBinding{Proto: types.TCP, Port: uint16(120), HostPort: uint16(12000)},
+	}
+}
+
+func TestLinkContainers(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	d := newDriver()
+
+	config := &NetworkConfiguration{
+		BridgeName:     DefaultBridgeName,
+		EnableIPTables: true,
+		EnableICC:      false,
+	}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("net1", genericOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	exposedPorts := getExposedPorts()
+	epOptions := make(map[string]interface{})
+	epOptions[netlabel.ExposedPorts] = exposedPorts
+
+	te1 := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("net1", "ep1", te1, epOptions)
+	if err != nil {
+		t.Fatalf("Failed to create an endpoint : %s", err.Error())
+	}
+
+	addr1 := te1.ifaces[0].addr
+	if addr1.IP.To4() == nil {
+		t.Fatalf("No Ipv4 address assigned to the endpoint:  ep1")
+	}
+
+	te2 := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("net1", "ep2", te2, nil)
+	if err != nil {
+		t.Fatalf("Failed to create an endpoint : %s", err.Error())
+	}
+
+	addr2 := te2.ifaces[0].addr
+	if addr2.IP.To4() == nil {
+		t.Fatalf("No Ipv4 address assigned to the endpoint:  ep2")
+	}
+
+	ce := []string{"ep1"}
+	cConfig := &ContainerConfiguration{ChildEndpoints: ce}
+	genericOption = make(map[string]interface{})
+	genericOption[netlabel.GenericData] = cConfig
+
+	err = d.Join("net1", "ep2", "", te2, genericOption)
+	if err != nil {
+		t.Fatalf("Failed to link ep1 and ep2")
+	}
+
+	out, err := iptables.Raw("-L", DockerChain)
+	for _, pm := range exposedPorts {
+		regex := fmt.Sprintf("%s dpt:%d", pm.Proto.String(), pm.Port)
+		re := regexp.MustCompile(regex)
+		matches := re.FindAllString(string(out[:]), -1)
+		if len(matches) != 1 {
+			t.Fatalf("IP Tables programming failed %s", string(out[:]))
+		}
+
+		regex = fmt.Sprintf("%s spt:%d", pm.Proto.String(), pm.Port)
+		matched, _ := regexp.MatchString(regex, string(out[:]))
+		if !matched {
+			t.Fatalf("IP Tables programming failed %s", string(out[:]))
+		}
+	}
+
+	err = d.Leave("net1", "ep2")
+	if err != nil {
+		t.Fatalf("Failed to unlink ep1 and ep2")
+	}
+
+	out, err = iptables.Raw("-L", DockerChain)
+	for _, pm := range exposedPorts {
+		regex := fmt.Sprintf("%s dpt:%d", pm.Proto.String(), pm.Port)
+		re := regexp.MustCompile(regex)
+		matches := re.FindAllString(string(out[:]), -1)
+		if len(matches) != 0 {
+			t.Fatalf("Leave should have deleted relevant IPTables rules  %s", string(out[:]))
+		}
+
+		regex = fmt.Sprintf("%s spt:%d", pm.Proto.String(), pm.Port)
+		matched, _ := regexp.MatchString(regex, string(out[:]))
+		if matched {
+			t.Fatalf("Leave should have deleted relevant IPTables rules  %s", string(out[:]))
+		}
+	}
+
+	// Error condition test with an invalid endpoint-id "ep4"
+	ce = []string{"ep1", "ep4"}
+	cConfig = &ContainerConfiguration{ChildEndpoints: ce}
+	genericOption = make(map[string]interface{})
+	genericOption[netlabel.GenericData] = cConfig
+
+	err = d.Join("net1", "ep2", "", te2, genericOption)
+	if err != nil {
+		out, err = iptables.Raw("-L", DockerChain)
+		for _, pm := range exposedPorts {
+			regex := fmt.Sprintf("%s dpt:%d", pm.Proto.String(), pm.Port)
+			re := regexp.MustCompile(regex)
+			matches := re.FindAllString(string(out[:]), -1)
+			if len(matches) != 0 {
+				t.Fatalf("Error handling should rollback relevant IPTables rules  %s", string(out[:]))
+			}
+
+			regex = fmt.Sprintf("%s spt:%d", pm.Proto.String(), pm.Port)
+			matched, _ := regexp.MatchString(regex, string(out[:]))
+			if matched {
+				t.Fatalf("Error handling should rollback relevant IPTables rules  %s", string(out[:]))
+			}
+		}
+	} else {
+		t.Fatalf("Expected Join to fail given link conditions are not satisfied")
+	}
+}
+
+func TestValidateConfig(t *testing.T) {
+
+	// Test mtu
+	c := NetworkConfiguration{Mtu: -2}
+	err := c.Validate()
+	if err == nil {
+		t.Fatalf("Failed to detect invalid MTU number")
+	}
+
+	c.Mtu = 9000
+	err = c.Validate()
+	if err != nil {
+		t.Fatalf("unexpected validation error on MTU number")
+	}
+
+	// Bridge network
+	_, network, _ := net.ParseCIDR("172.28.0.0/16")
+
+	// Test FixedCIDR
+	_, containerSubnet, _ := net.ParseCIDR("172.27.0.0/16")
+	c = NetworkConfiguration{
+		AddressIPv4: network,
+		FixedCIDR:   containerSubnet,
+	}
+
+	err = c.Validate()
+	if err == nil {
+		t.Fatalf("Failed to detect invalid FixedCIDR network")
+	}
+
+	_, containerSubnet, _ = net.ParseCIDR("172.28.0.0/16")
+	c.FixedCIDR = containerSubnet
+	err = c.Validate()
+	if err != nil {
+		t.Fatalf("Unexpected validation error on FixedCIDR network")
+	}
+
+	_, containerSubnet, _ = net.ParseCIDR("172.28.0.0/15")
+	c.FixedCIDR = containerSubnet
+	err = c.Validate()
+	if err == nil {
+		t.Fatalf("Failed to detect invalid FixedCIDR network")
+	}
+
+	_, containerSubnet, _ = net.ParseCIDR("172.28.0.0/17")
+	c.FixedCIDR = containerSubnet
+	err = c.Validate()
+	if err != nil {
+		t.Fatalf("Unexpected validation error on FixedCIDR network")
+	}
+
+	// Test v4 gw
+	c.DefaultGatewayIPv4 = net.ParseIP("172.27.30.234")
+	err = c.Validate()
+	if err == nil {
+		t.Fatalf("Failed to detect invalid default gateway")
+	}
+
+	c.DefaultGatewayIPv4 = net.ParseIP("172.28.30.234")
+	err = c.Validate()
+	if err != nil {
+		t.Fatalf("Unexpected validation error on default gateway")
+	}
+
+	// Test v6 gw
+	_, containerSubnet, _ = net.ParseCIDR("2001:1234:ae:b004::/64")
+	c = NetworkConfiguration{
+		EnableIPv6:         true,
+		FixedCIDRv6:        containerSubnet,
+		DefaultGatewayIPv6: net.ParseIP("2001:1234:ac:b004::bad:a55"),
+	}
+	err = c.Validate()
+	if err == nil {
+		t.Fatalf("Failed to detect invalid v6 default gateway")
+	}
+
+	c.DefaultGatewayIPv6 = net.ParseIP("2001:1234:ae:b004::bad:a55")
+	err = c.Validate()
+	if err != nil {
+		t.Fatalf("Unexpected validation error on v6 default gateway")
+	}
+
+	c.FixedCIDRv6 = nil
+	err = c.Validate()
+	if err == nil {
+		t.Fatalf("Failed to detect invalid v6 default gateway")
+	}
+}
+
+func TestSetDefaultGw(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	_, subnetv6, _ := net.ParseCIDR("2001:db8:ea9:9abc:b0c4::/80")
+	gw4 := bridgeNetworks[0].IP.To4()
+	gw4[3] = 254
+	gw6 := net.ParseIP("2001:db8:ea9:9abc:b0c4::254")
+
+	config := &NetworkConfiguration{
+		BridgeName:         DefaultBridgeName,
+		EnableIPv6:         true,
+		FixedCIDRv6:        subnetv6,
+		DefaultGatewayIPv4: gw4,
+		DefaultGatewayIPv6: gw6,
+	}
+
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("dummy", genericOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	te := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "ep", te, nil)
+	if err != nil {
+		t.Fatalf("Failed to create endpoint: %v", err)
+	}
+
+	err = d.Join("dummy", "ep", "sbox", te, nil)
+	if err != nil {
+		t.Fatalf("Failed to join endpoint: %v", err)
+	}
+
+	if !gw4.Equal(te.gw) {
+		t.Fatalf("Failed to configure default gateway. Expected %v. Found %v", gw4, te.gw)
+	}
+
+	if !gw6.Equal(te.gw6) {
+		t.Fatalf("Failed to configure default gateway. Expected %v. Found %v", gw6, te.gw6)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
new file mode 100644
index 0000000..d22912c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go
@@ -0,0 +1,341 @@
+package bridge
+
+import (
+	"fmt"
+	"net"
+)
+
+// ErrConfigExists error is returned when driver already has a config applied.
+type ErrConfigExists struct{}
+
+func (ece *ErrConfigExists) Error() string {
+	return "configuration already exists, bridge configuration can be applied only once"
+}
+
+// Forbidden denotes the type of this error
+func (ece *ErrConfigExists) Forbidden() {}
+
+// ErrInvalidDriverConfig error is returned when Bridge Driver is passed an invalid config
+type ErrInvalidDriverConfig struct{}
+
+func (eidc *ErrInvalidDriverConfig) Error() string {
+	return "Invalid configuration passed to Bridge Driver"
+}
+
+// BadRequest denotes the type of this error
+func (eidc *ErrInvalidDriverConfig) BadRequest() {}
+
+// ErrInvalidNetworkConfig error is returned when a network is created on a driver without valid config.
+type ErrInvalidNetworkConfig struct{}
+
+func (einc *ErrInvalidNetworkConfig) Error() string {
+	return "trying to create a network on a driver without valid config"
+}
+
+// Forbidden denotes the type of this error
+func (einc *ErrInvalidNetworkConfig) Forbidden() {}
+
+// ErrInvalidContainerConfig error is returned when a endpoint create is attempted with an invalid configuration.
+type ErrInvalidContainerConfig struct{}
+
+func (eicc *ErrInvalidContainerConfig) Error() string {
+	return "Error in joining a container due to invalid configuration"
+}
+
+// BadRequest denotes the type of this error
+func (eicc *ErrInvalidContainerConfig) BadRequest() {}
+
+// ErrInvalidEndpointConfig error is returned when a endpoint create is attempted with an invalid endpoint configuration.
+type ErrInvalidEndpointConfig struct{}
+
+func (eiec *ErrInvalidEndpointConfig) Error() string {
+	return "trying to create an endpoint with an invalid endpoint configuration"
+}
+
+// BadRequest denotes the type of this error
+func (eiec *ErrInvalidEndpointConfig) BadRequest() {}
+
+// ErrNetworkExists error is returned when a network already exists and another network is created.
+type ErrNetworkExists struct{}
+
+func (ene *ErrNetworkExists) Error() string {
+	return "network already exists, bridge can only have one network"
+}
+
+// Forbidden denotes the type of this error
+func (ene *ErrNetworkExists) Forbidden() {}
+
+// ErrIfaceName error is returned when a new name could not be generated.
+type ErrIfaceName struct{}
+
+func (ein *ErrIfaceName) Error() string {
+	return "failed to find name for new interface"
+}
+
+// InternalError denotes the type of this error
+func (ein *ErrIfaceName) InternalError() {}
+
+// ErrNoIPAddr error is returned when bridge has no IPv4 address configured.
+type ErrNoIPAddr struct{}
+
+func (enip *ErrNoIPAddr) Error() string {
+	return "bridge has no IPv4 address configured"
+}
+
+// InternalError denotes the type of this error
+func (enip *ErrNoIPAddr) InternalError() {}
+
+// ErrInvalidGateway is returned when the user provided default gateway (v4/v6) is not not valid.
+type ErrInvalidGateway struct{}
+
+func (eig *ErrInvalidGateway) Error() string {
+	return "default gateway ip must be part of the network"
+}
+
+// BadRequest denotes the type of this error
+func (eig *ErrInvalidGateway) BadRequest() {}
+
+// ErrInvalidContainerSubnet is returned when the container subnet (FixedCIDR) is not valid.
+type ErrInvalidContainerSubnet struct{}
+
+func (eis *ErrInvalidContainerSubnet) Error() string {
+	return "container subnet must be a subset of bridge network"
+}
+
+// BadRequest denotes the type of this error
+func (eis *ErrInvalidContainerSubnet) BadRequest() {}
+
+// ErrInvalidMtu is returned when the user provided MTU is not valid.
+type ErrInvalidMtu int
+
+func (eim ErrInvalidMtu) Error() string {
+	return fmt.Sprintf("invalid MTU number: %d", int(eim))
+}
+
+// BadRequest denotes the type of this error
+func (eim ErrInvalidMtu) BadRequest() {}
+
+// ErrIPFwdCfg is returned when ip forwarding setup is invoked when the configuration
+// not enabled.
+type ErrIPFwdCfg struct{}
+
+func (eipf *ErrIPFwdCfg) Error() string {
+	return "unexpected request to enable IP Forwarding"
+}
+
+// BadRequest denotes the type of this error
+func (eipf *ErrIPFwdCfg) BadRequest() {}
+
+// ErrInvalidPort is returned when the container or host port specified in the port binding is not valid.
+type ErrInvalidPort string
+
+func (ip ErrInvalidPort) Error() string {
+	return fmt.Sprintf("invalid transport port: %s", string(ip))
+}
+
+// BadRequest denotes the type of this error
+func (ip ErrInvalidPort) BadRequest() {}
+
+// ErrUnsupportedAddressType is returned when the specified address type is not supported.
+type ErrUnsupportedAddressType string
+
+func (uat ErrUnsupportedAddressType) Error() string {
+	return fmt.Sprintf("unsupported address type: %s", string(uat))
+}
+
+// BadRequest denotes the type of this error
+func (uat ErrUnsupportedAddressType) BadRequest() {}
+
+// ErrInvalidAddressBinding is returned when the host address specified in the port binding is not valid.
+type ErrInvalidAddressBinding string
+
+func (iab ErrInvalidAddressBinding) Error() string {
+	return fmt.Sprintf("invalid host address in port binding: %s", string(iab))
+}
+
+// BadRequest denotes the type of this error
+func (iab ErrInvalidAddressBinding) BadRequest() {}
+
+// ActiveEndpointsError is returned when there are
+// still active endpoints in the network being deleted.
+type ActiveEndpointsError string
+
+func (aee ActiveEndpointsError) Error() string {
+	return fmt.Sprintf("network %s has active endpoint", string(aee))
+}
+
+// Forbidden denotes the type of this error
+func (aee ActiveEndpointsError) Forbidden() {}
+
+// InvalidNetworkIDError is returned when the passed
+// network id for an existing network is not a known id.
+type InvalidNetworkIDError string
+
+func (inie InvalidNetworkIDError) Error() string {
+	return fmt.Sprintf("invalid network id %s", string(inie))
+}
+
+// NotFound denotes the type of this error
+func (inie InvalidNetworkIDError) NotFound() {}
+
+// InvalidEndpointIDError is returned when the passed
+// endpoint id is not valid.
+type InvalidEndpointIDError string
+
+func (ieie InvalidEndpointIDError) Error() string {
+	return fmt.Sprintf("invalid endpoint id: %s", string(ieie))
+}
+
+// BadRequest denotes the type of this error
+func (ieie InvalidEndpointIDError) BadRequest() {}
+
+// InvalidSandboxIDError is returned when the passed
+// sandbox id is not valid.
+type InvalidSandboxIDError string
+
+func (isie InvalidSandboxIDError) Error() string {
+	return fmt.Sprintf("invalid sanbox id: %s", string(isie))
+}
+
+// BadRequest denotes the type of this error
+func (isie InvalidSandboxIDError) BadRequest() {}
+
+// EndpointNotFoundError is returned when the no endpoint
+// with the passed endpoint id is found.
+type EndpointNotFoundError string
+
+func (enfe EndpointNotFoundError) Error() string {
+	return fmt.Sprintf("endpoint not found: %s", string(enfe))
+}
+
+// NotFound denotes the type of this error
+func (enfe EndpointNotFoundError) NotFound() {}
+
+// NonDefaultBridgeExistError is returned when a non-default
+// bridge config is passed but it does not already exist.
+type NonDefaultBridgeExistError string
+
+func (ndbee NonDefaultBridgeExistError) Error() string {
+	return fmt.Sprintf("bridge device with non default name %s must be created manually", string(ndbee))
+}
+
+// Forbidden denotes the type of this error
+func (ndbee NonDefaultBridgeExistError) Forbidden() {}
+
+// FixedCIDRv4Error is returned when fixed-cidrv4 configuration
+// failed.
+type FixedCIDRv4Error struct {
+	Net    *net.IPNet
+	Subnet *net.IPNet
+	Err    error
+}
+
+func (fcv4 *FixedCIDRv4Error) Error() string {
+	return fmt.Sprintf("setup FixedCIDRv4 failed for subnet %s in %s: %v", fcv4.Subnet, fcv4.Net, fcv4.Err)
+}
+
+// InternalError denotes the type of this error
+func (fcv4 *FixedCIDRv4Error) InternalError() {}
+
+// FixedCIDRv6Error is returned when fixed-cidrv6 configuration
+// failed.
+type FixedCIDRv6Error struct {
+	Net *net.IPNet
+	Err error
+}
+
+func (fcv6 *FixedCIDRv6Error) Error() string {
+	return fmt.Sprintf("setup FixedCIDRv6 failed for subnet %s in %s: %v", fcv6.Net, fcv6.Net, fcv6.Err)
+}
+
+// InternalError denotes the type of this error
+func (fcv6 *FixedCIDRv6Error) InternalError() {}
+
+// IPTableCfgError is returned when an unexpected ip tables configuration is entered
+type IPTableCfgError string
+
+func (name IPTableCfgError) Error() string {
+	return fmt.Sprintf("unexpected request to set IP tables for interface: %s", string(name))
+}
+
+// BadRequest denotes the type of this error
+func (name IPTableCfgError) BadRequest() {}
+
+// InvalidIPTablesCfgError is returned when an invalid ip tables configuration is entered
+type InvalidIPTablesCfgError string
+
+func (action InvalidIPTablesCfgError) Error() string {
+	return fmt.Sprintf("Invalid IPTables action '%s'", string(action))
+}
+
+// BadRequest denotes the type of this error
+func (action InvalidIPTablesCfgError) BadRequest() {}
+
+// IPv4AddrRangeError is returned when a valid IP address range couldn't be found.
+type IPv4AddrRangeError string
+
+func (name IPv4AddrRangeError) Error() string {
+	return fmt.Sprintf("can't find an address range for interface %q", string(name))
+}
+
+// BadRequest denotes the type of this error
+func (name IPv4AddrRangeError) BadRequest() {}
+
+// IPv4AddrAddError is returned when IPv4 address could not be added to the bridge.
+type IPv4AddrAddError struct {
+	IP  *net.IPNet
+	Err error
+}
+
+func (ipv4 *IPv4AddrAddError) Error() string {
+	return fmt.Sprintf("failed to add IPv4 address %s to bridge: %v", ipv4.IP, ipv4.Err)
+}
+
+// InternalError denotes the type of this error
+func (ipv4 *IPv4AddrAddError) InternalError() {}
+
+// IPv6AddrAddError is returned when IPv6 address could not be added to the bridge.
+type IPv6AddrAddError struct {
+	IP  *net.IPNet
+	Err error
+}
+
+func (ipv6 *IPv6AddrAddError) Error() string {
+	return fmt.Sprintf("failed to add IPv6 address %s to bridge: %v", ipv6.IP, ipv6.Err)
+}
+
+// InternalError denotes the type of this error
+func (ipv6 *IPv6AddrAddError) InternalError() {}
+
+// IPv4AddrNoMatchError is returned when the bridge's IPv4 address does not match configured.
+type IPv4AddrNoMatchError struct {
+	IP    net.IP
+	CfgIP net.IP
+}
+
+func (ipv4 *IPv4AddrNoMatchError) Error() string {
+	return fmt.Sprintf("bridge IPv4 (%s) does not match requested configuration %s", ipv4.IP, ipv4.CfgIP)
+}
+
+// BadRequest denotes the type of this error
+func (ipv4 *IPv4AddrNoMatchError) BadRequest() {}
+
+// IPv6AddrNoMatchError is returned when the bridge's IPv6 address does not match configured.
+type IPv6AddrNoMatchError net.IPNet
+
+func (ipv6 *IPv6AddrNoMatchError) Error() string {
+	return fmt.Sprintf("bridge IPv6 addresses do not match the expected bridge configuration %s", (*net.IPNet)(ipv6).String())
+}
+
+// BadRequest denotes the type of this error
+func (ipv6 *IPv6AddrNoMatchError) BadRequest() {}
+
+// InvalidLinkIPAddrError is returned when a link is configured to a container with an invalid ip address
+type InvalidLinkIPAddrError string
+
+func (address InvalidLinkIPAddrError) Error() string {
+	return fmt.Sprintf("Cannot link to a container with Invalid IP Address '%s'", string(address))
+}
+
+// BadRequest denotes the type of this error
+func (address InvalidLinkIPAddrError) BadRequest() {}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
new file mode 100644
index 0000000..215a7f4
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go
@@ -0,0 +1,63 @@
+package bridge
+
+import (
+	"net"
+
+	"github.com/vishvananda/netlink"
+)
+
+const (
+	// DefaultBridgeName is the default name for the bridge interface managed
+	// by the driver when unspecified by the caller.
+	DefaultBridgeName = "docker0"
+)
+
+// Interface models the bridge network device.
+type bridgeInterface struct {
+	Link        netlink.Link
+	bridgeIPv4  *net.IPNet
+	bridgeIPv6  *net.IPNet
+	gatewayIPv4 net.IP
+	gatewayIPv6 net.IP
+}
+
+// newInterface creates a new bridge interface structure. It attempts to find
+// an already existing device identified by the Configuration BridgeName field,
+// or the default bridge name when unspecified), but doesn't attempt to create
+// one when missing
+func newInterface(config *NetworkConfiguration) *bridgeInterface {
+	i := &bridgeInterface{}
+
+	// Initialize the bridge name to the default if unspecified.
+	if config.BridgeName == "" {
+		config.BridgeName = DefaultBridgeName
+	}
+
+	// Attempt to find an existing bridge named with the specified name.
+	i.Link, _ = netlink.LinkByName(config.BridgeName)
+	return i
+}
+
+// exists indicates if the existing bridge interface exists on the system.
+func (i *bridgeInterface) exists() bool {
+	return i.Link != nil
+}
+
+// addresses returns a single IPv4 address and all IPv6 addresses for the
+// bridge interface.
+func (i *bridgeInterface) addresses() (netlink.Addr, []netlink.Addr, error) {
+	v4addr, err := netlink.AddrList(i.Link, netlink.FAMILY_V4)
+	if err != nil {
+		return netlink.Addr{}, nil, err
+	}
+
+	v6addr, err := netlink.AddrList(i.Link, netlink.FAMILY_V6)
+	if err != nil {
+		return netlink.Addr{}, nil, err
+	}
+
+	if len(v4addr) == 0 {
+		return netlink.Addr{}, v6addr, nil
+	}
+	return v4addr[0], v6addr, nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface_test.go
new file mode 100644
index 0000000..07bfe9e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/interface_test.go
@@ -0,0 +1,33 @@
+package bridge
+
+import (
+	"testing"
+
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+func TestInterfaceDefaultName(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config := &NetworkConfiguration{}
+	if _ = newInterface(config); config.BridgeName != DefaultBridgeName {
+		t.Fatalf("Expected default interface name %q, got %q", DefaultBridgeName, config.BridgeName)
+	}
+}
+
+func TestAddressesEmptyInterface(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	inf := newInterface(&NetworkConfiguration{})
+	addrv4, addrsv6, err := inf.addresses()
+	if err != nil {
+		t.Fatalf("Failed to get addresses of default interface: %v", err)
+	}
+	if expected := (netlink.Addr{}); addrv4 != expected {
+		t.Fatalf("Default interface has unexpected IPv4: %s", addrv4)
+	}
+	if len(addrsv6) != 0 {
+		t.Fatalf("Default interface has unexpected IPv6: %v", addrsv6)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go
new file mode 100644
index 0000000..4e4444e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go
@@ -0,0 +1,80 @@
+package bridge
+
+import (
+	"fmt"
+	"net"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/libnetwork/iptables"
+	"github.com/docker/libnetwork/types"
+)
+
+type link struct {
+	parentIP string
+	childIP  string
+	ports    []types.TransportPort
+	bridge   string
+}
+
+func (l *link) String() string {
+	return fmt.Sprintf("%s <-> %s [%v] on %s", l.parentIP, l.childIP, l.ports, l.bridge)
+}
+
+func newLink(parentIP, childIP string, ports []types.TransportPort, bridge string) *link {
+	return &link{
+		childIP:  childIP,
+		parentIP: parentIP,
+		ports:    ports,
+		bridge:   bridge,
+	}
+
+}
+
+func (l *link) Enable() error {
+	// -A == iptables append flag
+	return linkContainers("-A", l.parentIP, l.childIP, l.ports, l.bridge, false)
+}
+
+func (l *link) Disable() {
+	// -D == iptables delete flag
+	err := linkContainers("-D", l.parentIP, l.childIP, l.ports, l.bridge, true)
+	if err != nil {
+		log.Errorf("Error removing IPTables rules for a link %s due to %s", l.String(), err.Error())
+	}
+	// Return proper error once we move to use a proper iptables package
+	// that returns typed errors
+}
+
+func linkContainers(action, parentIP, childIP string, ports []types.TransportPort, bridge string,
+	ignoreErrors bool) error {
+	var nfAction iptables.Action
+
+	switch action {
+	case "-A":
+		nfAction = iptables.Append
+	case "-I":
+		nfAction = iptables.Insert
+	case "-D":
+		nfAction = iptables.Delete
+	default:
+		return InvalidIPTablesCfgError(action)
+	}
+
+	ip1 := net.ParseIP(parentIP)
+	if ip1 == nil {
+		return InvalidLinkIPAddrError(parentIP)
+	}
+	ip2 := net.ParseIP(childIP)
+	if ip2 == nil {
+		return InvalidLinkIPAddrError(childIP)
+	}
+
+	chain := iptables.Chain{Name: DockerChain, Bridge: bridge}
+	for _, port := range ports {
+		err := chain.Link(nfAction, ip1, ip2, int(port.Port), port.Proto.String())
+		if !ignoreErrors && err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/link_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link_test.go
new file mode 100644
index 0000000..fc4a625
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/link_test.go
@@ -0,0 +1,39 @@
+package bridge
+
+import (
+	"testing"
+
+	"github.com/docker/libnetwork/types"
+)
+
+func getPorts() []types.TransportPort {
+	return []types.TransportPort{
+		types.TransportPort{Proto: types.TCP, Port: uint16(5000)},
+		types.TransportPort{Proto: types.UDP, Port: uint16(400)},
+		types.TransportPort{Proto: types.TCP, Port: uint16(600)},
+	}
+}
+
+func TestLinkNew(t *testing.T) {
+	ports := getPorts()
+
+	link := newLink("172.0.17.3", "172.0.17.2", ports, "docker0")
+
+	if link == nil {
+		t.FailNow()
+	}
+	if link.parentIP != "172.0.17.3" {
+		t.Fail()
+	}
+	if link.childIP != "172.0.17.2" {
+		t.Fail()
+	}
+	for i, p := range link.ports {
+		if p != ports[i] {
+			t.Fail()
+		}
+	}
+	if link.bridge != "docker0" {
+		t.Fail()
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/network_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/network_test.go
new file mode 100644
index 0000000..20afea9
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/network_test.go
@@ -0,0 +1,200 @@
+package bridge
+
+import (
+	"testing"
+
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+func TestLinkCreate(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+	dr := d.(*driver)
+
+	mtu := 1490
+	config := &NetworkConfiguration{
+		BridgeName: DefaultBridgeName,
+		Mtu:        mtu,
+		EnableIPv6: true,
+	}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("dummy", genericOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	te := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "", te, nil)
+	if err != nil {
+		if _, ok := err.(InvalidEndpointIDError); !ok {
+			t.Fatalf("Failed with a wrong error :%s", err.Error())
+		}
+	} else {
+		t.Fatalf("Failed to detect invalid config")
+	}
+
+	// Good endpoint creation
+	err = d.CreateEndpoint("dummy", "ep", te, nil)
+	if err != nil {
+		t.Fatalf("Failed to create a link: %s", err.Error())
+	}
+
+	err = d.Join("dummy", "ep", "sbox", te, nil)
+	if err != nil {
+		t.Fatalf("Failed to create a link: %s", err.Error())
+	}
+
+	// Verify sbox endoint interface inherited MTU value from bridge config
+	sboxLnk, err := netlink.LinkByName(te.ifaces[0].srcName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if mtu != sboxLnk.Attrs().MTU {
+		t.Fatalf("Sandbox endpoint interface did not inherit bridge interface MTU config")
+	}
+	// TODO: if we could get peer name from (sboxLnk.(*netlink.Veth)).PeerName
+	// then we could check the MTU on hostLnk as well.
+
+	te1 := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "ep", te1, nil)
+	if err == nil {
+		t.Fatalf("Failed to detect duplicate endpoint id on same network")
+	}
+
+	if len(te.ifaces) != 1 {
+		t.Fatalf("Expected exactly one interface. Instead got %d interface(s)", len(te.ifaces))
+	}
+
+	if te.ifaces[0].dstName == "" {
+		t.Fatal("Invalid Dstname returned")
+	}
+
+	_, err = netlink.LinkByName(te.ifaces[0].srcName)
+	if err != nil {
+		t.Fatalf("Could not find source link %s: %v", te.ifaces[0].srcName, err)
+	}
+
+	n := dr.network
+	ip := te.ifaces[0].addr.IP
+	if !n.bridge.bridgeIPv4.Contains(ip) {
+		t.Fatalf("IP %s is not a valid ip in the subnet %s", ip.String(), n.bridge.bridgeIPv4.String())
+	}
+
+	ip6 := te.ifaces[0].addrv6.IP
+	if !n.bridge.bridgeIPv6.Contains(ip6) {
+		t.Fatalf("IP %s is not a valid ip in the subnet %s", ip6.String(), bridgeIPv6.String())
+	}
+
+	if !te.gw.Equal(n.bridge.bridgeIPv4.IP) {
+		t.Fatalf("Invalid default gateway. Expected %s. Got %s", n.bridge.bridgeIPv4.IP.String(),
+			te.gw.String())
+	}
+
+	if !te.gw6.Equal(n.bridge.bridgeIPv6.IP) {
+		t.Fatalf("Invalid default gateway for IPv6. Expected %s. Got %s", n.bridge.bridgeIPv6.IP.String(),
+			te.gw6.String())
+	}
+}
+
+func TestLinkCreateTwo(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	config := &NetworkConfiguration{
+		BridgeName: DefaultBridgeName,
+		EnableIPv6: true}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("dummy", genericOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	te1 := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "ep", te1, nil)
+	if err != nil {
+		t.Fatalf("Failed to create a link: %s", err.Error())
+	}
+
+	te2 := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "ep", te2, nil)
+	if err != nil {
+		if _, ok := err.(driverapi.ErrEndpointExists); !ok {
+			t.Fatalf("Failed with a wrong error: %s", err.Error())
+		}
+	} else {
+		t.Fatalf("Expected to fail while trying to add same endpoint twice")
+	}
+}
+
+func TestLinkCreateNoEnableIPv6(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	config := &NetworkConfiguration{
+		BridgeName: DefaultBridgeName}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("dummy", genericOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	te := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "ep", te, nil)
+	if err != nil {
+		t.Fatalf("Failed to create a link: %s", err.Error())
+	}
+
+	interfaces := te.ifaces
+	if interfaces[0].addrv6.IP.To16() != nil {
+		t.Fatalf("Expectd IPv6 address to be nil when IPv6 is not enabled. Got IPv6 = %s", interfaces[0].addrv6.String())
+	}
+
+	if te.gw6.To16() != nil {
+		t.Fatalf("Expected GatewayIPv6 to be nil when IPv6 is not enabled. Got GatewayIPv6 = %s", te.gw6.String())
+	}
+}
+
+func TestLinkDelete(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	config := &NetworkConfiguration{
+		BridgeName: DefaultBridgeName,
+		EnableIPv6: true}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = config
+
+	err := d.CreateNetwork("dummy", genericOption)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	te := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "ep1", te, nil)
+	if err != nil {
+		t.Fatalf("Failed to create a link: %s", err.Error())
+	}
+
+	err = d.DeleteEndpoint("dummy", "")
+	if err != nil {
+		if _, ok := err.(InvalidEndpointIDError); !ok {
+			t.Fatalf("Failed with a wrong error :%s", err.Error())
+		}
+	} else {
+		t.Fatalf("Failed to detect invalid config")
+	}
+
+	err = d.DeleteEndpoint("dummy", "ep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go
new file mode 100644
index 0000000..52d0362
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go
@@ -0,0 +1,124 @@
+package bridge
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/libnetwork/sandbox"
+	"github.com/docker/libnetwork/types"
+)
+
+var (
+	defaultBindingIP = net.IPv4(0, 0, 0, 0)
+)
+
+func allocatePorts(epConfig *EndpointConfiguration, intf *sandbox.Interface, reqDefBindIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
+	if epConfig == nil || epConfig.PortBindings == nil {
+		return nil, nil
+	}
+
+	defHostIP := defaultBindingIP
+	if reqDefBindIP != nil {
+		defHostIP = reqDefBindIP
+	}
+
+	return allocatePortsInternal(epConfig.PortBindings, intf.Address.IP, defHostIP, ulPxyEnabled)
+}
+
+func allocatePortsInternal(bindings []types.PortBinding, containerIP, defHostIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
+	bs := make([]types.PortBinding, 0, len(bindings))
+	for _, c := range bindings {
+		b := c.GetCopy()
+		if err := allocatePort(&b, containerIP, defHostIP, ulPxyEnabled); err != nil {
+			// On allocation failure, release previously allocated ports. On cleanup error, just log a warning message
+			if cuErr := releasePortsInternal(bs); cuErr != nil {
+				logrus.Warnf("Upon allocation failure for %v, failed to clear previously allocated port bindings: %v", b, cuErr)
+			}
+			return nil, err
+		}
+		bs = append(bs, b)
+	}
+	return bs, nil
+}
+
+func allocatePort(bnd *types.PortBinding, containerIP, defHostIP net.IP, ulPxyEnabled bool) error {
+	var (
+		host net.Addr
+		err  error
+	)
+
+	// Store the container interface address in the operational binding
+	bnd.IP = containerIP
+
+	// Adjust the host address in the operational binding
+	if len(bnd.HostIP) == 0 {
+		bnd.HostIP = defHostIP
+	}
+
+	// Construct the container side transport address
+	container, err := bnd.ContainerAddr()
+	if err != nil {
+		return err
+	}
+
+	// Try up to maxAllocatePortAttempts times to get a port that's not already allocated.
+	for i := 0; i < maxAllocatePortAttempts; i++ {
+		if host, err = portMapper.Map(container, bnd.HostIP, int(bnd.HostPort), ulPxyEnabled); err == nil {
+			break
+		}
+		// There is no point in immediately retrying to map an explicitly chosen port.
+		if bnd.HostPort != 0 {
+			logrus.Warnf("Failed to allocate and map port %d: %s", bnd.HostPort, err)
+			break
+		}
+		logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Save the host port (regardless it was or not specified in the binding)
+	switch netAddr := host.(type) {
+	case *net.TCPAddr:
+		bnd.HostPort = uint16(host.(*net.TCPAddr).Port)
+		return nil
+	case *net.UDPAddr:
+		bnd.HostPort = uint16(host.(*net.UDPAddr).Port)
+		return nil
+	default:
+		// For completeness
+		return ErrUnsupportedAddressType(fmt.Sprintf("%T", netAddr))
+	}
+}
+
+func releasePorts(ep *bridgeEndpoint) error {
+	return releasePortsInternal(ep.portMapping)
+}
+
+func releasePortsInternal(bindings []types.PortBinding) error {
+	var errorBuf bytes.Buffer
+
+	// Attempt to release all port bindings, do not stop on failure
+	for _, m := range bindings {
+		if err := releasePort(m); err != nil {
+			errorBuf.WriteString(fmt.Sprintf("\ncould not release %v because of %v", m, err))
+		}
+	}
+
+	if errorBuf.Len() != 0 {
+		return errors.New(errorBuf.String())
+	}
+	return nil
+}
+
+func releasePort(bnd types.PortBinding) error {
+	// Construct the host side transport address
+	host, err := bnd.HostAddr()
+	if err != nil {
+		return err
+	}
+	return portMapper.Unmap(host)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping_test.go
new file mode 100644
index 0000000..5eb16b6
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping_test.go
@@ -0,0 +1,67 @@
+package bridge
+
+import (
+	"os"
+	"testing"
+
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/docker/libnetwork/types"
+)
+
+func TestMain(m *testing.M) {
+	if reexec.Init() {
+		return
+	}
+	os.Exit(m.Run())
+}
+
+func TestPortMappingConfig(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+	d := newDriver()
+
+	binding1 := types.PortBinding{Proto: types.UDP, Port: uint16(400), HostPort: uint16(54000)}
+	binding2 := types.PortBinding{Proto: types.TCP, Port: uint16(500), HostPort: uint16(65000)}
+	portBindings := []types.PortBinding{binding1, binding2}
+
+	epOptions := make(map[string]interface{})
+	epOptions[netlabel.PortMap] = portBindings
+
+	netConfig := &NetworkConfiguration{
+		BridgeName:     DefaultBridgeName,
+		EnableIPTables: true,
+	}
+	netOptions := make(map[string]interface{})
+	netOptions[netlabel.GenericData] = netConfig
+
+	err := d.CreateNetwork("dummy", netOptions)
+	if err != nil {
+		t.Fatalf("Failed to create bridge: %v", err)
+	}
+
+	te := &testEndpoint{ifaces: []*testInterface{}}
+	err = d.CreateEndpoint("dummy", "ep1", te, epOptions)
+	if err != nil {
+		t.Fatalf("Failed to create the endpoint: %s", err.Error())
+	}
+
+	dd := d.(*driver)
+	ep, _ := dd.network.endpoints["ep1"]
+	if len(ep.portMapping) != 2 {
+		t.Fatalf("Failed to store the port bindings into the sandbox info. Found: %v", ep.portMapping)
+	}
+	if ep.portMapping[0].Proto != binding1.Proto || ep.portMapping[0].Port != binding1.Port ||
+		ep.portMapping[1].Proto != binding2.Proto || ep.portMapping[1].Port != binding2.Port {
+		t.Fatalf("bridgeEndpoint has incorrect port mapping values")
+	}
+	if ep.portMapping[0].HostIP == nil || ep.portMapping[0].HostPort == 0 ||
+		ep.portMapping[1].HostIP == nil || ep.portMapping[1].HostPort == 0 {
+		t.Fatalf("operational port mapping data not found on bridgeEndpoint")
+	}
+
+	err = releasePorts(ep)
+	if err != nil {
+		t.Fatalf("Failed to release mapped ports: %v", err)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go
new file mode 100644
index 0000000..8861184
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go
@@ -0,0 +1,67 @@
+package bridge
+
+import (
+	"bytes"
+	"io/ioutil"
+	"regexp"
+)
+
+const (
+	ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
+	ipv4Address  = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
+
+	// This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also
+	// will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
+	// -- e.g. other link-local types -- either won't work in containers or are unnecessary.
+	// For readability and sufficiency for Docker purposes this seemed more reasonable than a
+	// 1000+ character regexp with exact and complete IPv6 validation
+	ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})`
+)
+
+var nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
+
+func readResolvConf() ([]byte, error) {
+	resolv, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		return nil, err
+	}
+	return resolv, nil
+}
+
+// getLines parses input into lines and strips away comments.
+func getLines(input []byte, commentMarker []byte) [][]byte {
+	lines := bytes.Split(input, []byte("\n"))
+	var output [][]byte
+	for _, currentLine := range lines {
+		var commentIndex = bytes.Index(currentLine, commentMarker)
+		if commentIndex == -1 {
+			output = append(output, currentLine)
+		} else {
+			output = append(output, currentLine[:commentIndex])
+		}
+	}
+	return output
+}
+
+// GetNameserversAsCIDR returns nameservers (if any) listed in
+// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
+// This function's output is intended for net.ParseCIDR
+func getNameserversAsCIDR(resolvConf []byte) []string {
+	nameservers := []string{}
+	for _, nameserver := range getNameservers(resolvConf) {
+		nameservers = append(nameservers, nameserver+"/32")
+	}
+	return nameservers
+}
+
+// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
+func getNameservers(resolvConf []byte) []string {
+	nameservers := []string{}
+	for _, line := range getLines(resolvConf, []byte("#")) {
+		var ns = nsRegexp.FindSubmatch(line)
+		if len(ns) > 0 {
+			nameservers = append(nameservers, string(ns[1]))
+		}
+	}
+	return nameservers
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf_test.go
new file mode 100644
index 0000000..029f41c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf_test.go
@@ -0,0 +1,53 @@
+package bridge
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestResolveConfRead(t *testing.T) {
+	b, err := readResolvConf()
+	if err != nil {
+		t.Fatalf("Failed to read resolv.conf: %v", err)
+	}
+
+	if b == nil {
+		t.Fatal("Reading resolv.conf returned no content")
+	}
+}
+
+func TestResolveConfReadLines(t *testing.T) {
+	commentChar := []byte("#")
+
+	b, _ := readResolvConf()
+	lines := getLines(b, commentChar)
+	if lines == nil {
+		t.Fatal("Failed to read resolv.conf lines")
+	}
+
+	for _, line := range lines {
+		if bytes.Index(line, commentChar) != -1 {
+			t.Fatal("Returned comment content from resolv.conf")
+		}
+	}
+}
+
+func TestResolvConfNameserversAsCIDR(t *testing.T) {
+	resolvConf := `# Commented line
+nameserver 1.2.3.4
+
+nameserver 5.6.7.8 # Test
+`
+
+	cidrs := getNameserversAsCIDR([]byte(resolvConf))
+	if expected := 2; len(cidrs) != expected {
+		t.Fatalf("Expected %d nameservers, got %d", expected, len(cidrs))
+	}
+
+	expected := []string{"1.2.3.4/32", "5.6.7.8/32"}
+	for i, exp := range expected {
+		if cidrs[i] != exp {
+			t.Fatalf("Expected nameservers %s, got %s", exp, cidrs[i])
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go
new file mode 100644
index 0000000..f2d0344
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go
@@ -0,0 +1,26 @@
+package bridge
+
+type setupStep func(*NetworkConfiguration, *bridgeInterface) error
+
+type bridgeSetup struct {
+	config *NetworkConfiguration
+	bridge *bridgeInterface
+	steps  []setupStep
+}
+
+func newBridgeSetup(c *NetworkConfiguration, i *bridgeInterface) *bridgeSetup {
+	return &bridgeSetup{config: c, bridge: i}
+}
+
+func (b *bridgeSetup) apply() error {
+	for _, fn := range b.steps {
+		if err := fn(b.config, b.bridge); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (b *bridgeSetup) queueStep(step setupStep) {
+	b.steps = append(b.steps, step)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
new file mode 100644
index 0000000..1e0e168
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go
@@ -0,0 +1,50 @@
+package bridge
+
+import (
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+// SetupDevice create a new bridge interface/
+func setupDevice(config *NetworkConfiguration, i *bridgeInterface) error {
+	// We only attempt to create the bridge when the requested device name is
+	// the default one.
+	if config.BridgeName != DefaultBridgeName && !config.AllowNonDefaultBridge {
+		return NonDefaultBridgeExistError(config.BridgeName)
+	}
+
+	// Set the bridgeInterface netlink.Bridge.
+	i.Link = &netlink.Bridge{
+		LinkAttrs: netlink.LinkAttrs{
+			Name: config.BridgeName,
+		},
+	}
+
+	// Only set the bridge's MAC address if the kernel version is > 3.3, as it
+	// was not supported before that.
+	kv, err := kernel.GetKernelVersion()
+	if err == nil && (kv.Kernel >= 3 && kv.Major >= 3) {
+		i.Link.Attrs().HardwareAddr = netutils.GenerateRandomMAC()
+		log.Debugf("Setting bridge mac address to %s", i.Link.Attrs().HardwareAddr)
+	}
+
+	// Call out to netlink to create the device.
+	return netlink.LinkAdd(i.Link)
+}
+
+// SetupDeviceUp ups the given bridge interface.
+func setupDeviceUp(config *NetworkConfiguration, i *bridgeInterface) error {
+	err := netlink.LinkSetUp(i.Link)
+	if err != nil {
+		return err
+	}
+
+	// Attempt to update the bridge interface to refresh the flags status,
+	// ignoring any failure to do so.
+	if lnk, err := netlink.LinkByName(config.BridgeName); err == nil {
+		i.Link = lnk
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device_test.go
new file mode 100644
index 0000000..499f46a
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device_test.go
@@ -0,0 +1,75 @@
+package bridge
+
+import (
+	"bytes"
+	"net"
+	"testing"
+
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+func TestSetupNewBridge(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+	br := &bridgeInterface{}
+
+	if err := setupDevice(config, br); err != nil {
+		t.Fatalf("Bridge creation failed: %v", err)
+	}
+	if br.Link == nil {
+		t.Fatal("bridgeInterface link is nil (expected valid link)")
+	}
+	if _, err := netlink.LinkByName(DefaultBridgeName); err != nil {
+		t.Fatalf("Failed to retrieve bridge device: %v", err)
+	}
+	if br.Link.Attrs().Flags&net.FlagUp == net.FlagUp {
+		t.Fatalf("bridgeInterface should be created down")
+	}
+}
+
+func TestSetupNewNonDefaultBridge(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config := &NetworkConfiguration{BridgeName: "test0"}
+	br := &bridgeInterface{}
+
+	err := setupDevice(config, br)
+	if err == nil {
+		t.Fatal("Expected bridge creation failure with \"non default name\", succeeded")
+	}
+
+	if _, ok := err.(NonDefaultBridgeExistError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+}
+
+func TestSetupDeviceUp(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config := &NetworkConfiguration{BridgeName: DefaultBridgeName}
+	br := &bridgeInterface{}
+
+	if err := setupDevice(config, br); err != nil {
+		t.Fatalf("Bridge creation failed: %v", err)
+	}
+	if err := setupDeviceUp(config, br); err != nil {
+		t.Fatalf("Failed to up bridge device: %v", err)
+	}
+
+	lnk, _ := netlink.LinkByName(DefaultBridgeName)
+	if lnk.Attrs().Flags&net.FlagUp != net.FlagUp {
+		t.Fatalf("bridgeInterface should be up")
+	}
+}
+
+func TestGenerateRandomMAC(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	mac1 := netutils.GenerateRandomMAC()
+	mac2 := netutils.GenerateRandomMAC()
+	if bytes.Compare(mac1, mac2) == 0 {
+		t.Fatalf("Generated twice the same MAC address %v", mac1)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4.go
new file mode 100644
index 0000000..7657aa3
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4.go
@@ -0,0 +1,19 @@
+package bridge
+
+import (
+	log "github.com/Sirupsen/logrus"
+)
+
+func setupFixedCIDRv4(config *NetworkConfiguration, i *bridgeInterface) error {
+	addrv4, _, err := i.addresses()
+	if err != nil {
+		return err
+	}
+
+	log.Debugf("Using IPv4 subnet: %v", config.FixedCIDR)
+	if err := ipAllocator.RegisterSubnet(addrv4.IPNet, config.FixedCIDR); err != nil {
+		return &FixedCIDRv4Error{Subnet: config.FixedCIDR, Net: addrv4.IPNet, Err: err}
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4_test.go
new file mode 100644
index 0000000..5bb57d0
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv4_test.go
@@ -0,0 +1,62 @@
+package bridge
+
+import (
+	"net"
+	"testing"
+
+	"github.com/docker/libnetwork/netutils"
+)
+
+func TestSetupFixedCIDRv4(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config := &NetworkConfiguration{
+		BridgeName:  DefaultBridgeName,
+		AddressIPv4: &net.IPNet{IP: net.ParseIP("192.168.1.1"), Mask: net.CIDRMask(16, 32)},
+		FixedCIDR:   &net.IPNet{IP: net.ParseIP("192.168.2.0"), Mask: net.CIDRMask(24, 32)}}
+	br := &bridgeInterface{}
+
+	if err := setupDevice(config, br); err != nil {
+		t.Fatalf("Bridge creation failed: %v", err)
+	}
+	if err := setupBridgeIPv4(config, br); err != nil {
+		t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+	}
+
+	if err := setupFixedCIDRv4(config, br); err != nil {
+		t.Fatalf("Failed to setup bridge FixedCIDRv4: %v", err)
+	}
+
+	if ip, err := ipAllocator.RequestIP(config.FixedCIDR, nil); err != nil {
+		t.Fatalf("Failed to request IP to allocator: %v", err)
+	} else if expected := "192.168.2.1"; ip.String() != expected {
+		t.Fatalf("Expected allocated IP %s, got %s", expected, ip)
+	}
+}
+
+func TestSetupBadFixedCIDRv4(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config := &NetworkConfiguration{
+		BridgeName:  DefaultBridgeName,
+		AddressIPv4: &net.IPNet{IP: net.ParseIP("192.168.1.1"), Mask: net.CIDRMask(24, 32)},
+		FixedCIDR:   &net.IPNet{IP: net.ParseIP("192.168.2.0"), Mask: net.CIDRMask(24, 32)}}
+	br := &bridgeInterface{}
+
+	if err := setupDevice(config, br); err != nil {
+		t.Fatalf("Bridge creation failed: %v", err)
+	}
+	if err := setupBridgeIPv4(config, br); err != nil {
+		t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+	}
+
+	err := setupFixedCIDRv4(config, br)
+	if err == nil {
+		t.Fatal("Setup bridge FixedCIDRv4 should have failed")
+	}
+
+	if _, ok := err.(*FixedCIDRv4Error); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6.go
new file mode 100644
index 0000000..ade465a
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6.go
@@ -0,0 +1,14 @@
+package bridge
+
+import (
+	log "github.com/Sirupsen/logrus"
+)
+
+func setupFixedCIDRv6(config *NetworkConfiguration, i *bridgeInterface) error {
+	log.Debugf("Using IPv6 subnet: %v", config.FixedCIDRv6)
+	if err := ipAllocator.RegisterSubnet(config.FixedCIDRv6, config.FixedCIDRv6); err != nil {
+		return &FixedCIDRv6Error{Net: config.FixedCIDRv6, Err: err}
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6_test.go
new file mode 100644
index 0000000..a5a2c29
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_fixedcidrv6_test.go
@@ -0,0 +1,37 @@
+package bridge
+
+import (
+	"net"
+	"testing"
+
+	"github.com/docker/libnetwork/netutils"
+)
+
+func TestSetupFixedCIDRv6(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config := &NetworkConfiguration{}
+	br := newInterface(config)
+
+	_, config.FixedCIDRv6, _ = net.ParseCIDR("2002:db8::/48")
+	if err := setupDevice(config, br); err != nil {
+		t.Fatalf("Bridge creation failed: %v", err)
+	}
+	if err := setupBridgeIPv4(config, br); err != nil {
+		t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+	}
+
+	if err := setupBridgeIPv6(config, br); err != nil {
+		t.Fatalf("Assign IPv4 to bridge failed: %v", err)
+	}
+
+	if err := setupFixedCIDRv6(config, br); err != nil {
+		t.Fatalf("Failed to setup bridge FixedCIDRv6: %v", err)
+	}
+
+	if ip, err := ipAllocator.RequestIP(config.FixedCIDRv6, nil); err != nil {
+		t.Fatalf("Failed to request IP to allocator: %v", err)
+	} else if expected := "2002:db8::1"; ip.String() != expected {
+		t.Fatalf("Expected allocated IP %s, got %s", expected, ip)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go
new file mode 100644
index 0000000..1bc3416
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go
@@ -0,0 +1,25 @@
+package bridge
+
+import (
+	"fmt"
+	"io/ioutil"
+)
+
+const (
+	ipv4ForwardConf     = "/proc/sys/net/ipv4/ip_forward"
+	ipv4ForwardConfPerm = 0644
+)
+
+func setupIPForwarding(config *Configuration) error {
+	// Sanity Check
+	if config.EnableIPForwarding == false {
+		return &ErrIPFwdCfg{}
+	}
+
+	// Enable IPv4 forwarding
+	if err := ioutil.WriteFile(ipv4ForwardConf, []byte{'1', '\n'}, ipv4ForwardConfPerm); err != nil {
+		return fmt.Errorf("Setup IP forwarding failed: %v", err)
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding_test.go
new file mode 100644
index 0000000..7c4cfea
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding_test.go
@@ -0,0 +1,75 @@
+package bridge
+
+import (
+	"bytes"
+	"io/ioutil"
+	"testing"
+)
+
+func TestSetupIPForwarding(t *testing.T) {
+	// Read current setting and ensure the original value gets restored
+	procSetting := readCurrentIPForwardingSetting(t)
+	defer reconcileIPForwardingSetting(t, procSetting)
+
+	// Disable IP Forwarding if enabled
+	if bytes.Compare(procSetting, []byte("1\n")) == 0 {
+		writeIPForwardingSetting(t, []byte{'0', '\n'})
+	}
+
+	// Create test interface with ip forwarding setting enabled
+	config := &Configuration{
+		EnableIPForwarding: true}
+
+	// Set IP Forwarding
+	if err := setupIPForwarding(config); err != nil {
+		t.Fatalf("Failed to setup IP forwarding: %v", err)
+	}
+
+	// Read new setting
+	procSetting = readCurrentIPForwardingSetting(t)
+	if bytes.Compare(procSetting, []byte("1\n")) != 0 {
+		t.Fatalf("Failed to effectively setup IP forwarding")
+	}
+}
+
+func TestUnexpectedSetupIPForwarding(t *testing.T) {
+	// Read current setting and ensure the original value gets restored
+	procSetting := readCurrentIPForwardingSetting(t)
+	defer reconcileIPForwardingSetting(t, procSetting)
+
+	// Create test interface without ip forwarding setting enabled
+	config := &Configuration{
+		EnableIPForwarding: false}
+
+	// Attempt Set IP Forwarding
+	err := setupIPForwarding(config)
+	if err == nil {
+		t.Fatal("Setup IP forwarding was expected to fail")
+	}
+
+	if _, ok := err.(*ErrIPFwdCfg); !ok {
+		t.Fatalf("Setup IP forwarding failed with unexpected error: %v", err)
+	}
+}
+
+func readCurrentIPForwardingSetting(t *testing.T) []byte {
+	procSetting, err := ioutil.ReadFile(ipv4ForwardConf)
+	if err != nil {
+		t.Fatalf("Can't execute test: Failed to read current IP forwarding setting: %v", err)
+	}
+	return procSetting
+}
+
+func writeIPForwardingSetting(t *testing.T, chars []byte) {
+	err := ioutil.WriteFile(ipv4ForwardConf, chars, ipv4ForwardConfPerm)
+	if err != nil {
+		t.Fatalf("Can't execute or cleanup after test: Failed to reset IP forwarding: %v", err)
+	}
+}
+
+func reconcileIPForwardingSetting(t *testing.T, original []byte) {
+	current := readCurrentIPForwardingSetting(t)
+	if bytes.Compare(original, current) != 0 {
+		writeIPForwardingSetting(t, original)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
new file mode 100644
index 0000000..3d46197
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
@@ -0,0 +1,173 @@
+package bridge
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/docker/libnetwork/iptables"
+	"github.com/docker/libnetwork/netutils"
+)
+
+// DockerChain: DOCKER iptable chain name
+const (
+	DockerChain = "DOCKER"
+)
+
+func setupIPTables(config *NetworkConfiguration, i *bridgeInterface) error {
+	// Sanity check.
+	if config.EnableIPTables == false {
+		return IPTableCfgError(config.BridgeName)
+	}
+
+	hairpinMode := !config.EnableUserlandProxy
+
+	addrv4, _, err := netutils.GetIfaceAddr(config.BridgeName)
+	if err != nil {
+		return fmt.Errorf("Failed to setup IP tables, cannot acquire Interface address: %s", err.Error())
+	}
+	if err = setupIPTablesInternal(config.BridgeName, addrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, true); err != nil {
+		return fmt.Errorf("Failed to Setup IP tables: %s", err.Error())
+	}
+
+	_, err = iptables.NewChain(DockerChain, config.BridgeName, iptables.Nat, hairpinMode)
+	if err != nil {
+		return fmt.Errorf("Failed to create NAT chain: %s", err.Error())
+	}
+
+	chain, err := iptables.NewChain(DockerChain, config.BridgeName, iptables.Filter, hairpinMode)
+	if err != nil {
+		return fmt.Errorf("Failed to create FILTER chain: %s", err.Error())
+	}
+
+	portMapper.SetIptablesChain(chain)
+
+	return nil
+}
+
+type iptRule struct {
+	table   iptables.Table
+	chain   string
+	preArgs []string
+	args    []string
+}
+
+func setupIPTablesInternal(bridgeIface string, addr net.Addr, icc, ipmasq, hairpin, enable bool) error {
+
+	var (
+		address   = addr.String()
+		natRule   = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-s", address, "!", "-o", bridgeIface, "-j", "MASQUERADE"}}
+		hpNatRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-m", "addrtype", "--src-type", "LOCAL", "-o", bridgeIface, "-j", "MASQUERADE"}}
+		outRule   = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"}}
+		inRule    = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}}
+	)
+
+	// Set NAT.
+	if ipmasq {
+		if err := programChainRule(natRule, "NAT", enable); err != nil {
+			return err
+		}
+	}
+
+	// In hairpin mode, masquerade traffic from localhost
+	if hairpin {
+		if err := programChainRule(hpNatRule, "MASQ LOCAL HOST", enable); err != nil {
+			return err
+		}
+	}
+
+	// Set Inter Container Communication.
+	if err := setIcc(bridgeIface, icc, enable); err != nil {
+		return err
+	}
+
+	// Set Accept on all non-intercontainer outgoing packets.
+	if err := programChainRule(outRule, "ACCEPT NON_ICC OUTGOING", enable); err != nil {
+		return err
+	}
+
+	// Set Accept on incoming packets for existing connections.
+	if err := programChainRule(inRule, "ACCEPT INCOMING", enable); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func programChainRule(rule iptRule, ruleDescr string, insert bool) error {
+	var (
+		prefix    []string
+		operation string
+		condition bool
+		doesExist = iptables.Exists(rule.table, rule.chain, rule.args...)
+	)
+
+	if insert {
+		condition = !doesExist
+		prefix = []string{"-I", rule.chain}
+		operation = "enable"
+	} else {
+		condition = doesExist
+		prefix = []string{"-D", rule.chain}
+		operation = "disable"
+	}
+	if rule.preArgs != nil {
+		prefix = append(rule.preArgs, prefix...)
+	}
+
+	if condition {
+		if output, err := iptables.Raw(append(prefix, rule.args...)...); err != nil {
+			return fmt.Errorf("Unable to %s %s rule: %s", operation, ruleDescr, err.Error())
+		} else if len(output) != 0 {
+			return &iptables.ChainError{Chain: rule.chain, Output: output}
+		}
+	}
+
+	return nil
+}
+
+func setIcc(bridgeIface string, iccEnable, insert bool) error {
+	var (
+		table      = iptables.Filter
+		chain      = "FORWARD"
+		args       = []string{"-i", bridgeIface, "-o", bridgeIface, "-j"}
+		acceptArgs = append(args, "ACCEPT")
+		dropArgs   = append(args, "DROP")
+	)
+
+	if insert {
+		if !iccEnable {
+			iptables.Raw(append([]string{"-D", chain}, acceptArgs...)...)
+
+			if !iptables.Exists(table, chain, dropArgs...) {
+				if output, err := iptables.Raw(append([]string{"-A", chain}, dropArgs...)...); err != nil {
+					return fmt.Errorf("Unable to prevent intercontainer communication: %s", err.Error())
+				} else if len(output) != 0 {
+					return fmt.Errorf("Error disabling intercontainer communication: %s", output)
+				}
+			}
+		} else {
+			iptables.Raw(append([]string{"-D", chain}, dropArgs...)...)
+
+			if !iptables.Exists(table, chain, acceptArgs...) {
+				if output, err := iptables.Raw(append([]string{"-A", chain}, acceptArgs...)...); err != nil {
+					return fmt.Errorf("Unable to allow intercontainer communication: %s", err.Error())
+				} else if len(output) != 0 {
+					return fmt.Errorf("Error enabling intercontainer communication: %s", output)
+				}
+			}
+		}
+	} else {
+		// Remove any ICC rule.
+		if !iccEnable {
+			if iptables.Exists(table, chain, dropArgs...) {
+				iptables.Raw(append([]string{"-D", chain}, dropArgs...)...)
+			}
+		} else {
+			if iptables.Exists(table, chain, acceptArgs...) {
+				iptables.Raw(append([]string{"-D", chain}, acceptArgs...)...)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables_test.go
new file mode 100644
index 0000000..1c73ba9
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables_test.go
@@ -0,0 +1,103 @@
+package bridge
+
+import (
+	"net"
+	"testing"
+
+	"github.com/docker/libnetwork/iptables"
+	"github.com/docker/libnetwork/netutils"
+)
+
+const (
+	iptablesTestBridgeIP = "192.168.42.1"
+)
+
+func TestProgramIPTable(t *testing.T) {
+	// Create a test bridge with a basic bridge configuration (name + IPv4).
+	defer netutils.SetupTestNetNS(t)()
+	createTestBridge(getBasicTestConfig(), &bridgeInterface{}, t)
+
+	// Store various iptables chain rules we care for.
+	rules := []struct {
+		rule  iptRule
+		descr string
+	}{
+		{iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-d", "127.1.2.3", "-i", "lo", "-o", "lo", "-j", "DROP"}}, "Test Loopback"},
+		{iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-s", iptablesTestBridgeIP, "!", "-o", DefaultBridgeName, "-j", "MASQUERADE"}}, "NAT Test"},
+		{iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", DefaultBridgeName, "!", "-o", DefaultBridgeName, "-j", "ACCEPT"}}, "Test ACCEPT NON_ICC OUTGOING"},
+		{iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-o", DefaultBridgeName, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}}, "Test ACCEPT INCOMING"},
+		{iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", DefaultBridgeName, "-o", DefaultBridgeName, "-j", "ACCEPT"}}, "Test enable ICC"},
+		{iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", DefaultBridgeName, "-o", DefaultBridgeName, "-j", "DROP"}}, "Test disable ICC"},
+	}
+
+	// Assert the chain rules' insertion and removal.
+	for _, c := range rules {
+		assertIPTableChainProgramming(c.rule, c.descr, t)
+	}
+}
+
+func TestSetupIPTables(t *testing.T) {
+	// Create a test bridge with a basic bridge configuration (name + IPv4).
+	defer netutils.SetupTestNetNS(t)()
+	config := getBasicTestConfig()
+	br := &bridgeInterface{}
+
+	createTestBridge(config, br, t)
+
+	// Modify iptables params in base configuration and apply them.
+	config.EnableIPTables = true
+	assertBridgeConfig(config, br, t)
+
+	config.EnableIPMasquerade = true
+	assertBridgeConfig(config, br, t)
+
+	config.EnableICC = true
+	assertBridgeConfig(config, br, t)
+
+	config.EnableIPMasquerade = false
+	assertBridgeConfig(config, br, t)
+}
+
+func getBasicTestConfig() *NetworkConfiguration {
+	config := &NetworkConfiguration{
+		BridgeName:  DefaultBridgeName,
+		AddressIPv4: &net.IPNet{IP: net.ParseIP(iptablesTestBridgeIP), Mask: net.CIDRMask(16, 32)}}
+	return config
+}
+
+func createTestBridge(config *NetworkConfiguration, br *bridgeInterface, t *testing.T) {
+	if err := setupDevice(config, br); err != nil {
+		t.Fatalf("Failed to create the testing Bridge: %s", err.Error())
+	}
+	if err := setupBridgeIPv4(config, br); err != nil {
+		t.Fatalf("Failed to bring up the testing Bridge: %s", err.Error())
+	}
+}
+
+// Assert base function which pushes iptables chain rules on insertion and removal.
+func assertIPTableChainProgramming(rule iptRule, descr string, t *testing.T) {
+	// Add
+	if err := programChainRule(rule, descr, true); err != nil {
+		t.Fatalf("Failed to program iptable rule %s: %s", descr, err.Error())
+	}
+	if iptables.Exists(rule.table, rule.chain, rule.args...) == false {
+		t.Fatalf("Failed to effectively program iptable rule: %s", descr)
+	}
+
+	// Remove
+	if err := programChainRule(rule, descr, false); err != nil {
+		t.Fatalf("Failed to remove iptable rule %s: %s", descr, err.Error())
+	}
+	if iptables.Exists(rule.table, rule.chain, rule.args...) == true {
+		t.Fatalf("Failed to effectively remove iptable rule: %s", descr)
+	}
+}
+
+// Assert function which pushes chains based on bridge config parameters.
+func assertBridgeConfig(config *NetworkConfiguration, br *bridgeInterface, t *testing.T) {
+	// Attempt programming of ip tables.
+	err := setupIPTables(config, br)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
new file mode 100644
index 0000000..a0059c8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go
@@ -0,0 +1,136 @@
+package bridge
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+
+	"path/filepath"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+var bridgeNetworks []*net.IPNet
+
+func init() {
+	// Here we don't follow the convention of using the 1st IP of the range for the gateway.
+	// This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges.
+	// In theory this shouldn't matter - in practice there's bound to be a few scripts relying
+	// on the internal addressing or other stupid things like that.
+	// They shouldn't, but hey, let's not break them unless we really have to.
+	for _, addr := range []string{
+		"172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23
+		"10.0.42.1/16",   // Don't even try using the entire /8, that's too intrusive
+		"10.1.42.1/16",
+		"10.42.42.1/16",
+		"172.16.42.1/24",
+		"172.16.43.1/24",
+		"172.16.44.1/24",
+		"10.0.42.1/24",
+		"10.0.43.1/24",
+		"192.168.42.1/24",
+		"192.168.43.1/24",
+		"192.168.44.1/24",
+	} {
+		ip, net, err := net.ParseCIDR(addr)
+		if err != nil {
+			log.Errorf("Failed to parse address %s", addr)
+			continue
+		}
+		net.IP = ip.To4()
+		bridgeNetworks = append(bridgeNetworks, net)
+	}
+}
+
+func setupBridgeIPv4(config *NetworkConfiguration, i *bridgeInterface) error {
+	addrv4, _, err := i.addresses()
+	if err != nil {
+		return err
+	}
+
+	// Check if we have an IP address already on the bridge.
+	if addrv4.IPNet != nil {
+		// Make sure to store bridge network and default gateway before getting out.
+		i.bridgeIPv4 = addrv4.IPNet
+		i.gatewayIPv4 = addrv4.IPNet.IP
+		return nil
+	}
+
+	// Do not try to configure IPv4 on a non-default bridge unless you are
+	// specifically asked to do so.
+	if config.BridgeName != DefaultBridgeName && !config.AllowNonDefaultBridge {
+		return NonDefaultBridgeExistError(config.BridgeName)
+	}
+
+	bridgeIPv4, err := electBridgeIPv4(config)
+	if err != nil {
+		return err
+	}
+
+	log.Debugf("Creating bridge interface %q with network %s", config.BridgeName, bridgeIPv4)
+	if err := netlink.AddrAdd(i.Link, &netlink.Addr{IPNet: bridgeIPv4}); err != nil {
+		return &IPv4AddrAddError{IP: bridgeIPv4, Err: err}
+	}
+
+	// Store bridge network and default gateway
+	i.bridgeIPv4 = bridgeIPv4
+	i.gatewayIPv4 = i.bridgeIPv4.IP
+
+	return nil
+}
+
+func allocateBridgeIP(config *NetworkConfiguration, i *bridgeInterface) error {
+	ipAllocator.RequestIP(i.bridgeIPv4, i.bridgeIPv4.IP)
+	return nil
+}
+
+func electBridgeIPv4(config *NetworkConfiguration) (*net.IPNet, error) {
+	// Use the requested IPv4 CIDR when available.
+	if config.AddressIPv4 != nil {
+		return config.AddressIPv4, nil
+	}
+
+	// We don't check for an error here, because we don't really care if we
+	// can't read /etc/resolv.conf. So instead we skip the append if resolvConf
+	// is nil. It either doesn't exist, or we can't read it for some reason.
+	nameservers := []string{}
+	if resolvConf, _ := readResolvConf(); resolvConf != nil {
+		nameservers = append(nameservers, getNameserversAsCIDR(resolvConf)...)
+	}
+
+	// Try to automatically elect appropriate bridge IPv4 settings.
+	for _, n := range bridgeNetworks {
+		if err := netutils.CheckNameserverOverlaps(nameservers, n); err == nil {
+			if err := netutils.CheckRouteOverlaps(n); err == nil {
+				return n, nil
+			}
+		}
+	}
+
+	return nil, IPv4AddrRangeError(config.BridgeName)
+}
+
+func setupGatewayIPv4(config *NetworkConfiguration, i *bridgeInterface) error {
+	if !i.bridgeIPv4.Contains(config.DefaultGatewayIPv4) {
+		return &ErrInvalidGateway{}
+	}
+	if _, err := ipAllocator.RequestIP(i.bridgeIPv4, config.DefaultGatewayIPv4); err != nil {
+		return err
+	}
+
+	// Store requested default gateway
+	i.gatewayIPv4 = config.DefaultGatewayIPv4
+
+	return nil
+}
+
+func setupLoopbackAdressesRouting(config *NetworkConfiguration, i *bridgeInterface) error {
+	// Enable loopback adresses routing
+	sysPath := filepath.Join("/proc/sys/net/ipv4/conf", config.BridgeName, "route_localnet")
+	if err := ioutil.WriteFile(sysPath, []byte{'1', '\n'}, 0644); err != nil {
+		return fmt.Errorf("Unable to enable local routing for hairpin mode: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4_test.go
new file mode 100644
index 0000000..e311d64
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4_test.go
@@ -0,0 +1,100 @@
+package bridge
+
+import (
+	"net"
+	"testing"
+
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+func setupTestInterface(t *testing.T) (*NetworkConfiguration, *bridgeInterface) {
+	config := &NetworkConfiguration{
+		BridgeName: DefaultBridgeName}
+	br := &bridgeInterface{}
+
+	if err := setupDevice(config, br); err != nil {
+		t.Fatalf("Bridge creation failed: %v", err)
+	}
+	return config, br
+}
+
+func TestSetupBridgeIPv4Fixed(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	ip, netw, err := net.ParseCIDR("192.168.1.1/24")
+	if err != nil {
+		t.Fatalf("Failed to parse bridge IPv4: %v", err)
+	}
+
+	config, br := setupTestInterface(t)
+	config.AddressIPv4 = &net.IPNet{IP: ip, Mask: netw.Mask}
+	if err := setupBridgeIPv4(config, br); err != nil {
+		t.Fatalf("Failed to setup bridge IPv4: %v", err)
+	}
+
+	addrsv4, err := netlink.AddrList(br.Link, netlink.FAMILY_V4)
+	if err != nil {
+		t.Fatalf("Failed to list device IPv4 addresses: %v", err)
+	}
+
+	var found bool
+	for _, addr := range addrsv4 {
+		if config.AddressIPv4.String() == addr.IPNet.String() {
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		t.Fatalf("Bridge device does not have requested IPv4 address %v", config.AddressIPv4)
+	}
+}
+
+func TestSetupBridgeIPv4Auto(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config, br := setupTestInterface(t)
+	if err := setupBridgeIPv4(config, br); err != nil {
+		t.Fatalf("Failed to setup bridge IPv4: %v", err)
+	}
+
+	addrsv4, err := netlink.AddrList(br.Link, netlink.FAMILY_V4)
+	if err != nil {
+		t.Fatalf("Failed to list device IPv4 addresses: %v", err)
+	}
+
+	var found bool
+	for _, addr := range addrsv4 {
+		if bridgeNetworks[0].String() == addr.IPNet.String() {
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		t.Fatalf("Bridge device does not have the automatic IPv4 address %v", bridgeNetworks[0].String())
+	}
+}
+
+func TestSetupGatewayIPv4(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	ip, nw, _ := net.ParseCIDR("192.168.0.24/16")
+	nw.IP = ip
+	gw := net.ParseIP("192.168.0.254")
+
+	config := &NetworkConfiguration{
+		BridgeName:         DefaultBridgeName,
+		DefaultGatewayIPv4: gw}
+
+	br := &bridgeInterface{bridgeIPv4: nw}
+
+	if err := setupGatewayIPv4(config, br); err != nil {
+		t.Fatalf("Set Default Gateway failed: %v", err)
+	}
+
+	if !gw.Equal(br.gatewayIPv4) {
+		t.Fatalf("Set Default Gateway failed. Expected %v, Found %v", gw, br.gatewayIPv4)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
new file mode 100644
index 0000000..264e5b2
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go
@@ -0,0 +1,66 @@
+package bridge
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+
+	"github.com/vishvananda/netlink"
+)
+
+var bridgeIPv6 *net.IPNet
+
+const bridgeIPv6Str = "fe80::1/64"
+
+func init() {
+	// We allow ourselves to panic in this special case because we indicate a
+	// failure to parse a compile-time define constant.
+	if ip, netw, err := net.ParseCIDR(bridgeIPv6Str); err == nil {
+		bridgeIPv6 = &net.IPNet{IP: ip, Mask: netw.Mask}
+	} else {
+		panic(fmt.Sprintf("Cannot parse default bridge IPv6 address %q: %v", bridgeIPv6Str, err))
+	}
+}
+
+func setupBridgeIPv6(config *NetworkConfiguration, i *bridgeInterface) error {
+	// Enable IPv6 on the bridge
+	procFile := "/proc/sys/net/ipv6/conf/" + config.BridgeName + "/disable_ipv6"
+	if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil {
+		return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err)
+	}
+
+	_, addrsv6, err := i.addresses()
+	if err != nil {
+		return err
+	}
+
+	// Add the default link local ipv6 address if it doesn't exist
+	if !findIPv6Address(netlink.Addr{IPNet: bridgeIPv6}, addrsv6) {
+		if err := netlink.AddrAdd(i.Link, &netlink.Addr{IPNet: bridgeIPv6}); err != nil {
+			return &IPv6AddrAddError{IP: bridgeIPv6, Err: err}
+		}
+	}
+
+	// Store bridge network and default gateway
+	i.bridgeIPv6 = bridgeIPv6
+	i.gatewayIPv6 = i.bridgeIPv6.IP
+
+	return nil
+}
+
+func setupGatewayIPv6(config *NetworkConfiguration, i *bridgeInterface) error {
+	if config.FixedCIDRv6 == nil {
+		return &ErrInvalidContainerSubnet{}
+	}
+	if !config.FixedCIDRv6.Contains(config.DefaultGatewayIPv6) {
+		return &ErrInvalidGateway{}
+	}
+	if _, err := ipAllocator.RequestIP(config.FixedCIDRv6, config.DefaultGatewayIPv6); err != nil {
+		return err
+	}
+
+	// Store requested default gateway
+	i.gatewayIPv6 = config.DefaultGatewayIPv6
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6_test.go
new file mode 100644
index 0000000..cb8c17f
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6_test.go
@@ -0,0 +1,70 @@
+package bridge
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"testing"
+
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+func TestSetupIPv6(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	config, br := setupTestInterface(t)
+	if err := setupBridgeIPv6(config, br); err != nil {
+		t.Fatalf("Failed to setup bridge IPv6: %v", err)
+	}
+
+	procSetting, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/disable_ipv6", config.BridgeName))
+	if err != nil {
+		t.Fatalf("Failed to read disable_ipv6 kernel setting: %v", err)
+	}
+
+	if expected := []byte("0\n"); bytes.Compare(expected, procSetting) != 0 {
+		t.Fatalf("Invalid kernel setting disable_ipv6: expected %q, got %q", string(expected), string(procSetting))
+	}
+
+	addrsv6, err := netlink.AddrList(br.Link, netlink.FAMILY_V6)
+	if err != nil {
+		t.Fatalf("Failed to list device IPv6 addresses: %v", err)
+	}
+
+	var found bool
+	for _, addr := range addrsv6 {
+		if bridgeIPv6Str == addr.IPNet.String() {
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		t.Fatalf("Bridge device does not have requested IPv6 address %v", bridgeIPv6Str)
+	}
+
+}
+
+func TestSetupGatewayIPv6(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	_, nw, _ := net.ParseCIDR("2001:db8:ea9:9abc:ffff::/80")
+	gw := net.ParseIP("2001:db8:ea9:9abc:ffff::254")
+
+	config := &NetworkConfiguration{
+		BridgeName:         DefaultBridgeName,
+		FixedCIDRv6:        nw,
+		DefaultGatewayIPv6: gw}
+
+	br := &bridgeInterface{}
+
+	if err := setupGatewayIPv6(config, br); err != nil {
+		t.Fatalf("Set Default Gateway failed: %v", err)
+	}
+
+	if !gw.Equal(br.gatewayIPv6) {
+		t.Fatalf("Set Default Gateway failed. Expected %v, Found %v", gw, br.gatewayIPv6)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
new file mode 100644
index 0000000..46d025d
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go
@@ -0,0 +1,46 @@
+package bridge
+
+import (
+	"github.com/vishvananda/netlink"
+)
+
+func setupVerifyAndReconcile(config *NetworkConfiguration, i *bridgeInterface) error {
+	// Fetch a single IPv4 and a slice of IPv6 addresses from the bridge.
+	addrv4, addrsv6, err := i.addresses()
+	if err != nil {
+		return err
+	}
+
+	// Verify that the bridge does have an IPv4 address.
+	if addrv4.IPNet == nil {
+		return &ErrNoIPAddr{}
+	}
+
+	// Verify that the bridge IPv4 address matches the requested configuration.
+	if config.AddressIPv4 != nil && !addrv4.IP.Equal(config.AddressIPv4.IP) {
+		return &IPv4AddrNoMatchError{IP: addrv4.IP, CfgIP: config.AddressIPv4.IP}
+	}
+
+	// Verify that one of the bridge IPv6 addresses matches the requested
+	// configuration.
+	if config.EnableIPv6 && !findIPv6Address(netlink.Addr{IPNet: bridgeIPv6}, addrsv6) {
+		return (*IPv6AddrNoMatchError)(bridgeIPv6)
+	}
+
+	// By this time we have either configured a new bridge with an IP address
+	// or made sure an existing bridge's IP matches the configuration
+	// Now is the time to cache these states in the bridgeInterface.
+	i.bridgeIPv4 = addrv4.IPNet
+	i.bridgeIPv6 = bridgeIPv6
+
+	return nil
+}
+
+func findIPv6Address(addr netlink.Addr, addresses []netlink.Addr) bool {
+	for _, addrv6 := range addresses {
+		if addrv6.String() == addr.String() {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify_test.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify_test.go
new file mode 100644
index 0000000..d3c79dd
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify_test.go
@@ -0,0 +1,110 @@
+package bridge
+
+import (
+	"net"
+	"testing"
+
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+)
+
+func setupVerifyTest(t *testing.T) *bridgeInterface {
+	inf := &bridgeInterface{}
+
+	br := netlink.Bridge{}
+	br.LinkAttrs.Name = "default0"
+	if err := netlink.LinkAdd(&br); err == nil {
+		inf.Link = &br
+	} else {
+		t.Fatalf("Failed to create bridge interface: %v", err)
+	}
+
+	return inf
+}
+
+func TestSetupVerify(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	addrv4 := net.IPv4(192, 168, 1, 1)
+	inf := setupVerifyTest(t)
+	config := &NetworkConfiguration{}
+	config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+
+	if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
+		t.Fatalf("Failed to assign IPv4 %s to interface: %v", config.AddressIPv4, err)
+	}
+
+	if err := setupVerifyAndReconcile(config, inf); err != nil {
+		t.Fatalf("Address verification failed: %v", err)
+	}
+}
+
+func TestSetupVerifyBad(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	addrv4 := net.IPv4(192, 168, 1, 1)
+	inf := setupVerifyTest(t)
+	config := &NetworkConfiguration{}
+	config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+
+	ipnet := &net.IPNet{IP: net.IPv4(192, 168, 1, 2), Mask: addrv4.DefaultMask()}
+	if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: ipnet}); err != nil {
+		t.Fatalf("Failed to assign IPv4 %s to interface: %v", ipnet, err)
+	}
+
+	if err := setupVerifyAndReconcile(config, inf); err == nil {
+		t.Fatal("Address verification was expected to fail")
+	}
+}
+
+func TestSetupVerifyMissing(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	addrv4 := net.IPv4(192, 168, 1, 1)
+	inf := setupVerifyTest(t)
+	config := &NetworkConfiguration{}
+	config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+
+	if err := setupVerifyAndReconcile(config, inf); err == nil {
+		t.Fatal("Address verification was expected to fail")
+	}
+}
+
+func TestSetupVerifyIPv6(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	addrv4 := net.IPv4(192, 168, 1, 1)
+	inf := setupVerifyTest(t)
+	config := &NetworkConfiguration{}
+	config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+	config.EnableIPv6 = true
+
+	if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: bridgeIPv6}); err != nil {
+		t.Fatalf("Failed to assign IPv6 %s to interface: %v", bridgeIPv6, err)
+	}
+	if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
+		t.Fatalf("Failed to assign IPv4 %s to interface: %v", config.AddressIPv4, err)
+	}
+
+	if err := setupVerifyAndReconcile(config, inf); err != nil {
+		t.Fatalf("Address verification failed: %v", err)
+	}
+}
+
+func TestSetupVerifyIPv6Missing(t *testing.T) {
+	defer netutils.SetupTestNetNS(t)()
+
+	addrv4 := net.IPv4(192, 168, 1, 1)
+	inf := setupVerifyTest(t)
+	config := &NetworkConfiguration{}
+	config.AddressIPv4 = &net.IPNet{IP: addrv4, Mask: addrv4.DefaultMask()}
+	config.EnableIPv6 = true
+
+	if err := netlink.AddrAdd(inf.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
+		t.Fatalf("Failed to assign IPv4 %s to interface: %v", config.AddressIPv4, err)
+	}
+
+	if err := setupVerifyAndReconcile(config, inf); err == nil {
+		t.Fatal("Address verification was expected to fail")
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/host/host.go b/vendor/src/github.com/docker/libnetwork/drivers/host/host.go
new file mode 100644
index 0000000..50cdad7
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/host/host.go
@@ -0,0 +1,53 @@
+package host
+
+import (
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/types"
+)
+
+const networkType = "host"
+
+type driver struct{}
+
+// Init registers a new instance of host driver
+func Init(dc driverapi.DriverCallback) error {
+	return dc.RegisterDriver(networkType, &driver{})
+}
+
+func (d *driver) Config(option map[string]interface{}) error {
+	return nil
+}
+
+func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) error {
+	return nil
+}
+
+func (d *driver) DeleteNetwork(nid types.UUID) error {
+	return nil
+}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+	return nil
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+	return nil
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+	return make(map[string]interface{}, 0), nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+	return (jinfo.SetHostsPath("/etc/hosts"))
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+	return nil
+}
+
+func (d *driver) Type() string {
+	return networkType
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/null/null.go b/vendor/src/github.com/docker/libnetwork/drivers/null/null.go
new file mode 100644
index 0000000..11ac469
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/null/null.go
@@ -0,0 +1,53 @@
+package null
+
+import (
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/types"
+)
+
+const networkType = "null"
+
+type driver struct{}
+
+// Init registers a new instance of null driver
+func Init(dc driverapi.DriverCallback) error {
+	return dc.RegisterDriver(networkType, &driver{})
+}
+
+func (d *driver) Config(option map[string]interface{}) error {
+	return nil
+}
+
+func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) error {
+	return nil
+}
+
+func (d *driver) DeleteNetwork(nid types.UUID) error {
+	return nil
+}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+	return nil
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+	return nil
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+	return make(map[string]interface{}, 0), nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+	return nil
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+	return nil
+}
+
+func (d *driver) Type() string {
+	return networkType
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go
new file mode 100644
index 0000000..ffeb720
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go
@@ -0,0 +1,213 @@
+package remote
+
+import (
+	"fmt"
+	"net"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/types"
+)
+
+type driver struct {
+	endpoint    *plugins.Client
+	networkType string
+}
+
+func newDriver(name string, client *plugins.Client) driverapi.Driver {
+	return &driver{networkType: name, endpoint: client}
+}
+
+// Init makes sure a remote driver is registered when a network driver
+// plugin is activated.
+func Init(dc driverapi.DriverCallback) error {
+	plugins.Handle(driverapi.NetworkPluginEndpointType, func(name string, client *plugins.Client) {
+		if err := dc.RegisterDriver(name, newDriver(name, client)); err != nil {
+			log.Errorf("error registering driver for %s due to %v", name, err)
+		}
+	})
+	return nil
+}
+
+// Config is not implemented for remote drivers, since it is assumed
+// to be supplied to the remote process out-of-band (e.g., as command
+// line arguments).
+func (d *driver) Config(option map[string]interface{}) error {
+	return &driverapi.ErrNotImplemented{}
+}
+
+func (d *driver) call(methodName string, arg interface{}, retVal maybeError) error {
+	method := driverapi.NetworkPluginEndpointType + "." + methodName
+	err := d.endpoint.Call(method, arg, retVal)
+	if err != nil {
+		return err
+	}
+	if e := retVal.getError(); e != "" {
+		return fmt.Errorf("remote: %s", e)
+	}
+	return nil
+}
+
+func (d *driver) CreateNetwork(id types.UUID, options map[string]interface{}) error {
+	create := &createNetworkRequest{
+		NetworkID: string(id),
+		Options:   options,
+	}
+	return d.call("CreateNetwork", create, &createNetworkResponse{})
+}
+
+func (d *driver) DeleteNetwork(nid types.UUID) error {
+	delete := &deleteNetworkRequest{NetworkID: string(nid)}
+	return d.call("DeleteNetwork", delete, &deleteNetworkResponse{})
+}
+
+func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
+	if epInfo == nil {
+		return fmt.Errorf("must not be called with nil EndpointInfo")
+	}
+
+	reqIfaces := make([]*endpointInterface, len(epInfo.Interfaces()))
+	for i, iface := range epInfo.Interfaces() {
+		addr4 := iface.Address()
+		addr6 := iface.AddressIPv6()
+		reqIfaces[i] = &endpointInterface{
+			ID:          iface.ID(),
+			Address:     addr4.String(),
+			AddressIPv6: addr6.String(),
+			MacAddress:  iface.MacAddress().String(),
+		}
+	}
+	create := &createEndpointRequest{
+		NetworkID:  string(nid),
+		EndpointID: string(eid),
+		Interfaces: reqIfaces,
+		Options:    epOptions,
+	}
+	var res createEndpointResponse
+	if err := d.call("CreateEndpoint", create, &res); err != nil {
+		return err
+	}
+
+	ifaces, err := res.parseInterfaces()
+	if err != nil {
+		return err
+	}
+	if len(reqIfaces) > 0 && len(ifaces) > 0 {
+		// We're not supposed to add interfaces if there already are
+		// some. Attempt to roll back
+		return errorWithRollback("driver attempted to add more interfaces", d.DeleteEndpoint(nid, eid))
+	}
+	for _, iface := range ifaces {
+		var addr4, addr6 net.IPNet
+		if iface.Address != nil {
+			addr4 = *(iface.Address)
+		}
+		if iface.AddressIPv6 != nil {
+			addr6 = *(iface.AddressIPv6)
+		}
+		if err := epInfo.AddInterface(iface.ID, iface.MacAddress, addr4, addr6); err != nil {
+			return errorWithRollback(fmt.Sprintf("failed to AddInterface %v: %s", iface, err), d.DeleteEndpoint(nid, eid))
+		}
+	}
+	return nil
+}
+
+func errorWithRollback(msg string, err error) error {
+	rollback := "rolled back"
+	if err != nil {
+		rollback = "failed to roll back: " + err.Error()
+	}
+	return fmt.Errorf("%s; %s", msg, rollback)
+}
+
+func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
+	delete := &deleteEndpointRequest{
+		NetworkID:  string(nid),
+		EndpointID: string(eid),
+	}
+	return d.call("DeleteEndpoint", delete, &deleteEndpointResponse{})
+}
+
+func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{}, error) {
+	info := &endpointInfoRequest{
+		NetworkID:  string(nid),
+		EndpointID: string(eid),
+	}
+	var res endpointInfoResponse
+	if err := d.call("EndpointOperInfo", info, &res); err != nil {
+		return nil, err
+	}
+	return res.Value, nil
+}
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+	join := &joinRequest{
+		NetworkID:  string(nid),
+		EndpointID: string(eid),
+		SandboxKey: sboxKey,
+		Options:    options,
+	}
+	var (
+		res joinResponse
+		err error
+	)
+	if err = d.call("Join", join, &res); err != nil {
+		return err
+	}
+
+	// Expect each interface ID given by CreateEndpoint to have an
+	// entry at that index in the names supplied here. In other words,
+	// if you supply 0..n interfaces with IDs 0..n above, you should
+	// supply the names in the same order.
+	ifaceNames := res.InterfaceNames
+	for _, iface := range jinfo.InterfaceNames() {
+		i := iface.ID()
+		if i >= len(ifaceNames) || i < 0 {
+			return fmt.Errorf("no correlating interface %d in supplied interface names", i)
+		}
+		supplied := ifaceNames[i]
+		if err := iface.SetNames(supplied.SrcName, supplied.DstName); err != nil {
+			return errorWithRollback(fmt.Sprintf("failed to set interface name: %s", err), d.Leave(nid, eid))
+		}
+	}
+
+	var addr net.IP
+	if res.Gateway != "" {
+		if addr = net.ParseIP(res.Gateway); addr == nil {
+			return fmt.Errorf(`unable to parse Gateway "%s"`, res.Gateway)
+		}
+		if jinfo.SetGateway(addr) != nil {
+			return errorWithRollback(fmt.Sprintf("failed to set gateway: %v", addr), d.Leave(nid, eid))
+		}
+	}
+	if res.GatewayIPv6 != "" {
+		if addr = net.ParseIP(res.GatewayIPv6); addr == nil {
+			return fmt.Errorf(`unable to parse GatewayIPv6 "%s"`, res.GatewayIPv6)
+		}
+		if jinfo.SetGatewayIPv6(addr) != nil {
+			return errorWithRollback(fmt.Sprintf("failed to set gateway IPv6: %v", addr), d.Leave(nid, eid))
+		}
+	}
+	if jinfo.SetHostsPath(res.HostsPath) != nil {
+		return errorWithRollback(fmt.Sprintf("failed to set hosts path: %s", res.HostsPath), d.Leave(nid, eid))
+	}
+	if jinfo.SetResolvConfPath(res.ResolvConfPath) != nil {
+		return errorWithRollback(fmt.Sprintf("failed to set resolv.conf path: %s", res.ResolvConfPath), d.Leave(nid, eid))
+	}
+	return nil
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid types.UUID) error {
+	leave := &leaveRequest{
+		NetworkID:  string(nid),
+		EndpointID: string(eid),
+	}
+	return d.call("Leave", leave, &leaveResponse{})
+}
+
+func (d *driver) Type() string {
+	return d.networkType
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/remote/driver_test.go b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver_test.go
new file mode 100644
index 0000000..a9fb8b4
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/remote/driver_test.go
@@ -0,0 +1,397 @@
+package remote
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/libnetwork/driverapi"
+	_ "github.com/docker/libnetwork/netutils"
+	"github.com/docker/libnetwork/types"
+)
+
+func decodeToMap(r *http.Request) (res map[string]interface{}, err error) {
+	err = json.NewDecoder(r.Body).Decode(&res)
+	return
+}
+
+func handle(t *testing.T, mux *http.ServeMux, method string, h func(map[string]interface{}) interface{}) {
+	mux.HandleFunc(fmt.Sprintf("/%s.%s", driverapi.NetworkPluginEndpointType, method), func(w http.ResponseWriter, r *http.Request) {
+		ask, err := decodeToMap(r)
+		if err != nil {
+			t.Fatal(err)
+		}
+		answer := h(ask)
+		err = json.NewEncoder(w).Encode(&answer)
+		if err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+func setupPlugin(t *testing.T, name string, mux *http.ServeMux) func() {
+	if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	listener, err := net.Listen("unix", fmt.Sprintf("/usr/share/docker/plugins/%s.sock", name))
+	if err != nil {
+		t.Fatal("Could not listen to the plugin socket")
+	}
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, `{"Implements": ["%s"]}`, driverapi.NetworkPluginEndpointType)
+	})
+
+	go http.Serve(listener, mux)
+
+	return func() {
+		listener.Close()
+		if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+type testEndpoint struct {
+	t              *testing.T
+	id             int
+	src            string
+	dst            string
+	address        string
+	addressIPv6    string
+	macAddress     string
+	gateway        string
+	gatewayIPv6    string
+	resolvConfPath string
+	hostsPath      string
+}
+
+func (test *testEndpoint) Interfaces() []driverapi.InterfaceInfo {
+	// return an empty one so we don't trip the check for existing
+	// interfaces; we don't care about this after that
+	return []driverapi.InterfaceInfo{}
+}
+
+func (test *testEndpoint) AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
+	if ID != test.id {
+		test.t.Fatalf("Wrong ID passed to AddInterface: %d", ID)
+	}
+	ip4, net4, _ := net.ParseCIDR(test.address)
+	ip6, net6, _ := net.ParseCIDR(test.addressIPv6)
+	if ip4 != nil {
+		net4.IP = ip4
+		if !types.CompareIPNet(net4, &ipv4) {
+			test.t.Fatalf("Wrong address given %+v", ipv4)
+		}
+	}
+	if ip6 != nil {
+		net6.IP = ip6
+		if !types.CompareIPNet(net6, &ipv6) {
+			test.t.Fatalf("Wrong address (IPv6) given %+v", ipv6)
+		}
+	}
+	if test.macAddress != "" && mac.String() != test.macAddress {
+		test.t.Fatalf("Wrong MAC address given %v", mac)
+	}
+	return nil
+}
+
+func (test *testEndpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
+	return []driverapi.InterfaceNameInfo{test}
+}
+
+func compareIPs(t *testing.T, kind string, shouldBe string, supplied net.IP) {
+	ip := net.ParseIP(shouldBe)
+	if ip == nil {
+		t.Fatalf(`Invalid IP to test against: "%s"`, shouldBe)
+	}
+	if !ip.Equal(supplied) {
+		t.Fatalf(`%s IPs are not equal: expected "%s", got %v`, kind, shouldBe, supplied)
+	}
+}
+
+func (test *testEndpoint) SetGateway(ipv4 net.IP) error {
+	compareIPs(test.t, "Gateway", test.gateway, ipv4)
+	return nil
+}
+
+func (test *testEndpoint) SetGatewayIPv6(ipv6 net.IP) error {
+	compareIPs(test.t, "GatewayIPv6", test.gatewayIPv6, ipv6)
+	return nil
+}
+
+func (test *testEndpoint) SetHostsPath(p string) error {
+	if p != test.hostsPath {
+		test.t.Fatalf(`Wrong HostsPath; expected "%s", got "%s"`, test.hostsPath, p)
+	}
+	return nil
+}
+
+func (test *testEndpoint) SetResolvConfPath(p string) error {
+	if p != test.resolvConfPath {
+		test.t.Fatalf(`Wrong ResolvConfPath; expected "%s", got "%s"`, test.resolvConfPath, p)
+	}
+	return nil
+}
+
+func (test *testEndpoint) SetNames(src string, dst string) error {
+	if test.src != src {
+		test.t.Fatalf(`Wrong SrcName; expected "%s", got "%s"`, test.src, src)
+	}
+	if test.dst != dst {
+		test.t.Fatalf(`Wrong DstName; expected "%s", got "%s"`, test.dst, dst)
+	}
+	return nil
+}
+
+func (test *testEndpoint) ID() int {
+	return test.id
+}
+
+func TestRemoteDriver(t *testing.T) {
+	var plugin = "test-net-driver"
+
+	ep := &testEndpoint{
+		t:              t,
+		src:            "vethsrc",
+		dst:            "vethdst",
+		address:        "192.168.5.7/16",
+		addressIPv6:    "2001:DB8::5:7/48",
+		macAddress:     "7a:56:78:34:12:da",
+		gateway:        "192.168.0.1",
+		gatewayIPv6:    "2001:DB8::1",
+		hostsPath:      "/here/comes/the/host/path",
+		resolvConfPath: "/there/goes/the/resolv/conf",
+	}
+
+	mux := http.NewServeMux()
+	defer setupPlugin(t, plugin, mux)()
+
+	var networkID string
+
+	handle(t, mux, "CreateNetwork", func(msg map[string]interface{}) interface{} {
+		nid := msg["NetworkID"]
+		var ok bool
+		if networkID, ok = nid.(string); !ok {
+			t.Fatal("RPC did not include network ID string")
+		}
+		return map[string]interface{}{}
+	})
+	handle(t, mux, "DeleteNetwork", func(msg map[string]interface{}) interface{} {
+		if nid, ok := msg["NetworkID"]; !ok || nid != networkID {
+			t.Fatal("Network ID missing or does not match that created")
+		}
+		return map[string]interface{}{}
+	})
+	handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+		iface := map[string]interface{}{
+			"ID":          ep.id,
+			"Address":     ep.address,
+			"AddressIPv6": ep.addressIPv6,
+			"MacAddress":  ep.macAddress,
+		}
+		return map[string]interface{}{
+			"Interfaces": []interface{}{iface},
+		}
+	})
+	handle(t, mux, "Join", func(msg map[string]interface{}) interface{} {
+		options := msg["Options"].(map[string]interface{})
+		foo, ok := options["foo"].(string)
+		if !ok || foo != "fooValue" {
+			t.Fatalf("Did not receive expected foo string in request options: %+v", msg)
+		}
+		return map[string]interface{}{
+			"Gateway":        ep.gateway,
+			"GatewayIPv6":    ep.gatewayIPv6,
+			"HostsPath":      ep.hostsPath,
+			"ResolvConfPath": ep.resolvConfPath,
+			"InterfaceNames": []map[string]interface{}{
+				map[string]interface{}{
+					"SrcName": ep.src,
+					"DstName": ep.dst,
+				},
+			},
+		}
+	})
+	handle(t, mux, "Leave", func(msg map[string]interface{}) interface{} {
+		return map[string]string{}
+	})
+	handle(t, mux, "DeleteEndpoint", func(msg map[string]interface{}) interface{} {
+		return map[string]interface{}{}
+	})
+	handle(t, mux, "EndpointOperInfo", func(msg map[string]interface{}) interface{} {
+		return map[string]interface{}{
+			"Value": map[string]string{
+				"Arbitrary": "key",
+				"Value":     "pairs?",
+			},
+		}
+	})
+
+	p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	driver := newDriver(plugin, p.Client)
+	if driver.Type() != plugin {
+		t.Fatal("Driver type does not match that given")
+	}
+
+	netID := types.UUID("dummy-network")
+	err = driver.CreateNetwork(netID, map[string]interface{}{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	endID := types.UUID("dummy-endpoint")
+	err = driver.CreateEndpoint(netID, endID, ep, map[string]interface{}{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	joinOpts := map[string]interface{}{"foo": "fooValue"}
+	err = driver.Join(netID, endID, "sandbox-key", ep, joinOpts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err = driver.EndpointOperInfo(netID, endID); err != nil {
+		t.Fatal(err)
+	}
+	if err = driver.Leave(netID, endID); err != nil {
+		t.Fatal(err)
+	}
+	if err = driver.DeleteEndpoint(netID, endID); err != nil {
+		t.Fatal(err)
+	}
+	if err = driver.DeleteNetwork(netID); err != nil {
+		t.Fatal(err)
+	}
+}
+
+type failEndpoint struct {
+	t *testing.T
+}
+
+func (f *failEndpoint) Interfaces() []*driverapi.InterfaceInfo {
+	f.t.Fatal("Unexpected call of Interfaces")
+	return nil
+}
+func (f *failEndpoint) AddInterface(int, net.HardwareAddr, net.IPNet, net.IPNet) error {
+	f.t.Fatal("Unexpected call of AddInterface")
+	return nil
+}
+
+func TestDriverError(t *testing.T) {
+	var plugin = "test-net-driver-error"
+
+	mux := http.NewServeMux()
+	defer setupPlugin(t, plugin, mux)()
+
+	handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+		return map[string]interface{}{
+			"Err": "this should get raised as an error",
+		}
+	})
+
+	p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	driver := newDriver(plugin, p.Client)
+
+	if err := driver.CreateEndpoint(types.UUID("dummy"), types.UUID("dummy"), &testEndpoint{t: t}, map[string]interface{}{}); err == nil {
+		t.Fatalf("Expected error from driver")
+	}
+}
+
+func TestMissingValues(t *testing.T) {
+	var plugin = "test-net-driver-missing"
+
+	mux := http.NewServeMux()
+	defer setupPlugin(t, plugin, mux)()
+
+	ep := &testEndpoint{
+		t:  t,
+		id: 0,
+	}
+
+	handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+		iface := map[string]interface{}{
+			"ID":          ep.id,
+			"Address":     ep.address,
+			"AddressIPv6": ep.addressIPv6,
+			"MacAddress":  ep.macAddress,
+		}
+		return map[string]interface{}{
+			"Interfaces": []interface{}{iface},
+		}
+	})
+
+	p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+	if err != nil {
+		t.Fatal(err)
+	}
+	driver := newDriver(plugin, p.Client)
+
+	if err := driver.CreateEndpoint(types.UUID("dummy"), types.UUID("dummy"), ep, map[string]interface{}{}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+type rollbackEndpoint struct {
+}
+
+func (r *rollbackEndpoint) Interfaces() []driverapi.InterfaceInfo {
+	return []driverapi.InterfaceInfo{}
+}
+
+func (r *rollbackEndpoint) AddInterface(_ int, _ net.HardwareAddr, _ net.IPNet, _ net.IPNet) error {
+	return fmt.Errorf("fail this to trigger a rollback")
+}
+
+func TestRollback(t *testing.T) {
+	var plugin = "test-net-driver-rollback"
+
+	mux := http.NewServeMux()
+	defer setupPlugin(t, plugin, mux)()
+
+	rolledback := false
+
+	handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
+		iface := map[string]interface{}{
+			"ID":          0,
+			"Address":     "192.168.4.5/16",
+			"AddressIPv6": "",
+			"MacAddress":  "7a:12:34:56:78:90",
+		}
+		return map[string]interface{}{
+			"Interfaces": []interface{}{iface},
+		}
+	})
+	handle(t, mux, "DeleteEndpoint", func(msg map[string]interface{}) interface{} {
+		rolledback = true
+		return map[string]interface{}{}
+	})
+
+	p, err := plugins.Get(plugin, driverapi.NetworkPluginEndpointType)
+	if err != nil {
+		t.Fatal(err)
+	}
+	driver := newDriver(plugin, p.Client)
+
+	ep := &rollbackEndpoint{}
+
+	if err := driver.CreateEndpoint(types.UUID("dummy"), types.UUID("dummy"), ep, map[string]interface{}{}); err == nil {
+		t.Fatalf("Expected error from driver")
+	}
+	if !rolledback {
+		t.Fatalf("Expected to have had DeleteEndpoint called")
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/remote/messages.go b/vendor/src/github.com/docker/libnetwork/drivers/remote/messages.go
new file mode 100644
index 0000000..8e03a16
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/drivers/remote/messages.go
@@ -0,0 +1,143 @@
+package remote
+
+import "net"
+
+type response struct {
+	Err string
+}
+
+type maybeError interface {
+	getError() string
+}
+
+func (r *response) getError() string {
+	return r.Err
+}
+
+type createNetworkRequest struct {
+	NetworkID string
+	Options   map[string]interface{}
+}
+
+type createNetworkResponse struct {
+	response
+}
+
+type deleteNetworkRequest struct {
+	NetworkID string
+}
+
+type deleteNetworkResponse struct {
+	response
+}
+
+type createEndpointRequest struct {
+	NetworkID  string
+	EndpointID string
+	Interfaces []*endpointInterface
+	Options    map[string]interface{}
+}
+
+type endpointInterface struct {
+	ID          int
+	Address     string
+	AddressIPv6 string
+	MacAddress  string
+}
+
+type createEndpointResponse struct {
+	response
+	Interfaces []*endpointInterface
+}
+
+func toAddr(ipAddr string) (*net.IPNet, error) {
+	ip, ipnet, err := net.ParseCIDR(ipAddr)
+	if err != nil {
+		return nil, err
+	}
+	ipnet.IP = ip
+	return ipnet, nil
+}
+
+type iface struct {
+	ID          int
+	Address     *net.IPNet
+	AddressIPv6 *net.IPNet
+	MacAddress  net.HardwareAddr
+}
+
+func (r *createEndpointResponse) parseInterfaces() ([]*iface, error) {
+	var (
+		ifaces = make([]*iface, len(r.Interfaces))
+	)
+	for i, inIf := range r.Interfaces {
+		var err error
+		outIf := &iface{ID: inIf.ID}
+		if inIf.Address != "" {
+			if outIf.Address, err = toAddr(inIf.Address); err != nil {
+				return nil, err
+			}
+		}
+		if inIf.AddressIPv6 != "" {
+			if outIf.AddressIPv6, err = toAddr(inIf.AddressIPv6); err != nil {
+				return nil, err
+			}
+		}
+		if inIf.MacAddress != "" {
+			if outIf.MacAddress, err = net.ParseMAC(inIf.MacAddress); err != nil {
+				return nil, err
+			}
+		}
+		ifaces[i] = outIf
+	}
+	return ifaces, nil
+}
+
+type deleteEndpointRequest struct {
+	NetworkID  string
+	EndpointID string
+}
+
+type deleteEndpointResponse struct {
+	response
+}
+
+type endpointInfoRequest struct {
+	NetworkID  string
+	EndpointID string
+}
+
+type endpointInfoResponse struct {
+	response
+	Value map[string]interface{}
+}
+
+type joinRequest struct {
+	NetworkID  string
+	EndpointID string
+	SandboxKey string
+	Options    map[string]interface{}
+}
+
+type ifaceName struct {
+	SrcName string
+	DstName string
+}
+
+type joinResponse struct {
+	response
+	InterfaceNames []*ifaceName
+	Gateway        string
+	GatewayIPv6    string
+	HostsPath      string
+	ResolvConfPath string
+}
+
+type leaveRequest struct {
+	NetworkID  string
+	EndpointID string
+}
+
+type leaveResponse struct {
+	response
+}
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint.go b/vendor/src/github.com/docker/libnetwork/endpoint.go
new file mode 100644
index 0000000..9b83235
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/endpoint.go
@@ -0,0 +1,728 @@
+package libnetwork
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/libnetwork/etchosts"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/resolvconf"
+	"github.com/docker/libnetwork/sandbox"
+	"github.com/docker/libnetwork/types"
+)
+
+// Endpoint represents a logical connection between a network and a sandbox.
+type Endpoint interface {
+	// A system generated id for this endpoint.
+	ID() string
+
+	// Name returns the name of this endpoint.
+	Name() string
+
+	// Network returns the name of the network to which this endpoint is attached.
+	Network() string
+
+	// Join creates a new sandbox for the given container ID and populates the
+	// network resources allocated for the endpoint and joins the sandbox to
+	// the endpoint. It returns the sandbox key to the caller
+	Join(containerID string, options ...EndpointOption) (*ContainerData, error)
+
+	// Leave removes the sandbox associated with  container ID and detaches
+	// the network resources populated in the sandbox
+	Leave(containerID string, options ...EndpointOption) error
+
+	// Return certain operational data belonging to this endpoint
+	Info() EndpointInfo
+
+	// Info returns a collection of driver operational data related to this endpoint retrieved from the driver
+	DriverInfo() (map[string]interface{}, error)
+
+	// Delete and detaches this endpoint from the network.
+	Delete() error
+}
+
+// EndpointOption is a option setter function type used to pass varios options to Network
+// and Endpoint interfaces methods. The various setter functions of type EndpointOption are
+// provided by libnetwork, they look like <Create|Join|Leave>Option[...](...)
+type EndpointOption func(ep *endpoint)
+
+// ContainerData is a set of data returned when a container joins an endpoint.
+type ContainerData struct {
+	SandboxKey string
+}
+
+// These are the container configs used to customize container /etc/hosts file.
+type hostsPathConfig struct {
+	hostName      string
+	domainName    string
+	hostsPath     string
+	extraHosts    []extraHost
+	parentUpdates []parentUpdate
+}
+
+// These are the container configs used to customize container /etc/resolv.conf file.
+type resolvConfPathConfig struct {
+	resolvConfPath string
+	dnsList        []string
+	dnsSearchList  []string
+}
+
+type containerConfig struct {
+	hostsPathConfig
+	resolvConfPathConfig
+	generic           map[string]interface{}
+	useDefaultSandBox bool
+}
+
+type extraHost struct {
+	name string
+	IP   string
+}
+
+type parentUpdate struct {
+	eid  string
+	name string
+	ip   string
+}
+
+type containerInfo struct {
+	id     string
+	config containerConfig
+	data   ContainerData
+}
+
+type endpoint struct {
+	name          string
+	id            types.UUID
+	network       *network
+	sandboxInfo   *sandbox.Info
+	iFaces        []*endpointInterface
+	joinInfo      *endpointJoinInfo
+	container     *containerInfo
+	exposedPorts  []types.TransportPort
+	generic       map[string]interface{}
+	joinLeaveDone chan struct{}
+	sync.Mutex
+}
+
+const defaultPrefix = "/var/lib/docker/network/files"
+
+func (ep *endpoint) ID() string {
+	ep.Lock()
+	defer ep.Unlock()
+
+	return string(ep.id)
+}
+
+func (ep *endpoint) Name() string {
+	ep.Lock()
+	defer ep.Unlock()
+
+	return ep.name
+}
+
+func (ep *endpoint) Network() string {
+	ep.Lock()
+	defer ep.Unlock()
+
+	return ep.network.name
+}
+
+func (ep *endpoint) processOptions(options ...EndpointOption) {
+	ep.Lock()
+	defer ep.Unlock()
+
+	for _, opt := range options {
+		if opt != nil {
+			opt(ep)
+		}
+	}
+}
+
+func createBasePath(dir string) error {
+	err := os.MkdirAll(dir, 0644)
+	if err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	return nil
+}
+
+func createFile(path string) error {
+	var f *os.File
+
+	dir, _ := filepath.Split(path)
+	err := createBasePath(dir)
+	if err != nil {
+		return err
+	}
+
+	f, err = os.Create(path)
+	if err == nil {
+		f.Close()
+	}
+
+	return err
+}
+
+// joinLeaveStart waits to ensure there are no joins or leaves in progress and
+// marks this join/leave in progress without race
+func (ep *endpoint) joinLeaveStart() {
+	ep.Lock()
+	defer ep.Unlock()
+
+	for ep.joinLeaveDone != nil {
+		joinLeaveDone := ep.joinLeaveDone
+		ep.Unlock()
+
+		select {
+		case <-joinLeaveDone:
+		}
+
+		ep.Lock()
+	}
+
+	ep.joinLeaveDone = make(chan struct{})
+}
+
+// joinLeaveEnd marks the end of this join/leave operation and
+// signals the same without race to other join and leave waiters
+func (ep *endpoint) joinLeaveEnd() {
+	ep.Lock()
+	defer ep.Unlock()
+
+	if ep.joinLeaveDone != nil {
+		close(ep.joinLeaveDone)
+		ep.joinLeaveDone = nil
+	}
+}
+
+func (ep *endpoint) Join(containerID string, options ...EndpointOption) (*ContainerData, error) {
+	var err error
+
+	if containerID == "" {
+		return nil, InvalidContainerIDError(containerID)
+	}
+
+	ep.joinLeaveStart()
+	defer ep.joinLeaveEnd()
+
+	ep.Lock()
+	if ep.container != nil {
+		ep.Unlock()
+		return nil, ErrInvalidJoin{}
+	}
+
+	ep.container = &containerInfo{
+		id: containerID,
+		config: containerConfig{
+			hostsPathConfig: hostsPathConfig{
+				extraHosts:    []extraHost{},
+				parentUpdates: []parentUpdate{},
+			},
+		}}
+
+	ep.joinInfo = &endpointJoinInfo{}
+
+	container := ep.container
+	network := ep.network
+	epid := ep.id
+	joinInfo := ep.joinInfo
+	ifaces := ep.iFaces
+
+	ep.Unlock()
+	defer func() {
+		ep.Lock()
+		if err != nil {
+			ep.container = nil
+		}
+		ep.Unlock()
+	}()
+
+	network.Lock()
+	driver := network.driver
+	nid := network.id
+	ctrlr := network.ctrlr
+	network.Unlock()
+
+	ep.processOptions(options...)
+
+	sboxKey := sandbox.GenerateKey(containerID)
+	if container.config.useDefaultSandBox {
+		sboxKey = sandbox.GenerateKey("default")
+	}
+
+	err = driver.Join(nid, epid, sboxKey, ep, container.config.generic)
+	if err != nil {
+		return nil, err
+	}
+
+	err = ep.buildHostsFiles()
+	if err != nil {
+		return nil, err
+	}
+
+	err = ep.updateParentHosts()
+	if err != nil {
+		return nil, err
+	}
+
+	err = ep.setupDNS()
+	if err != nil {
+		return nil, err
+	}
+
+	sb, err := ctrlr.sandboxAdd(sboxKey, !container.config.useDefaultSandBox)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			ctrlr.sandboxRm(sboxKey)
+		}
+	}()
+
+	for _, i := range ifaces {
+		iface := &sandbox.Interface{
+			SrcName: i.srcName,
+			DstName: i.dstPrefix,
+			Address: &i.addr,
+		}
+		if i.addrv6.IP.To16() != nil {
+			iface.AddressIPv6 = &i.addrv6
+		}
+		err = sb.AddInterface(iface)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = sb.SetGateway(joinInfo.gw)
+	if err != nil {
+		return nil, err
+	}
+
+	err = sb.SetGatewayIPv6(joinInfo.gw6)
+	if err != nil {
+		return nil, err
+	}
+
+	container.data.SandboxKey = sb.Key()
+	cData := container.data
+
+	return &cData, nil
+}
+
+func (ep *endpoint) Leave(containerID string, options ...EndpointOption) error {
+	var err error
+
+	ep.joinLeaveStart()
+	defer ep.joinLeaveEnd()
+
+	ep.processOptions(options...)
+
+	ep.Lock()
+	container := ep.container
+	n := ep.network
+
+	if container == nil || container.id == "" ||
+		containerID == "" || container.id != containerID {
+		if container == nil {
+			err = ErrNoContainer{}
+		} else {
+			err = InvalidContainerIDError(containerID)
+		}
+
+		ep.Unlock()
+		return err
+	}
+	ep.container = nil
+	ep.Unlock()
+
+	n.Lock()
+	driver := n.driver
+	ctrlr := n.ctrlr
+	n.Unlock()
+
+	err = driver.Leave(n.id, ep.id)
+
+	sb := ctrlr.sandboxGet(container.data.SandboxKey)
+	for _, i := range sb.Interfaces() {
+		err = sb.RemoveInterface(i)
+		if err != nil {
+			logrus.Debugf("Remove interface failed: %v", err)
+		}
+	}
+
+	ctrlr.sandboxRm(container.data.SandboxKey)
+
+	return err
+}
+
+func (ep *endpoint) Delete() error {
+	var err error
+
+	ep.Lock()
+	epid := ep.id
+	name := ep.name
+	if ep.container != nil {
+		ep.Unlock()
+		return &ActiveContainerError{name: name, id: string(epid)}
+	}
+
+	n := ep.network
+	ep.Unlock()
+
+	n.Lock()
+	_, ok := n.endpoints[epid]
+	if !ok {
+		n.Unlock()
+		return &UnknownEndpointError{name: name, id: string(epid)}
+	}
+
+	nid := n.id
+	driver := n.driver
+	delete(n.endpoints, epid)
+	n.Unlock()
+	defer func() {
+		if err != nil {
+			n.Lock()
+			n.endpoints[epid] = ep
+			n.Unlock()
+		}
+	}()
+
+	err = driver.DeleteEndpoint(nid, epid)
+	return err
+}
+
+func (ep *endpoint) buildHostsFiles() error {
+	var extraContent []etchosts.Record
+
+	ep.Lock()
+	container := ep.container
+	joinInfo := ep.joinInfo
+	ifaces := ep.iFaces
+	ep.Unlock()
+
+	if container == nil {
+		return ErrNoContainer{}
+	}
+
+	if container.config.hostsPath == "" {
+		container.config.hostsPath = defaultPrefix + "/" + container.id + "/hosts"
+	}
+
+	dir, _ := filepath.Split(container.config.hostsPath)
+	err := createBasePath(dir)
+	if err != nil {
+		return err
+	}
+
+	if joinInfo != nil && joinInfo.hostsPath != "" {
+		content, err := ioutil.ReadFile(joinInfo.hostsPath)
+		if err != nil && !os.IsNotExist(err) {
+			return err
+		}
+
+		if err == nil {
+			return ioutil.WriteFile(container.config.hostsPath, content, 0644)
+		}
+	}
+
+	name := container.config.hostName
+	if container.config.domainName != "" {
+		name = name + "." + container.config.domainName
+	}
+
+	for _, extraHost := range container.config.extraHosts {
+		extraContent = append(extraContent,
+			etchosts.Record{Hosts: extraHost.name, IP: extraHost.IP})
+	}
+
+	IP := ""
+	if len(ifaces) != 0 && ifaces[0] != nil {
+		IP = ifaces[0].addr.IP.String()
+	}
+
+	return etchosts.Build(container.config.hostsPath, IP, container.config.hostName,
+		container.config.domainName, extraContent)
+}
+
+func (ep *endpoint) updateParentHosts() error {
+	ep.Lock()
+	container := ep.container
+	network := ep.network
+	ep.Unlock()
+
+	if container == nil {
+		return ErrNoContainer{}
+	}
+
+	for _, update := range container.config.parentUpdates {
+		network.Lock()
+		pep, ok := network.endpoints[types.UUID(update.eid)]
+		if !ok {
+			network.Unlock()
+			continue
+		}
+		network.Unlock()
+
+		pep.Lock()
+		pContainer := pep.container
+		pep.Unlock()
+
+		if pContainer != nil {
+			if err := etchosts.Update(pContainer.config.hostsPath, update.ip, update.name); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (ep *endpoint) updateDNS(resolvConf []byte) error {
+	ep.Lock()
+	container := ep.container
+	network := ep.network
+	ep.Unlock()
+
+	if container == nil {
+		return ErrNoContainer{}
+	}
+
+	oldHash := []byte{}
+	hashFile := container.config.resolvConfPath + ".hash"
+
+	resolvBytes, err := ioutil.ReadFile(container.config.resolvConfPath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	} else {
+		oldHash, err = ioutil.ReadFile(hashFile)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return err
+			}
+
+			oldHash = []byte{}
+		}
+	}
+
+	curHash, err := ioutils.HashData(bytes.NewReader(resolvBytes))
+	if err != nil {
+		return err
+	}
+
+	if string(oldHash) != "" && curHash != string(oldHash) {
+		// Seems the user has changed the container resolv.conf since the last time
+		// we checked so return without doing anything.
+		return nil
+	}
+
+	// replace any localhost/127.* and remove IPv6 nameservers if IPv6 disabled.
+	resolvConf, _ = resolvconf.FilterResolvDNS(resolvConf, network.enableIPv6)
+
+	newHash, err := ioutils.HashData(bytes.NewReader(resolvConf))
+	if err != nil {
+		return err
+	}
+
+	// for atomic updates to these files, use temporary files with os.Rename:
+	dir := path.Dir(container.config.resolvConfPath)
+	tmpHashFile, err := ioutil.TempFile(dir, "hash")
+	if err != nil {
+		return err
+	}
+	tmpResolvFile, err := ioutil.TempFile(dir, "resolv")
+	if err != nil {
+		return err
+	}
+
+	// Change the perms to 0644 since ioutil.TempFile creates it by default as 0600
+	if err := os.Chmod(tmpResolvFile.Name(), 0644); err != nil {
+		return err
+	}
+
+	// write the updates to the temp files
+	if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newHash), 0644); err != nil {
+		return err
+	}
+	if err = ioutil.WriteFile(tmpResolvFile.Name(), resolvConf, 0644); err != nil {
+		return err
+	}
+
+	// rename the temp files for atomic replace
+	if err = os.Rename(tmpHashFile.Name(), hashFile); err != nil {
+		return err
+	}
+	return os.Rename(tmpResolvFile.Name(), container.config.resolvConfPath)
+}
+
+func (ep *endpoint) setupDNS() error {
+	ep.Lock()
+	container := ep.container
+	ep.Unlock()
+
+	if container == nil {
+		return ErrNoContainer{}
+	}
+
+	if container.config.resolvConfPath == "" {
+		container.config.resolvConfPath = defaultPrefix + "/" + container.id + "/resolv.conf"
+	}
+
+	dir, _ := filepath.Split(container.config.resolvConfPath)
+	err := createBasePath(dir)
+	if err != nil {
+		return err
+	}
+
+	resolvConf, err := resolvconf.Get()
+	if err != nil {
+		return err
+	}
+
+	if len(container.config.dnsList) > 0 ||
+		len(container.config.dnsSearchList) > 0 {
+		var (
+			dnsList       = resolvconf.GetNameservers(resolvConf)
+			dnsSearchList = resolvconf.GetSearchDomains(resolvConf)
+		)
+
+		if len(container.config.dnsList) > 0 {
+			dnsList = container.config.dnsList
+		}
+
+		if len(container.config.dnsSearchList) > 0 {
+			dnsSearchList = container.config.dnsSearchList
+		}
+
+		return resolvconf.Build(container.config.resolvConfPath, dnsList, dnsSearchList)
+	}
+
+	return ep.updateDNS(resolvConf)
+}
+
+// EndpointOptionGeneric function returns an option setter for a Generic option defined
+// in a Dictionary of Key-Value pair
+func EndpointOptionGeneric(generic map[string]interface{}) EndpointOption {
+	return func(ep *endpoint) {
+		for k, v := range generic {
+			ep.generic[k] = v
+		}
+	}
+}
+
+// JoinOptionHostname function returns an option setter for hostname option to
+// be passed to endpoint Join method.
+func JoinOptionHostname(name string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.hostName = name
+	}
+}
+
+// JoinOptionDomainname function returns an option setter for domainname option to
+// be passed to endpoint Join method.
+func JoinOptionDomainname(name string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.domainName = name
+	}
+}
+
+// JoinOptionHostsPath function returns an option setter for hostspath option to
+// be passed to endpoint Join method.
+func JoinOptionHostsPath(path string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.hostsPath = path
+	}
+}
+
+// JoinOptionExtraHost function returns an option setter for extra /etc/hosts options
+// which is a name and IP as strings.
+func JoinOptionExtraHost(name string, IP string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.extraHosts = append(ep.container.config.extraHosts, extraHost{name: name, IP: IP})
+	}
+}
+
+// JoinOptionParentUpdate function returns an option setter for parent container
+// which needs to update the IP address for the linked container.
+func JoinOptionParentUpdate(eid string, name, ip string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.parentUpdates = append(ep.container.config.parentUpdates, parentUpdate{eid: eid, name: name, ip: ip})
+	}
+}
+
+// JoinOptionResolvConfPath function returns an option setter for resolvconfpath option to
+// be passed to endpoint Join method.
+func JoinOptionResolvConfPath(path string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.resolvConfPath = path
+	}
+}
+
+// JoinOptionDNS function returns an option setter for dns entry option to
+// be passed to endpoint Join method.
+func JoinOptionDNS(dns string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.dnsList = append(ep.container.config.dnsList, dns)
+	}
+}
+
+// JoinOptionDNSSearch function returns an option setter for dns search entry option to
+// be passed to endpoint Join method.
+func JoinOptionDNSSearch(search string) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.dnsSearchList = append(ep.container.config.dnsSearchList, search)
+	}
+}
+
+// JoinOptionUseDefaultSandbox function returns an option setter for using default sandbox to
+// be passed to endpoint Join method.
+func JoinOptionUseDefaultSandbox() EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.useDefaultSandBox = true
+	}
+}
+
+// CreateOptionExposedPorts function returns an option setter for the container exposed
+// ports option to be passed to network.CreateEndpoint() method.
+func CreateOptionExposedPorts(exposedPorts []types.TransportPort) EndpointOption {
+	return func(ep *endpoint) {
+		// Defensive copy
+		eps := make([]types.TransportPort, len(exposedPorts))
+		copy(eps, exposedPorts)
+		// Store endpoint label and in generic because driver needs it
+		ep.exposedPorts = eps
+		ep.generic[netlabel.ExposedPorts] = eps
+	}
+}
+
+// CreateOptionPortMapping function returns an option setter for the mapping
+// ports option to be passed to network.CreateEndpoint() method.
+func CreateOptionPortMapping(portBindings []types.PortBinding) EndpointOption {
+	return func(ep *endpoint) {
+		// Store a copy of the bindings as generic data to pass to the driver
+		pbs := make([]types.PortBinding, len(portBindings))
+		copy(pbs, portBindings)
+		ep.generic[netlabel.PortMap] = pbs
+	}
+}
+
+// JoinOptionGeneric function returns an option setter for Generic configuration
+// that is not managed by libNetwork but can be used by the Drivers during the call to
+// endpoint join method. Container Labels are a good example.
+func JoinOptionGeneric(generic map[string]interface{}) EndpointOption {
+	return func(ep *endpoint) {
+		ep.container.config.generic = generic
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint_info.go b/vendor/src/github.com/docker/libnetwork/endpoint_info.go
new file mode 100644
index 0000000..f045215
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/endpoint_info.go
@@ -0,0 +1,215 @@
+package libnetwork
+
+import (
+	"net"
+
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/types"
+)
+
+// EndpointInfo provides an interface to retrieve network resources bound to the endpoint.
+type EndpointInfo interface {
+	// InterfaceList returns an interface list which were assigned to the endpoint
+	// by the driver. This can be used after the endpoint has been created.
+	InterfaceList() []InterfaceInfo
+
+	// Gateway returns the IPv4 gateway assigned by the driver.
+	// This will only return a valid value if a container has joined the endpoint.
+	Gateway() net.IP
+
+	// GatewayIPv6 returns the IPv6 gateway assigned by the driver.
+	// This will only return a valid value if a container has joined the endpoint.
+	GatewayIPv6() net.IP
+
+	// SandboxKey returns the sanbox key for the container which has joined
+	// the endpoint. If there is no container joined then this will return an
+	// empty string.
+	SandboxKey() string
+}
+
+// InterfaceInfo provides an interface to retrieve interface addresses bound to the endpoint.
+type InterfaceInfo interface {
+	// MacAddress returns the MAC address assigned to the endpoint.
+	MacAddress() net.HardwareAddr
+
+	// Address returns the IPv4 address assigned to the endpoint.
+	Address() net.IPNet
+
+	// AddressIPv6 returns the IPv6 address assigned to the endpoint.
+	AddressIPv6() net.IPNet
+}
+
+type endpointInterface struct {
+	id        int
+	mac       net.HardwareAddr
+	addr      net.IPNet
+	addrv6    net.IPNet
+	srcName   string
+	dstPrefix string
+}
+
+type endpointJoinInfo struct {
+	gw             net.IP
+	gw6            net.IP
+	hostsPath      string
+	resolvConfPath string
+}
+
+func (ep *endpoint) Info() EndpointInfo {
+	return ep
+}
+
+func (ep *endpoint) DriverInfo() (map[string]interface{}, error) {
+	ep.Lock()
+	network := ep.network
+	epid := ep.id
+	ep.Unlock()
+
+	network.Lock()
+	driver := network.driver
+	nid := network.id
+	network.Unlock()
+
+	return driver.EndpointOperInfo(nid, epid)
+}
+
+func (ep *endpoint) InterfaceList() []InterfaceInfo {
+	ep.Lock()
+	defer ep.Unlock()
+
+	iList := make([]InterfaceInfo, len(ep.iFaces))
+
+	for i, iface := range ep.iFaces {
+		iList[i] = iface
+	}
+
+	return iList
+}
+
+func (ep *endpoint) Interfaces() []driverapi.InterfaceInfo {
+	ep.Lock()
+	defer ep.Unlock()
+
+	iList := make([]driverapi.InterfaceInfo, len(ep.iFaces))
+
+	for i, iface := range ep.iFaces {
+		iList[i] = iface
+	}
+
+	return iList
+}
+
+func (ep *endpoint) AddInterface(id int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
+	ep.Lock()
+	defer ep.Unlock()
+
+	iface := &endpointInterface{
+		id:     id,
+		addr:   *types.GetIPNetCopy(&ipv4),
+		addrv6: *types.GetIPNetCopy(&ipv6),
+	}
+	iface.mac = types.GetMacCopy(mac)
+
+	ep.iFaces = append(ep.iFaces, iface)
+	return nil
+}
+
+func (i *endpointInterface) ID() int {
+	return i.id
+}
+
+func (i *endpointInterface) MacAddress() net.HardwareAddr {
+	return types.GetMacCopy(i.mac)
+}
+
+func (i *endpointInterface) Address() net.IPNet {
+	return (*types.GetIPNetCopy(&i.addr))
+}
+
+func (i *endpointInterface) AddressIPv6() net.IPNet {
+	return (*types.GetIPNetCopy(&i.addrv6))
+}
+
+func (i *endpointInterface) SetNames(srcName string, dstPrefix string) error {
+	i.srcName = srcName
+	i.dstPrefix = dstPrefix
+	return nil
+}
+
+func (ep *endpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
+	ep.Lock()
+	defer ep.Unlock()
+
+	iList := make([]driverapi.InterfaceNameInfo, len(ep.iFaces))
+
+	for i, iface := range ep.iFaces {
+		iList[i] = iface
+	}
+
+	return iList
+}
+
+func (ep *endpoint) SandboxKey() string {
+	ep.Lock()
+	defer ep.Unlock()
+
+	if ep.container == nil {
+		return ""
+	}
+
+	return ep.container.data.SandboxKey
+}
+
+func (ep *endpoint) Gateway() net.IP {
+	ep.Lock()
+	defer ep.Unlock()
+
+	if ep.joinInfo == nil {
+		return net.IP{}
+	}
+
+	return types.GetIPCopy(ep.joinInfo.gw)
+}
+
+func (ep *endpoint) GatewayIPv6() net.IP {
+	ep.Lock()
+	defer ep.Unlock()
+
+	if ep.joinInfo == nil {
+		return net.IP{}
+	}
+
+	return types.GetIPCopy(ep.joinInfo.gw6)
+}
+
+func (ep *endpoint) SetGateway(gw net.IP) error {
+	ep.Lock()
+	defer ep.Unlock()
+
+	ep.joinInfo.gw = types.GetIPCopy(gw)
+	return nil
+}
+
+func (ep *endpoint) SetGatewayIPv6(gw6 net.IP) error {
+	ep.Lock()
+	defer ep.Unlock()
+
+	ep.joinInfo.gw6 = types.GetIPCopy(gw6)
+	return nil
+}
+
+func (ep *endpoint) SetHostsPath(path string) error {
+	ep.Lock()
+	defer ep.Unlock()
+
+	ep.joinInfo.hostsPath = path
+	return nil
+}
+
+func (ep *endpoint) SetResolvConfPath(path string) error {
+	ep.Lock()
+	defer ep.Unlock()
+
+	ep.joinInfo.resolvConfPath = path
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/error.go b/vendor/src/github.com/docker/libnetwork/error.go
new file mode 100644
index 0000000..a1cd01d
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/error.go
@@ -0,0 +1,168 @@
+package libnetwork
+
+import (
+	"fmt"
+)
+
+// ErrNoSuchNetwork is returned when a network query finds no result
+type ErrNoSuchNetwork string
+
+func (nsn ErrNoSuchNetwork) Error() string {
+	return fmt.Sprintf("network %s not found", string(nsn))
+}
+
+// BadRequest denotes the type of this error
+func (nsn ErrNoSuchNetwork) BadRequest() {}
+
+// ErrNoSuchEndpoint is returned when a endpoint query finds no result
+type ErrNoSuchEndpoint string
+
+func (nse ErrNoSuchEndpoint) Error() string {
+	return fmt.Sprintf("endpoint %s not found", string(nse))
+}
+
+// BadRequest denotes the type of this error
+func (nse ErrNoSuchEndpoint) BadRequest() {}
+
+// ErrInvalidNetworkDriver is returned if an invalid driver
+// name is passed.
+type ErrInvalidNetworkDriver string
+
+func (ind ErrInvalidNetworkDriver) Error() string {
+	return fmt.Sprintf("invalid driver bound to network: %s", string(ind))
+}
+
+// BadRequest denotes the type of this error
+func (ind ErrInvalidNetworkDriver) BadRequest() {}
+
+// ErrInvalidJoin is returned if a join is attempted on an endpoint
+// which already has a container joined.
+type ErrInvalidJoin struct{}
+
+func (ij ErrInvalidJoin) Error() string {
+	return "a container has already joined the endpoint"
+}
+
+// BadRequest denotes the type of this error
+func (ij ErrInvalidJoin) BadRequest() {}
+
+// ErrNoContainer is returned when the endpoint has no container
+// attached to it.
+type ErrNoContainer struct{}
+
+func (nc ErrNoContainer) Error() string {
+	return "a container has already joined the endpoint"
+}
+
+// Maskable denotes the type of this error
+func (nc ErrNoContainer) Maskable() {}
+
+// ErrInvalidID is returned when a query-by-id method is being invoked
+// with an empty id parameter
+type ErrInvalidID string
+
+func (ii ErrInvalidID) Error() string {
+	return fmt.Sprintf("invalid id: %s", string(ii))
+}
+
+// BadRequest denotes the type of this error
+func (ii ErrInvalidID) BadRequest() {}
+
+// ErrInvalidName is returned when a query-by-name or resource create method is
+// invoked with an empty name parameter
+type ErrInvalidName string
+
+func (in ErrInvalidName) Error() string {
+	return fmt.Sprintf("invalid name: %s", string(in))
+}
+
+// BadRequest denotes the type of this error
+func (in ErrInvalidName) BadRequest() {}
+
+// NetworkTypeError type is returned when the network type string is not
+// known to libnetwork.
+type NetworkTypeError string
+
+func (nt NetworkTypeError) Error() string {
+	return fmt.Sprintf("unknown driver %q", string(nt))
+}
+
+// NotFound denotes the type of this error
+func (nt NetworkTypeError) NotFound() {}
+
+// NetworkNameError is returned when a network with the same name already exists.
+type NetworkNameError string
+
+func (nnr NetworkNameError) Error() string {
+	return fmt.Sprintf("network with name %s already exists", string(nnr))
+}
+
+// Forbidden denotes the type of this error
+func (nnr NetworkNameError) Forbidden() {}
+
+// UnknownNetworkError is returned when libnetwork could not find in it's database
+// a network with the same name and id.
+type UnknownNetworkError struct {
+	name string
+	id   string
+}
+
+func (une *UnknownNetworkError) Error() string {
+	return fmt.Sprintf("unknown network %s id %s", une.name, une.id)
+}
+
+// NotFound denotes the type of this error
+func (une *UnknownNetworkError) NotFound() {}
+
+// ActiveEndpointsError is returned when a network is deleted which has active
+// endpoints in it.
+type ActiveEndpointsError struct {
+	name string
+	id   string
+}
+
+func (aee *ActiveEndpointsError) Error() string {
+	return fmt.Sprintf("network with name %s id %s has active endpoints", aee.name, aee.id)
+}
+
+// Forbidden denotes the type of this error
+func (aee *ActiveEndpointsError) Forbidden() {}
+
+// UnknownEndpointError is returned when libnetwork could not find in it's database
+// an endpoint with the same name and id.
+type UnknownEndpointError struct {
+	name string
+	id   string
+}
+
+func (uee *UnknownEndpointError) Error() string {
+	return fmt.Sprintf("unknown endpoint %s id %s", uee.name, uee.id)
+}
+
+// NotFound denotes the type of this error
+func (uee *UnknownEndpointError) NotFound() {}
+
+// ActiveContainerError is returned when an endpoint is deleted which has active
+// containers attached to it.
+type ActiveContainerError struct {
+	name string
+	id   string
+}
+
+func (ace *ActiveContainerError) Error() string {
+	return fmt.Sprintf("endpoint with name %s id %s has active containers", ace.name, ace.id)
+}
+
+// Forbidden denotes the type of this error
+func (ace *ActiveContainerError) Forbidden() {}
+
+// InvalidContainerIDError is returned when an invalid container id is passed
+// in Join/Leave
+type InvalidContainerIDError string
+
+func (id InvalidContainerIDError) Error() string {
+	return fmt.Sprintf("invalid container id %s", string(id))
+}
+
+// BadRequest denotes the type of this error
+func (id InvalidContainerIDError) BadRequest() {}
diff --git a/vendor/src/github.com/docker/libnetwork/errors_test.go b/vendor/src/github.com/docker/libnetwork/errors_test.go
new file mode 100644
index 0000000..29bf668
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/errors_test.go
@@ -0,0 +1,51 @@
+package libnetwork
+
+import (
+	"testing"
+
+	"github.com/docker/libnetwork/types"
+)
+
+func TestErrorInterfaces(t *testing.T) {
+
+	badRequestErrorList := []error{ErrInvalidID(""), ErrInvalidName(""), ErrInvalidJoin{}, ErrInvalidNetworkDriver(""), InvalidContainerIDError(""), ErrNoSuchNetwork(""), ErrNoSuchEndpoint("")}
+	for _, err := range badRequestErrorList {
+		switch u := err.(type) {
+		case types.BadRequestError:
+			return
+		default:
+			t.Fatalf("Failed to detect err %v is of type BadRequestError. Got type: %T", err, u)
+		}
+	}
+
+	maskableErrorList := []error{ErrNoContainer{}}
+	for _, err := range maskableErrorList {
+		switch u := err.(type) {
+		case types.MaskableError:
+			return
+		default:
+			t.Fatalf("Failed to detect err %v is of type MaskableError. Got type: %T", err, u)
+		}
+	}
+
+	notFoundErrorList := []error{NetworkTypeError(""), &UnknownNetworkError{}, &UnknownEndpointError{}}
+	for _, err := range notFoundErrorList {
+		switch u := err.(type) {
+		case types.NotFoundError:
+			return
+		default:
+			t.Fatalf("Failed to detect err %v is of type NotFoundError. Got type: %T", err, u)
+		}
+	}
+
+	forbiddenErrorList := []error{NetworkTypeError(""), &UnknownNetworkError{}, &UnknownEndpointError{}}
+	for _, err := range forbiddenErrorList {
+		switch u := err.(type) {
+		case types.ForbiddenError:
+			return
+		default:
+			t.Fatalf("Failed to detect err %v is of type ForbiddenError. Got type: %T", err, u)
+		}
+	}
+
+}
diff --git a/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
new file mode 100644
index 0000000..88e6b63
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
@@ -0,0 +1,79 @@
+package etchosts
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"regexp"
+)
+
+// Record Structure for a single host record
+type Record struct {
+	Hosts string
+	IP    string
+}
+
+// WriteTo writes record to file and returns bytes written or error
+func (r Record) WriteTo(w io.Writer) (int64, error) {
+	n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts)
+	return int64(n), err
+}
+
+// Default hosts config records slice
+var defaultContent = []Record{
+	{Hosts: "localhost", IP: "127.0.0.1"},
+	{Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"},
+	{Hosts: "ip6-localnet", IP: "fe00::0"},
+	{Hosts: "ip6-mcastprefix", IP: "ff00::0"},
+	{Hosts: "ip6-allnodes", IP: "ff02::1"},
+	{Hosts: "ip6-allrouters", IP: "ff02::2"},
+}
+
+// Build function
+// path is path to host file string required
+// IP, hostname, and domainname set main record leave empty for no master record
+// extraContent is an array of extra host records.
+func Build(path, IP, hostname, domainname string, extraContent []Record) error {
+	content := bytes.NewBuffer(nil)
+	if IP != "" {
+		//set main record
+		var mainRec Record
+		mainRec.IP = IP
+		if domainname != "" {
+			mainRec.Hosts = fmt.Sprintf("%s.%s %s", hostname, domainname, hostname)
+		} else {
+			mainRec.Hosts = hostname
+		}
+		if _, err := mainRec.WriteTo(content); err != nil {
+			return err
+		}
+	}
+	// Write defaultContent slice to buffer
+	for _, r := range defaultContent {
+		if _, err := r.WriteTo(content); err != nil {
+			return err
+		}
+	}
+	// Write extra content from function arguments
+	for _, r := range extraContent {
+		if _, err := r.WriteTo(content); err != nil {
+			return err
+		}
+	}
+
+	return ioutil.WriteFile(path, content.Bytes(), 0644)
+}
+
+// Update all IP addresses where hostname matches.
+// path is path to host file
+// IP is new IP address
+// hostname is hostname to search for to replace IP
+func Update(path, IP, hostname string) error {
+	old, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err
+	}
+	var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)", regexp.QuoteMeta(hostname)))
+	return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2")), 0644)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/etchosts/etchosts_test.go b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts_test.go
new file mode 100644
index 0000000..8c8b87c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts_test.go
@@ -0,0 +1,136 @@
+package etchosts
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+func TestBuildDefault(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	// check that /etc/hosts has consistent ordering
+	for i := 0; i <= 5; i++ {
+		err = Build(file.Name(), "", "", "", nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		content, err := ioutil.ReadFile(file.Name())
+		if err != nil {
+			t.Fatal(err)
+		}
+		expected := "127.0.0.1\tlocalhost\n::1\tlocalhost ip6-localhost ip6-loopback\nfe00::0\tip6-localnet\nff00::0\tip6-mcastprefix\nff02::1\tip6-allnodes\nff02::2\tip6-allrouters\n"
+
+		if expected != string(content) {
+			t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+		}
+	}
+}
+
+func TestBuildHostnameDomainname(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err := ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+}
+
+func TestBuildHostname(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	err = Build(file.Name(), "10.11.12.13", "testhostname", "", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err := ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := "10.11.12.13\ttesthostname\n"; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+}
+
+func TestBuildNoIP(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	err = Build(file.Name(), "", "testhostname", "", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err := ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := ""; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+}
+
+func TestUpdate(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	if err := Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil); err != nil {
+		t.Fatal(err)
+	}
+
+	content, err := ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+
+	if err := Update(file.Name(), "1.1.1.1", "testhostname"); err != nil {
+		t.Fatal(err)
+	}
+
+	content, err = ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := "1.1.1.1\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/ipallocator/allocator.go b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator.go
new file mode 100644
index 0000000..1560099
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator.go
@@ -0,0 +1,172 @@
+// Package ipallocator defines the default IP allocator. It will move out of libnetwork as an external IPAM plugin.
+// This has been imported unchanged from Docker, besides additon of registration logic
+package ipallocator
+
+import (
+	"errors"
+	"math/big"
+	"net"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/libnetwork/netutils"
+)
+
+// allocatedMap is thread-unsafe set of allocated IP
+type allocatedMap struct {
+	p     map[string]struct{}
+	last  *big.Int
+	begin *big.Int
+	end   *big.Int
+}
+
+func newAllocatedMap(network *net.IPNet) *allocatedMap {
+	firstIP, lastIP := netutils.NetworkRange(network)
+	begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
+	end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1))
+
+	return &allocatedMap{
+		p:     make(map[string]struct{}),
+		begin: begin,
+		end:   end,
+		last:  big.NewInt(0).Sub(begin, big.NewInt(1)), // so first allocated will be begin
+	}
+}
+
+type networkSet map[string]*allocatedMap
+
+var (
+	// ErrNoAvailableIPs preformatted error
+	ErrNoAvailableIPs = errors.New("no available ip addresses on network")
+	// ErrIPAlreadyAllocated preformatted error
+	ErrIPAlreadyAllocated = errors.New("ip already allocated")
+	// ErrIPOutOfRange preformatted error
+	ErrIPOutOfRange = errors.New("requested ip is out of range")
+	// ErrNetworkAlreadyRegistered preformatted error
+	ErrNetworkAlreadyRegistered = errors.New("network already registered")
+	// ErrBadSubnet preformatted error
+	ErrBadSubnet = errors.New("network does not contain specified subnet")
+)
+
+// IPAllocator manages the ipam
+type IPAllocator struct {
+	allocatedIPs networkSet
+	mutex        sync.Mutex
+}
+
+// New returns a new instance of IPAllocator
+func New() *IPAllocator {
+	return &IPAllocator{networkSet{}, sync.Mutex{}}
+}
+
+// RegisterSubnet registers network in global allocator with bounds
+// defined by subnet. If you want to use network range you must call
+// this method before first RequestIP, otherwise full network range will be used
+func (a *IPAllocator) RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+
+	key := network.String()
+	if _, ok := a.allocatedIPs[key]; ok {
+		return ErrNetworkAlreadyRegistered
+	}
+
+	// Check that subnet is within network
+	beginIP, endIP := netutils.NetworkRange(subnet)
+	if !(network.Contains(beginIP) && network.Contains(endIP)) {
+		return ErrBadSubnet
+	}
+
+	n := newAllocatedMap(subnet)
+	a.allocatedIPs[key] = n
+	return nil
+}
+
+// RequestIP requests an available ip from the given network.  It
+// will return the next available ip if the ip provided is nil.  If the
+// ip provided is not nil it will validate that the provided ip is available
+// for use or return an error
+func (a *IPAllocator) RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+
+	key := network.String()
+	allocated, ok := a.allocatedIPs[key]
+	if !ok {
+		allocated = newAllocatedMap(network)
+		a.allocatedIPs[key] = allocated
+	}
+
+	if ip == nil {
+		return allocated.getNextIP()
+	}
+	return allocated.checkIP(ip)
+}
+
+// ReleaseIP adds the provided ip back into the pool of
+// available ips to be returned for use.
+func (a *IPAllocator) ReleaseIP(network *net.IPNet, ip net.IP) error {
+	a.mutex.Lock()
+	defer a.mutex.Unlock()
+
+	if allocated, exists := a.allocatedIPs[network.String()]; exists {
+		delete(allocated.p, ip.String())
+	}
+	return nil
+}
+
+func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) {
+	if _, ok := allocated.p[ip.String()]; ok {
+		return nil, ErrIPAlreadyAllocated
+	}
+
+	pos := ipToBigInt(ip)
+	// Verify that the IP address is within our network range.
+	if pos.Cmp(allocated.begin) == -1 || pos.Cmp(allocated.end) == 1 {
+		return nil, ErrIPOutOfRange
+	}
+
+	// Register the IP.
+	allocated.p[ip.String()] = struct{}{}
+
+	return ip, nil
+}
+
+// return an available ip if one is currently available.  If not,
+// return the next available ip for the network
+func (allocated *allocatedMap) getNextIP() (net.IP, error) {
+	pos := big.NewInt(0).Set(allocated.last)
+	allRange := big.NewInt(0).Sub(allocated.end, allocated.begin)
+	for i := big.NewInt(0); i.Cmp(allRange) <= 0; i.Add(i, big.NewInt(1)) {
+		pos.Add(pos, big.NewInt(1))
+		if pos.Cmp(allocated.end) == 1 {
+			pos.Set(allocated.begin)
+		}
+		if _, ok := allocated.p[bigIntToIP(pos).String()]; ok {
+			continue
+		}
+		allocated.p[bigIntToIP(pos).String()] = struct{}{}
+		allocated.last.Set(pos)
+		return bigIntToIP(pos), nil
+	}
+	return nil, ErrNoAvailableIPs
+}
+
+// Converts a 4 bytes IP into a 128 bit integer
+func ipToBigInt(ip net.IP) *big.Int {
+	x := big.NewInt(0)
+	if ip4 := ip.To4(); ip4 != nil {
+		return x.SetBytes(ip4)
+	}
+	if ip6 := ip.To16(); ip6 != nil {
+		return x.SetBytes(ip6)
+	}
+
+	logrus.Errorf("ipToBigInt: Wrong IP length! %s", ip)
+	return nil
+}
+
+// Converts 128 bit integer into a 4 bytes IP address
+func bigIntToIP(v *big.Int) net.IP {
+	return net.IP(v.Bytes())
+}
diff --git a/vendor/src/github.com/docker/libnetwork/ipallocator/allocator_test.go b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator_test.go
new file mode 100644
index 0000000..fffe6e3
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/ipallocator/allocator_test.go
@@ -0,0 +1,690 @@
+package ipallocator
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+	"testing"
+)
+
+func TestConversion(t *testing.T) {
+	ip := net.ParseIP("127.0.0.1")
+	i := ipToBigInt(ip)
+	if i.Cmp(big.NewInt(0x7f000001)) != 0 {
+		t.Fatal("incorrect conversion")
+	}
+	conv := bigIntToIP(i)
+	if !ip.Equal(conv) {
+		t.Error(conv.String())
+	}
+}
+
+func TestConversionIPv6(t *testing.T) {
+	ip := net.ParseIP("2a00:1450::1")
+	ip2 := net.ParseIP("2a00:1450::2")
+	ip3 := net.ParseIP("2a00:1450::1:1")
+	i := ipToBigInt(ip)
+	val, success := big.NewInt(0).SetString("2a001450000000000000000000000001", 16)
+	if !success {
+		t.Fatal("Hex-String to BigInt conversion failed.")
+	}
+	if i.Cmp(val) != 0 {
+		t.Fatal("incorrent conversion")
+	}
+
+	conv := bigIntToIP(i)
+	conv2 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(1)))
+	conv3 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(0x10000)))
+
+	if !ip.Equal(conv) {
+		t.Error("2a00:1450::1 should be equal to " + conv.String())
+	}
+	if !ip2.Equal(conv2) {
+		t.Error("2a00:1450::2 should be equal to " + conv2.String())
+	}
+	if !ip3.Equal(conv3) {
+		t.Error("2a00:1450::1:1 should be equal to " + conv3.String())
+	}
+}
+
+func TestRequestNewIps(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	var ip net.IP
+	var err error
+
+	for i := 1; i < 10; i++ {
+		ip, err = a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected {
+			t.Fatalf("Expected ip %s got %s", expected, ip.String())
+		}
+	}
+	value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+	ip, err = a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ip.String() != value {
+		t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String())
+	}
+}
+
+func TestRequestNewIpV6(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+
+	var ip net.IP
+	var err error
+	for i := 1; i < 10; i++ {
+		ip, err = a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if expected := fmt.Sprintf("2a00:1450::%d", i); ip.String() != expected {
+			t.Fatalf("Expected ip %s got %s", expected, ip.String())
+		}
+	}
+	value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+	ip, err = a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ip.String() != value {
+		t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String())
+	}
+}
+
+func TestReleaseIp(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	ip, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReleaseIpV6(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+
+	ip, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestGetReleasedIp(t *testing.T) {
+	a := New()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	ip, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	value := ip.String()
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 253; i++ {
+		_, err = a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = a.ReleaseIP(network, ip)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	ip, err = a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ip.String() != value {
+		t.Fatalf("Expected to receive same ip %s got %s", value, ip.String())
+	}
+}
+
+func TestGetReleasedIpV6(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0},
+	}
+
+	ip, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	value := ip.String()
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	for i := 0; i < 253; i++ {
+		_, err = a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = a.ReleaseIP(network, ip)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	ip, err = a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if ip.String() != value {
+		t.Fatalf("Expected to receive same ip %s got %s", value, ip.String())
+	}
+}
+
+func TestRequestSpecificIp(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 224},
+	}
+
+	ip := net.ParseIP("192.168.0.5")
+
+	// Request a "good" IP.
+	if _, err := a.RequestIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	// Request the same IP again.
+	if _, err := a.RequestIP(network, ip); err != ErrIPAlreadyAllocated {
+		t.Fatalf("Got the same IP twice: %#v", err)
+	}
+
+	// Request an out of range IP.
+	if _, err := a.RequestIP(network, net.ParseIP("192.168.0.42")); err != ErrIPOutOfRange {
+		t.Fatalf("Got an out of range IP: %#v", err)
+	}
+}
+
+func TestRequestSpecificIpV6(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+
+	ip := net.ParseIP("2a00:1450::5")
+
+	// Request a "good" IP.
+	if _, err := a.RequestIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	// Request the same IP again.
+	if _, err := a.RequestIP(network, ip); err != ErrIPAlreadyAllocated {
+		t.Fatalf("Got the same IP twice: %#v", err)
+	}
+
+	// Request an out of range IP.
+	if _, err := a.RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange {
+		t.Fatalf("Got an out of range IP: %#v", err)
+	}
+}
+
+func TestIPAllocator(t *testing.T) {
+	a := New()
+
+	expectedIPs := []net.IP{
+		0: net.IPv4(127, 0, 0, 1),
+		1: net.IPv4(127, 0, 0, 2),
+		2: net.IPv4(127, 0, 0, 3),
+		3: net.IPv4(127, 0, 0, 4),
+		4: net.IPv4(127, 0, 0, 5),
+		5: net.IPv4(127, 0, 0, 6),
+	}
+
+	gwIP, n, _ := net.ParseCIDR("127.0.0.1/29")
+
+	network := &net.IPNet{IP: gwIP, Mask: n.Mask}
+	// Pool after initialisation (f = free, u = used)
+	// 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f)
+	//  ↑
+
+	// Check that we get 6 IPs, from 127.0.0.1–127.0.0.6, in that
+	// order.
+	for i := 0; i < 6; i++ {
+		ip, err := a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		assertIPEquals(t, expectedIPs[i], ip)
+	}
+	// Before loop begin
+	// 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f)
+	//  ↑
+
+	// After i = 0
+	// 1(u) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f)
+	//         ↑
+
+	// After i = 1
+	// 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(f)
+	//                ↑
+
+	// After i = 2
+	// 1(u) - 2(u) - 3(u) - 4(f) - 5(f) - 6(f)
+	//                       ↑
+
+	// After i = 3
+	// 1(u) - 2(u) - 3(u) - 4(u) - 5(f) - 6(f)
+	//                              ↑
+
+	// After i = 4
+	// 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(f)
+	//                                     ↑
+
+	// After i = 5
+	// 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(u)
+	//  ↑
+
+	// Check that there are no more IPs
+	ip, err := a.RequestIP(network, nil)
+	if err == nil {
+		t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip)
+	}
+
+	// Release some IPs in non-sequential order
+	if err := a.ReleaseIP(network, expectedIPs[3]); err != nil {
+		t.Fatal(err)
+	}
+	// 1(u) - 2(u) - 3(u) - 4(f) - 5(u) - 6(u)
+	//                       ↑
+
+	if err := a.ReleaseIP(network, expectedIPs[2]); err != nil {
+		t.Fatal(err)
+	}
+	// 1(u) - 2(u) - 3(f) - 4(f) - 5(u) - 6(u)
+	//                ↑
+
+	if err := a.ReleaseIP(network, expectedIPs[4]); err != nil {
+		t.Fatal(err)
+	}
+	// 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(u)
+	//                              ↑
+
+	// Make sure that IPs are reused in sequential order, starting
+	// with the first released IP
+	newIPs := make([]net.IP, 3)
+	for i := 0; i < 3; i++ {
+		ip, err := a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		newIPs[i] = ip
+	}
+	assertIPEquals(t, expectedIPs[2], newIPs[0])
+	assertIPEquals(t, expectedIPs[3], newIPs[1])
+	assertIPEquals(t, expectedIPs[4], newIPs[2])
+
+	_, err = a.RequestIP(network, nil)
+	if err == nil {
+		t.Fatal("There shouldn't be any IP addresses at this point")
+	}
+}
+
+func TestAllocateFirstIP(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 0},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	firstIP := network.IP.To4().Mask(network.Mask)
+	first := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
+
+	ip, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	allocated := ipToBigInt(ip)
+
+	if allocated == first {
+		t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated)
+	}
+}
+
+func TestAllocateAllIps(t *testing.T) {
+	a := New()
+
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+
+	var (
+		current, first net.IP
+		err            error
+		isFirst        = true
+	)
+
+	for err == nil {
+		current, err = a.RequestIP(network, nil)
+		if isFirst {
+			first = current
+			isFirst = false
+		}
+	}
+
+	if err != ErrNoAvailableIPs {
+		t.Fatal(err)
+	}
+
+	if _, err := a.RequestIP(network, nil); err != ErrNoAvailableIPs {
+		t.Fatal(err)
+	}
+
+	if err := a.ReleaseIP(network, first); err != nil {
+		t.Fatal(err)
+	}
+
+	again, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, first, again)
+
+	// ensure that alloc.last == alloc.begin won't result in dead loop
+	if _, err := a.RequestIP(network, nil); err != ErrNoAvailableIPs {
+		t.Fatal(err)
+	}
+
+	// Test by making alloc.last the only free ip and ensure we get it back
+	// #1. first of the range, (alloc.last == ipToInt(first) already)
+	if err := a.ReleaseIP(network, first); err != nil {
+		t.Fatal(err)
+	}
+
+	ret, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, first, ret)
+
+	// #2. last of the range, note that current is the last one
+	last := net.IPv4(192, 168, 0, 254)
+	setLastTo(t, a, network, last)
+
+	ret, err = a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, last, ret)
+
+	// #3. middle of the range
+	mid := net.IPv4(192, 168, 0, 7)
+	setLastTo(t, a, network, mid)
+
+	ret, err = a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, mid, ret)
+}
+
+// make sure the pool is full when calling setLastTo.
+// we don't cheat here
+func setLastTo(t *testing.T, a *IPAllocator, network *net.IPNet, ip net.IP) {
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+
+	ret, err := a.RequestIP(network, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertIPEquals(t, ip, ret)
+
+	if err := a.ReleaseIP(network, ip); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestAllocateDifferentSubnets(t *testing.T) {
+	a := New()
+	network1 := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	network2 := &net.IPNet{
+		IP:   []byte{127, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	network3 := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+	network4 := &net.IPNet{
+		IP:   []byte{0x2a, 0x00, 0x16, 0x32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
+		Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
+	}
+	expectedIPs := []net.IP{
+		0: net.IPv4(192, 168, 0, 1),
+		1: net.IPv4(192, 168, 0, 2),
+		2: net.IPv4(127, 0, 0, 1),
+		3: net.IPv4(127, 0, 0, 2),
+		4: net.ParseIP("2a00:1450::1"),
+		5: net.ParseIP("2a00:1450::2"),
+		6: net.ParseIP("2a00:1450::3"),
+		7: net.ParseIP("2a00:1632::1"),
+		8: net.ParseIP("2a00:1632::2"),
+	}
+
+	ip11, err := a.RequestIP(network1, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip12, err := a.RequestIP(network1, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip21, err := a.RequestIP(network2, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip22, err := a.RequestIP(network2, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip31, err := a.RequestIP(network3, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip32, err := a.RequestIP(network3, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip33, err := a.RequestIP(network3, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip41, err := a.RequestIP(network4, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ip42, err := a.RequestIP(network4, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertIPEquals(t, expectedIPs[0], ip11)
+	assertIPEquals(t, expectedIPs[1], ip12)
+	assertIPEquals(t, expectedIPs[2], ip21)
+	assertIPEquals(t, expectedIPs[3], ip22)
+	assertIPEquals(t, expectedIPs[4], ip31)
+	assertIPEquals(t, expectedIPs[5], ip32)
+	assertIPEquals(t, expectedIPs[6], ip33)
+	assertIPEquals(t, expectedIPs[7], ip41)
+	assertIPEquals(t, expectedIPs[8], ip42)
+}
+
+func TestRegisterBadTwice(t *testing.T) {
+	a := New()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 1, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	subnet := &net.IPNet{
+		IP:   []byte{192, 168, 1, 8},
+		Mask: []byte{255, 255, 255, 248},
+	}
+
+	if err := a.RegisterSubnet(network, subnet); err != nil {
+		t.Fatal(err)
+	}
+	subnet = &net.IPNet{
+		IP:   []byte{192, 168, 1, 16},
+		Mask: []byte{255, 255, 255, 248},
+	}
+	if err := a.RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered {
+		t.Fatalf("Expecteded ErrNetworkAlreadyRegistered error, got %v", err)
+	}
+}
+
+func TestRegisterBadRange(t *testing.T) {
+	a := New()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 1, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	subnet := &net.IPNet{
+		IP:   []byte{192, 168, 1, 1},
+		Mask: []byte{255, 255, 0, 0},
+	}
+	if err := a.RegisterSubnet(network, subnet); err != ErrBadSubnet {
+		t.Fatalf("Expected ErrBadSubnet error, got %v", err)
+	}
+}
+
+func TestAllocateFromRange(t *testing.T) {
+	a := New()
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	// 192.168.1.9 - 192.168.1.14
+	subnet := &net.IPNet{
+		IP:   []byte{192, 168, 0, 8},
+		Mask: []byte{255, 255, 255, 248},
+	}
+
+	if err := a.RegisterSubnet(network, subnet); err != nil {
+		t.Fatal(err)
+	}
+	expectedIPs := []net.IP{
+		0: net.IPv4(192, 168, 0, 9),
+		1: net.IPv4(192, 168, 0, 10),
+		2: net.IPv4(192, 168, 0, 11),
+		3: net.IPv4(192, 168, 0, 12),
+		4: net.IPv4(192, 168, 0, 13),
+		5: net.IPv4(192, 168, 0, 14),
+	}
+	for _, ip := range expectedIPs {
+		rip, err := a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		assertIPEquals(t, ip, rip)
+	}
+
+	if _, err := a.RequestIP(network, nil); err != ErrNoAvailableIPs {
+		t.Fatalf("Expected ErrNoAvailableIPs error, got %v", err)
+	}
+	for _, ip := range expectedIPs {
+		a.ReleaseIP(network, ip)
+		rip, err := a.RequestIP(network, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		assertIPEquals(t, ip, rip)
+	}
+}
+
+func assertIPEquals(t *testing.T, ip1, ip2 net.IP) {
+	if !ip1.Equal(ip2) {
+		t.Fatalf("Expected IP %s, got %s", ip1, ip2)
+	}
+}
+
+func BenchmarkRequestIP(b *testing.B) {
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		a := New()
+
+		for j := 0; j < 253; j++ {
+			_, err := a.RequestIP(network, nil)
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/iptables/firewalld.go b/vendor/src/github.com/docker/libnetwork/iptables/firewalld.go
new file mode 100644
index 0000000..1227647
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/iptables/firewalld.go
@@ -0,0 +1,164 @@
+package iptables
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/godbus/dbus"
+)
+
+// IPV defines the table string
+type IPV string
+
+const (
+	// Iptables point ipv4 table
+	Iptables IPV = "ipv4"
+	// IP6Tables point to ipv6 table
+	IP6Tables IPV = "ipv6"
+	// Ebtables point to bridge table
+	Ebtables IPV = "eb"
+)
+const (
+	dbusInterface = "org.fedoraproject.FirewallD1"
+	dbusPath      = "/org/fedoraproject/FirewallD1"
+)
+
+// Conn is a connection to firewalld dbus endpoint.
+type Conn struct {
+	sysconn *dbus.Conn
+	sysobj  *dbus.Object
+	signal  chan *dbus.Signal
+}
+
+var (
+	connection       *Conn
+	firewalldRunning bool      // is Firewalld service running
+	onReloaded       []*func() // callbacks when Firewalld has been reloaded
+)
+
+// FirewalldInit initializes firewalld management code.
+func FirewalldInit() error {
+	var err error
+
+	if connection, err = newConnection(); err != nil {
+		return fmt.Errorf("Failed to connect to D-Bus system bus: %v", err)
+	}
+	if connection != nil {
+		go signalHandler()
+	}
+
+	firewalldRunning = checkRunning()
+	return nil
+}
+
+// New() establishes a connection to the system bus.
+func newConnection() (*Conn, error) {
+	c := new(Conn)
+	if err := c.initConnection(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+// Innitialize D-Bus connection.
+func (c *Conn) initConnection() error {
+	var err error
+
+	c.sysconn, err = dbus.SystemBus()
+	if err != nil {
+		return err
+	}
+
+	// This never fails, even if the service is not running atm.
+	c.sysobj = c.sysconn.Object(dbusInterface, dbus.ObjectPath(dbusPath))
+
+	rule := fmt.Sprintf("type='signal',path='%s',interface='%s',sender='%s',member='Reloaded'",
+		dbusPath, dbusInterface, dbusInterface)
+	c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule)
+
+	rule = fmt.Sprintf("type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged',path='/org/freedesktop/DBus',sender='org.freedesktop.DBus',arg0='%s'",
+		dbusInterface)
+	c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule)
+
+	c.signal = make(chan *dbus.Signal, 10)
+	c.sysconn.Signal(c.signal)
+
+	return nil
+}
+
+func signalHandler() {
+	for signal := range connection.signal {
+		if strings.Contains(signal.Name, "NameOwnerChanged") {
+			firewalldRunning = checkRunning()
+			dbusConnectionChanged(signal.Body)
+		} else if strings.Contains(signal.Name, "Reloaded") {
+			reloaded()
+		}
+	}
+}
+
+func dbusConnectionChanged(args []interface{}) {
+	name := args[0].(string)
+	oldOwner := args[1].(string)
+	newOwner := args[2].(string)
+
+	if name != dbusInterface {
+		return
+	}
+
+	if len(newOwner) > 0 {
+		connectionEstablished()
+	} else if len(oldOwner) > 0 {
+		connectionLost()
+	}
+}
+
+func connectionEstablished() {
+	reloaded()
+}
+
+func connectionLost() {
+	// Doesn't do anything for now. Libvirt also doesn't react to this.
+}
+
+// call all callbacks
+func reloaded() {
+	for _, pf := range onReloaded {
+		(*pf)()
+	}
+}
+
+// OnReloaded add callback
+func OnReloaded(callback func()) {
+	for _, pf := range onReloaded {
+		if pf == &callback {
+			return
+		}
+	}
+	onReloaded = append(onReloaded, &callback)
+}
+
+// Call some remote method to see whether the service is actually running.
+func checkRunning() bool {
+	var zone string
+	var err error
+
+	if connection != nil {
+		err = connection.sysobj.Call(dbusInterface+".getDefaultZone", 0).Store(&zone)
+		logrus.Infof("Firewalld running: %t", err == nil)
+		return err == nil
+	}
+	return false
+}
+
+// Passthrough method simply passes args through to iptables/ip6tables
+func Passthrough(ipv IPV, args ...string) ([]byte, error) {
+	var output string
+	logrus.Debugf("Firewalld passthrough: %s, %s", ipv, args)
+	if err := connection.sysobj.Call(dbusInterface+".direct.passthrough", 0, ipv, args).Store(&output); err != nil {
+		return nil, err
+	}
+	return []byte(output), nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/iptables/firewalld_test.go b/vendor/src/github.com/docker/libnetwork/iptables/firewalld_test.go
new file mode 100644
index 0000000..547ba7e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/iptables/firewalld_test.go
@@ -0,0 +1,83 @@
+package iptables
+
+import (
+	"net"
+	"strconv"
+	"testing"
+)
+
+func TestFirewalldInit(t *testing.T) {
+	if !checkRunning() {
+		t.Skip("firewalld is not running")
+	}
+	if err := FirewalldInit(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReloaded(t *testing.T) {
+	var err error
+	var fwdChain *Chain
+
+	fwdChain, err = NewChain("FWD", "lo", Filter, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer fwdChain.Remove()
+
+	// copy-pasted from iptables_test:TestLink
+	ip1 := net.ParseIP("192.168.1.1")
+	ip2 := net.ParseIP("192.168.1.2")
+	port := 1234
+	proto := "tcp"
+
+	err = fwdChain.Link(Append, ip1, ip2, port, proto)
+	if err != nil {
+		t.Fatal(err)
+	} else {
+		// to be re-called again later
+		OnReloaded(func() { fwdChain.Link(Append, ip1, ip2, port, proto) })
+	}
+
+	rule1 := []string{
+		"-i", fwdChain.Bridge,
+		"-o", fwdChain.Bridge,
+		"-p", proto,
+		"-s", ip1.String(),
+		"-d", ip2.String(),
+		"--dport", strconv.Itoa(port),
+		"-j", "ACCEPT"}
+
+	if !Exists(fwdChain.Table, fwdChain.Name, rule1...) {
+		t.Fatalf("rule1 does not exist")
+	}
+
+	// flush all rules
+	fwdChain.Remove()
+
+	reloaded()
+
+	// make sure the rules have been recreated
+	if !Exists(fwdChain.Table, fwdChain.Name, rule1...) {
+		t.Fatalf("rule1 hasn't been recreated")
+	}
+}
+
+func TestPassthrough(t *testing.T) {
+	rule1 := []string{
+		"-i", "lo",
+		"-p", "udp",
+		"--dport", "123",
+		"-j", "ACCEPT"}
+
+	if firewalldRunning {
+		_, err := Passthrough(Iptables, append([]string{"-A"}, rule1...)...)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !Exists(Filter, "INPUT", rule1...) {
+			t.Fatalf("rule1 does not exist")
+		}
+	}
+
+}
diff --git a/vendor/src/github.com/docker/libnetwork/iptables/iptables.go b/vendor/src/github.com/docker/libnetwork/iptables/iptables.go
new file mode 100644
index 0000000..4299a7e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/iptables/iptables.go
@@ -0,0 +1,320 @@
+package iptables
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"os/exec"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Action signifies the iptable action.
+type Action string
+
+// Table refers to Nat, Filter or Mangle.
+type Table string
+
+const (
+	// Append appends the rule at the end of the chain.
+	Append Action = "-A"
+	// Delete deletes the rule from the chain.
+	Delete Action = "-D"
+	// Insert inserts the rule at the top of the chain.
+	Insert Action = "-I"
+	// Nat table is used for nat translation rules.
+	Nat Table = "nat"
+	// Filter table is used for filter rules.
+	Filter Table = "filter"
+	// Mangle table is used for mangling the packet.
+	Mangle Table = "mangle"
+)
+
+var (
+	iptablesPath  string
+	supportsXlock = false
+	// used to lock iptables commands if xtables lock is not supported
+	bestEffortLock sync.Mutex
+	// ErrIptablesNotFound is returned when the rule is not found.
+	ErrIptablesNotFound = errors.New("Iptables not found")
+)
+
+// Chain defines the iptables chain.
+type Chain struct {
+	Name   string
+	Bridge string
+	Table  Table
+}
+
+// ChainError is returned to represent errors during ip table operation.
+type ChainError struct {
+	Chain  string
+	Output []byte
+}
+
+func (e ChainError) Error() string {
+	return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output))
+}
+
+func initCheck() error {
+
+	if iptablesPath == "" {
+		path, err := exec.LookPath("iptables")
+		if err != nil {
+			return ErrIptablesNotFound
+		}
+		iptablesPath = path
+		supportsXlock = exec.Command(iptablesPath, "--wait", "-L", "-n").Run() == nil
+	}
+	return nil
+}
+
+// NewChain adds a new chain to ip table.
+func NewChain(name, bridge string, table Table, hairpinMode bool) (*Chain, error) {
+	c := &Chain{
+		Name:   name,
+		Bridge: bridge,
+		Table:  table,
+	}
+
+	if string(c.Table) == "" {
+		c.Table = Filter
+	}
+
+	// Add chain if it doesn't exist
+	if _, err := Raw("-t", string(c.Table), "-n", "-L", c.Name); err != nil {
+		if output, err := Raw("-t", string(c.Table), "-N", c.Name); err != nil {
+			return nil, err
+		} else if len(output) != 0 {
+			return nil, fmt.Errorf("Could not create %s/%s chain: %s", c.Table, c.Name, output)
+		}
+	}
+
+	switch table {
+	case Nat:
+		preroute := []string{
+			"-m", "addrtype",
+			"--dst-type", "LOCAL"}
+		if !Exists(Nat, "PREROUTING", preroute...) {
+			if err := c.Prerouting(Append, preroute...); err != nil {
+				return nil, fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err)
+			}
+		}
+		output := []string{
+			"-m", "addrtype",
+			"--dst-type", "LOCAL"}
+		if !hairpinMode {
+			output = append(output, "!", "--dst", "127.0.0.0/8")
+		}
+		if !Exists(Nat, "OUTPUT", output...) {
+			if err := c.Output(Append, output...); err != nil {
+				return nil, fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err)
+			}
+		}
+	case Filter:
+		link := []string{
+			"-o", c.Bridge,
+			"-j", c.Name}
+		if !Exists(Filter, "FORWARD", link...) {
+			insert := append([]string{string(Insert), "FORWARD"}, link...)
+			if output, err := Raw(insert...); err != nil {
+				return nil, err
+			} else if len(output) != 0 {
+				return nil, fmt.Errorf("Could not create linking rule to %s/%s: %s", c.Table, c.Name, output)
+			}
+		}
+	}
+	return c, nil
+}
+
+// RemoveExistingChain removes existing chain from the table.
+func RemoveExistingChain(name string, table Table) error {
+	c := &Chain{
+		Name:  name,
+		Table: table,
+	}
+	if string(c.Table) == "" {
+		c.Table = Filter
+	}
+	return c.Remove()
+}
+
+// Forward adds forwarding rule to 'filter' table and corresponding nat rule to 'nat' table.
+func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int) error {
+	daddr := ip.String()
+	if ip.IsUnspecified() {
+		// iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we
+		// want "0.0.0.0/0". "0/0" is correctly interpreted as "any
+		// value" by both iptables and ip6tables.
+		daddr = "0/0"
+	}
+	if output, err := Raw("-t", string(Nat), string(action), c.Name,
+		"-p", proto,
+		"-d", daddr,
+		"--dport", strconv.Itoa(port),
+		"-j", "DNAT",
+		"--to-destination", net.JoinHostPort(destAddr, strconv.Itoa(destPort))); err != nil {
+		return err
+	} else if len(output) != 0 {
+		return ChainError{Chain: "FORWARD", Output: output}
+	}
+
+	if output, err := Raw("-t", string(Filter), string(action), c.Name,
+		"!", "-i", c.Bridge,
+		"-o", c.Bridge,
+		"-p", proto,
+		"-d", destAddr,
+		"--dport", strconv.Itoa(destPort),
+		"-j", "ACCEPT"); err != nil {
+		return err
+	} else if len(output) != 0 {
+		return ChainError{Chain: "FORWARD", Output: output}
+	}
+
+	if output, err := Raw("-t", string(Nat), string(action), "POSTROUTING",
+		"-p", proto,
+		"-s", destAddr,
+		"-d", destAddr,
+		"--dport", strconv.Itoa(destPort),
+		"-j", "MASQUERADE"); err != nil {
+		return err
+	} else if len(output) != 0 {
+		return ChainError{Chain: "FORWARD", Output: output}
+	}
+
+	return nil
+}
+
+// Link adds reciprocal ACCEPT rule for two supplied IP addresses.
+// Traffic is allowed from ip1 to ip2 and vice-versa
+func (c *Chain) Link(action Action, ip1, ip2 net.IP, port int, proto string) error {
+	if output, err := Raw("-t", string(Filter), string(action), c.Name,
+		"-i", c.Bridge, "-o", c.Bridge,
+		"-p", proto,
+		"-s", ip1.String(),
+		"-d", ip2.String(),
+		"--dport", strconv.Itoa(port),
+		"-j", "ACCEPT"); err != nil {
+		return err
+	} else if len(output) != 0 {
+		return fmt.Errorf("Error iptables forward: %s", output)
+	}
+	if output, err := Raw("-t", string(Filter), string(action), c.Name,
+		"-i", c.Bridge, "-o", c.Bridge,
+		"-p", proto,
+		"-s", ip2.String(),
+		"-d", ip1.String(),
+		"--sport", strconv.Itoa(port),
+		"-j", "ACCEPT"); err != nil {
+		return err
+	} else if len(output) != 0 {
+		return fmt.Errorf("Error iptables forward: %s", output)
+	}
+	return nil
+}
+
+// Prerouting adds linking rule to nat/PREROUTING chain.
+func (c *Chain) Prerouting(action Action, args ...string) error {
+	a := []string{"-t", string(Nat), string(action), "PREROUTING"}
+	if len(args) > 0 {
+		a = append(a, args...)
+	}
+	if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
+		return err
+	} else if len(output) != 0 {
+		return ChainError{Chain: "PREROUTING", Output: output}
+	}
+	return nil
+}
+
+// Output adds linking rule to an OUTPUT chain.
+func (c *Chain) Output(action Action, args ...string) error {
+	a := []string{"-t", string(c.Table), string(action), "OUTPUT"}
+	if len(args) > 0 {
+		a = append(a, args...)
+	}
+	if output, err := Raw(append(a, "-j", c.Name)...); err != nil {
+		return err
+	} else if len(output) != 0 {
+		return ChainError{Chain: "OUTPUT", Output: output}
+	}
+	return nil
+}
+
+// Remove removes the chain.
+func (c *Chain) Remove() error {
+	// Ignore errors - This could mean the chains were never set up
+	if c.Table == Nat {
+		c.Prerouting(Delete, "-m", "addrtype", "--dst-type", "LOCAL")
+		c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8")
+		c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL") // Created in versions <= 0.1.6
+
+		c.Prerouting(Delete)
+		c.Output(Delete)
+	}
+	Raw("-t", string(c.Table), "-F", c.Name)
+	Raw("-t", string(c.Table), "-X", c.Name)
+	return nil
+}
+
+// Exists checks if a rule exists
+func Exists(table Table, chain string, rule ...string) bool {
+	if string(table) == "" {
+		table = Filter
+	}
+
+	// iptables -C, --check option was added in v.1.4.11
+	// http://ftp.netfilter.org/pub/iptables/changes-iptables-1.4.11.txt
+
+	// try -C
+	// if exit status is 0 then return true, the rule exists
+	if _, err := Raw(append([]string{
+		"-t", string(table), "-C", chain}, rule...)...); err == nil {
+		return true
+	}
+
+	// parse "iptables -S" for the rule (this checks rules in a specific chain
+	// in a specific table)
+	ruleString := strings.Join(rule, " ")
+	existingRules, _ := exec.Command(iptablesPath, "-t", string(table), "-S", chain).Output()
+
+	return strings.Contains(string(existingRules), ruleString)
+}
+
+// Raw calls 'iptables' system command, passing supplied arguments.
+func Raw(args ...string) ([]byte, error) {
+	if firewalldRunning {
+		output, err := Passthrough(Iptables, args...)
+		if err == nil || !strings.Contains(err.Error(), "was not provided by any .service files") {
+			return output, err
+		}
+
+	}
+
+	if err := initCheck(); err != nil {
+		return nil, err
+	}
+	if supportsXlock {
+		args = append([]string{"--wait"}, args...)
+	} else {
+		bestEffortLock.Lock()
+		defer bestEffortLock.Unlock()
+	}
+
+	logrus.Debugf("%s, %v", iptablesPath, args)
+
+	output, err := exec.Command(iptablesPath, args...).CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err)
+	}
+
+	// ignore iptables' message about xtables lock
+	if strings.Contains(string(output), "waiting for it to exit") {
+		output = []byte("")
+	}
+
+	return output, err
+}
diff --git a/vendor/src/github.com/docker/libnetwork/iptables/iptables_test.go b/vendor/src/github.com/docker/libnetwork/iptables/iptables_test.go
new file mode 100644
index 0000000..afb3587
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/iptables/iptables_test.go
@@ -0,0 +1,239 @@
+package iptables
+
+import (
+	"net"
+	"os/exec"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+const chainName = "DOCKEREST"
+
+var natChain *Chain
+var filterChain *Chain
+
+func TestNewChain(t *testing.T) {
+	var err error
+
+	natChain, err = NewChain(chainName, "lo", Nat, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	filterChain, err = NewChain(chainName, "lo", Filter, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestForward(t *testing.T) {
+	ip := net.ParseIP("192.168.1.1")
+	port := 1234
+	dstAddr := "172.17.0.1"
+	dstPort := 4321
+	proto := "tcp"
+
+	err := natChain.Forward(Insert, ip, port, proto, dstAddr, dstPort)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dnatRule := []string{
+		"-d", ip.String(),
+		"-p", proto,
+		"--dport", strconv.Itoa(port),
+		"-j", "DNAT",
+		"--to-destination", dstAddr + ":" + strconv.Itoa(dstPort),
+	}
+
+	if !Exists(natChain.Table, natChain.Name, dnatRule...) {
+		t.Fatalf("DNAT rule does not exist")
+	}
+
+	filterRule := []string{
+		"!", "-i", filterChain.Bridge,
+		"-o", filterChain.Bridge,
+		"-d", dstAddr,
+		"-p", proto,
+		"--dport", strconv.Itoa(dstPort),
+		"-j", "ACCEPT",
+	}
+
+	if !Exists(filterChain.Table, filterChain.Name, filterRule...) {
+		t.Fatalf("filter rule does not exist")
+	}
+
+	masqRule := []string{
+		"-d", dstAddr,
+		"-s", dstAddr,
+		"-p", proto,
+		"--dport", strconv.Itoa(dstPort),
+		"-j", "MASQUERADE",
+	}
+
+	if !Exists(natChain.Table, "POSTROUTING", masqRule...) {
+		t.Fatalf("MASQUERADE rule does not exist")
+	}
+}
+
+func TestLink(t *testing.T) {
+	var err error
+
+	ip1 := net.ParseIP("192.168.1.1")
+	ip2 := net.ParseIP("192.168.1.2")
+	port := 1234
+	proto := "tcp"
+
+	err = filterChain.Link(Append, ip1, ip2, port, proto)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rule1 := []string{
+		"-i", filterChain.Bridge,
+		"-o", filterChain.Bridge,
+		"-p", proto,
+		"-s", ip1.String(),
+		"-d", ip2.String(),
+		"--dport", strconv.Itoa(port),
+		"-j", "ACCEPT"}
+
+	if !Exists(filterChain.Table, filterChain.Name, rule1...) {
+		t.Fatalf("rule1 does not exist")
+	}
+
+	rule2 := []string{
+		"-i", filterChain.Bridge,
+		"-o", filterChain.Bridge,
+		"-p", proto,
+		"-s", ip2.String(),
+		"-d", ip1.String(),
+		"--sport", strconv.Itoa(port),
+		"-j", "ACCEPT"}
+
+	if !Exists(filterChain.Table, filterChain.Name, rule2...) {
+		t.Fatalf("rule2 does not exist")
+	}
+}
+
+func TestPrerouting(t *testing.T) {
+	args := []string{
+		"-i", "lo",
+		"-d", "192.168.1.1"}
+
+	err := natChain.Prerouting(Insert, args...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rule := []string{
+		"-j", natChain.Name}
+
+	rule = append(rule, args...)
+
+	if !Exists(natChain.Table, "PREROUTING", rule...) {
+		t.Fatalf("rule does not exist")
+	}
+
+	delRule := append([]string{"-D", "PREROUTING", "-t", string(Nat)}, rule...)
+	if _, err = Raw(delRule...); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestOutput(t *testing.T) {
+	args := []string{
+		"-o", "lo",
+		"-d", "192.168.1.1"}
+
+	err := natChain.Output(Insert, args...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rule := []string{
+		"-j", natChain.Name}
+
+	rule = append(rule, args...)
+
+	if !Exists(natChain.Table, "OUTPUT", rule...) {
+		t.Fatalf("rule does not exist")
+	}
+
+	delRule := append([]string{"-D", "OUTPUT", "-t",
+		string(natChain.Table)}, rule...)
+	if _, err = Raw(delRule...); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestConcurrencyWithWait(t *testing.T) {
+	RunConcurrencyTest(t, true)
+}
+
+func TestConcurrencyNoWait(t *testing.T) {
+	RunConcurrencyTest(t, false)
+}
+
+// Runs 10 concurrent rule additions. This will fail if iptables
+// is actually invoked simultaneously without --wait.
+// Note that if iptables does not support the xtable lock on this
+// system, then allowXlock has no effect -- it will always be off.
+func RunConcurrencyTest(t *testing.T, allowXlock bool) {
+	var wg sync.WaitGroup
+
+	if !allowXlock && supportsXlock {
+		supportsXlock = false
+		defer func() { supportsXlock = true }()
+	}
+
+	ip := net.ParseIP("192.168.1.1")
+	port := 1234
+	dstAddr := "172.17.0.1"
+	dstPort := 4321
+	proto := "tcp"
+
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			err := natChain.Forward(Append, ip, port, proto, dstAddr, dstPort)
+			if err != nil {
+				t.Fatal(err)
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func TestCleanup(t *testing.T) {
+	var err error
+	var rules []byte
+
+	// Cleanup filter/FORWARD first otherwise output of iptables-save is dirty
+	link := []string{"-t", string(filterChain.Table),
+		string(Delete), "FORWARD",
+		"-o", filterChain.Bridge,
+		"-j", filterChain.Name}
+	if _, err = Raw(link...); err != nil {
+		t.Fatal(err)
+	}
+	filterChain.Remove()
+
+	err = RemoveExistingChain(chainName, Nat)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rules, err = exec.Command("iptables-save").Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if strings.Contains(string(rules), chainName) {
+		t.Fatalf("Removing chain failed. %s found in iptables-save", chainName)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/libnetwork_internal_test.go b/vendor/src/github.com/docker/libnetwork/libnetwork_internal_test.go
new file mode 100644
index 0000000..6a9a7fd
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/libnetwork_internal_test.go
@@ -0,0 +1,26 @@
+package libnetwork
+
+import (
+	"testing"
+
+	"github.com/docker/libnetwork/driverapi"
+)
+
+func TestDriverRegistration(t *testing.T) {
+	bridgeNetType := "bridge"
+	c, err := New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = c.(*controller).RegisterDriver(bridgeNetType, nil)
+	if err == nil {
+		t.Fatalf("Expecting the RegisterDriver to fail for %s", bridgeNetType)
+	}
+	if _, ok := err.(driverapi.ErrActiveRegistration); !ok {
+		t.Fatalf("Failed for unexpected reason: %v", err)
+	}
+	err = c.(*controller).RegisterDriver("test-dummy", nil)
+	if err != nil {
+		t.Fatalf("Test failed with an error %v", err)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/libnetwork_test.go b/vendor/src/github.com/docker/libnetwork/libnetwork_test.go
new file mode 100644
index 0000000..981128c
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/libnetwork_test.go
@@ -0,0 +1,1528 @@
+package libnetwork_test
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+	"testing"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/docker/libnetwork"
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/docker/libnetwork/options"
+	"github.com/docker/libnetwork/types"
+	"github.com/vishvananda/netlink"
+	"github.com/vishvananda/netns"
+)
+
+const (
+	bridgeNetType = "bridge"
+	bridgeName    = "docker0"
+)
+
+func TestMain(m *testing.M) {
+	if reexec.Init() {
+		return
+	}
+	os.Exit(m.Run())
+}
+
+func createTestNetwork(networkType, networkName string, option options.Generic, netOption options.Generic) (libnetwork.Network, error) {
+	controller, err := libnetwork.New()
+	if err != nil {
+		return nil, err
+	}
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = option
+
+	err = controller.ConfigureNetworkDriver(networkType, genericOption)
+	if err != nil {
+		return nil, err
+	}
+
+	network, err := controller.NewNetwork(networkType, networkName,
+		libnetwork.NetworkOptionGeneric(netOption))
+	if err != nil {
+		return nil, err
+	}
+
+	return network, nil
+}
+
+func getEmptyGenericOption() map[string]interface{} {
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = options.Generic{}
+	return genericOption
+}
+
+func getPortMapping() []types.PortBinding {
+	return []types.PortBinding{
+		types.PortBinding{Proto: types.TCP, Port: uint16(230), HostPort: uint16(23000)},
+		types.PortBinding{Proto: types.UDP, Port: uint16(200), HostPort: uint16(22000)},
+		types.PortBinding{Proto: types.TCP, Port: uint16(120), HostPort: uint16(12000)},
+	}
+}
+
+func TestNull(t *testing.T) {
+	network, err := createTestNetwork("null", "testnetwork", options.Generic{},
+		options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := network.CreateEndpoint("testep")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep.Join("null_container",
+		libnetwork.JoinOptionHostname("test"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep.Leave("null_container")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ep.Delete(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := network.Delete(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestHost(t *testing.T) {
+	network, err := createTestNetwork("host", "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep1, err := network.CreateEndpoint("testep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep1.Join("host_container1",
+		libnetwork.JoinOptionHostname("test1"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"),
+		libnetwork.JoinOptionUseDefaultSandbox())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep2, err := network.CreateEndpoint("testep2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep2.Join("host_container2",
+		libnetwork.JoinOptionHostname("test2"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"),
+		libnetwork.JoinOptionUseDefaultSandbox())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep1.Leave("host_container1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep2.Leave("host_container2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ep1.Delete(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ep2.Delete(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Try to create another host endpoint and join/leave that.
+	ep3, err := network.CreateEndpoint("testep3")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep3.Join("host_container3",
+		libnetwork.JoinOptionHostname("test3"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"),
+		libnetwork.JoinOptionUseDefaultSandbox())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep3.Leave("host_container3")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ep3.Delete(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := network.Delete(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestBridge(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	ip, subnet, err := net.ParseCIDR("192.168.100.1/24")
+	if err != nil {
+		t.Fatal(err)
+	}
+	subnet.IP = ip
+
+	ip, cidr, err := net.ParseCIDR("192.168.100.2/28")
+	if err != nil {
+		t.Fatal(err)
+	}
+	cidr.IP = ip
+
+	ip, cidrv6, err := net.ParseCIDR("fe90::1/96")
+	if err != nil {
+		t.Fatal(err)
+	}
+	cidrv6.IP = ip
+
+	log.Debug("Adding a bridge")
+	option := options.Generic{
+		"EnableIPForwarding": true,
+	}
+
+	netOption := options.Generic{
+		"BridgeName":            bridgeName,
+		"AddressIPv4":           subnet,
+		"FixedCIDR":             cidr,
+		"FixedCIDRv6":           cidrv6,
+		"EnableIPv6":            true,
+		"EnableIPTables":        true,
+		"EnableIPMasquerade":    true,
+		"EnableICC":             true,
+		"AllowNonDefaultBridge": true}
+
+	network, err := createTestNetwork(bridgeNetType, "testnetwork", option, netOption)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := network.CreateEndpoint("testep", libnetwork.CreateOptionPortMapping(getPortMapping()))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	epInfo, err := ep.DriverInfo()
+	if err != nil {
+		t.Fatal(err)
+	}
+	pmd, ok := epInfo[netlabel.PortMap]
+	if !ok {
+		t.Fatalf("Could not find expected info in endpoint data")
+	}
+	pm, ok := pmd.([]types.PortBinding)
+	if !ok {
+		t.Fatalf("Unexpected format for port mapping in endpoint operational data")
+	}
+	if len(pm) != 3 {
+		t.Fatalf("Incomplete data for port mapping in endpoint operational data: %d", len(pm))
+	}
+
+	if err := ep.Delete(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := network.Delete(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestUnknownDriver(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	_, err := createTestNetwork("unknowndriver", "testnetwork", options.Generic{}, options.Generic{})
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(libnetwork.NetworkTypeError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+}
+
+func TestNilRemoteDriver(t *testing.T) {
+	controller, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = controller.NewNetwork("framerelay", "dummy",
+		libnetwork.NetworkOptionGeneric(getEmptyGenericOption()))
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(types.NotFoundError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+}
+
+func TestDuplicateNetwork(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	controller, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	genericOption := make(map[string]interface{})
+	genericOption[netlabel.GenericData] = options.Generic{}
+
+	err = controller.ConfigureNetworkDriver(bridgeNetType, genericOption)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = controller.NewNetwork(bridgeNetType, "testnetwork",
+		libnetwork.NetworkOptionGeneric(genericOption))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = controller.NewNetwork(bridgeNetType, "testnetwork")
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(libnetwork.NetworkNameError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+}
+
+func TestNetworkName(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	_, err := createTestNetwork(bridgeNetType, "", options.Generic{}, options.Generic{})
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+		t.Fatalf("Expected to fail with ErrInvalidName error. Got %v", err)
+	}
+
+	networkName := "testnetwork"
+	n, err := createTestNetwork(bridgeNetType, networkName, options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n.Name() != networkName {
+		t.Fatalf("Expected network name %s, got %s", networkName, n.Name())
+	}
+}
+
+func TestNetworkType(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n.Type() != bridgeNetType {
+		t.Fatalf("Expected network type %s, got %s", bridgeNetType, n.Type())
+	}
+}
+
+func TestNetworkID(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if n.ID() == "" {
+		t.Fatal("Expected non-empty network id")
+	}
+}
+
+func TestDeleteNetworkWithActiveEndpoints(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	option := options.Generic{
+		"BridgeName":            bridgeName,
+		"AllowNonDefaultBridge": true}
+
+	network, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, option)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := network.CreateEndpoint("testep")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = network.Delete()
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(*libnetwork.ActiveEndpointsError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+
+	// Done testing. Now cleanup.
+	if err := ep.Delete(); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := network.Delete(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestUnknownNetwork(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	option := options.Generic{
+		"BridgeName":            bridgeName,
+		"AllowNonDefaultBridge": true}
+
+	network, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, option)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = network.Delete()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = network.Delete()
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(*libnetwork.UnknownNetworkError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+}
+
+func TestUnknownEndpoint(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	ip, subnet, err := net.ParseCIDR("192.168.100.1/24")
+	if err != nil {
+		t.Fatal(err)
+	}
+	subnet.IP = ip
+
+	option := options.Generic{
+		"BridgeName":            bridgeName,
+		"AddressIPv4":           subnet,
+		"AllowNonDefaultBridge": true}
+
+	network, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, option)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = network.CreateEndpoint("")
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+	if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+		t.Fatalf("Expected to fail with ErrInvalidName error. Actual error: %v", err)
+	}
+
+	ep, err := network.CreateEndpoint("testep")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep.Delete()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep.Delete()
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(*libnetwork.UnknownEndpointError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+
+	// Done testing. Now cleanup
+	if err := network.Delete(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestNetworkEndpointsWalkers(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	controller, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = controller.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create network 1 and add 2 endpoint: ep11, ep12
+	net1, err := controller.NewNetwork(bridgeNetType, "network1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ep11, err := net1.CreateEndpoint("ep11")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ep12, err := net1.CreateEndpoint("ep12")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test list methods on net1
+	epList1 := net1.Endpoints()
+	if len(epList1) != 2 {
+		t.Fatalf("Endpoints() returned wrong number of elements: %d instead of 2", len(epList1))
+	}
+	// endpoint order is not guaranteed
+	for _, e := range epList1 {
+		if e != ep11 && e != ep12 {
+			t.Fatal("Endpoints() did not return all the expected elements")
+		}
+	}
+
+	// Test Endpoint Walk method
+	var epName string
+	var epWanted libnetwork.Endpoint
+	wlk := func(ep libnetwork.Endpoint) bool {
+		if ep.Name() == epName {
+			epWanted = ep
+			return true
+		}
+		return false
+	}
+
+	// Look for ep1 on network1
+	epName = "ep11"
+	net1.WalkEndpoints(wlk)
+	if epWanted == nil {
+		t.Fatal(err)
+	}
+	if ep11 != epWanted {
+		t.Fatal(err)
+	}
+
+	// Test Network Walk method
+	var netName string
+	var netWanted libnetwork.Network
+	nwWlk := func(nw libnetwork.Network) bool {
+		if nw.Name() == netName {
+			netWanted = nw
+			return true
+		}
+		return false
+	}
+
+	// Look for network named "network1"
+	netName = "network1"
+	controller.WalkNetworks(nwWlk)
+	if netWanted == nil {
+		t.Fatal(err)
+	}
+	if net1 != netWanted {
+		t.Fatal(err)
+	}
+}
+
+func TestControllerQuery(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	controller, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = controller.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create network 1
+	net1, err := controller.NewNetwork(bridgeNetType, "network1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = controller.NetworkByName("")
+	if err == nil {
+		t.Fatalf("NetworkByName() succeeded with invalid target name")
+	}
+	if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+		t.Fatalf("Expected NetworkByName() to fail with ErrInvalidName error. Got: %v", err)
+	}
+
+	_, err = controller.NetworkByID("")
+	if err == nil {
+		t.Fatalf("NetworkByID() succeeded with invalid target id")
+	}
+	if _, ok := err.(libnetwork.ErrInvalidID); !ok {
+		t.Fatalf("NetworkByID() failed with unexpected error: %v", err)
+	}
+
+	g, err := controller.NetworkByID("network1")
+	if err == nil {
+		t.Fatalf("Unexpected success for NetworkByID(): %v", g)
+	}
+	if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
+		t.Fatalf("NetworkByID() failed with unexpected error: %v", err)
+	}
+
+	g, err = controller.NetworkByName("network1")
+	if err != nil {
+		t.Fatalf("Unexpected failure for NetworkByName(): %v", err)
+	}
+	if g == nil {
+		t.Fatalf("NetworkByName() did not find the network")
+	}
+
+	if g != net1 {
+		t.Fatalf("NetworkByName() returned the wrong network")
+	}
+
+	g, err = controller.NetworkByID(net1.ID())
+	if err != nil {
+		t.Fatalf("Unexpected failure for NetworkByID(): %v", err)
+	}
+	if net1 != g {
+		t.Fatalf("NetworkByID() returned unexpected element: %v", g)
+	}
+}
+
+func TestNetworkQuery(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	controller, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = controller.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create network 1 and add 2 endpoint: ep11, ep12
+	net1, err := controller.NewNetwork(bridgeNetType, "network1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ep11, err := net1.CreateEndpoint("ep11")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ep12, err := net1.CreateEndpoint("ep12")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	e, err := net1.EndpointByName("ep11")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ep11 != e {
+		t.Fatalf("EndpointByName() returned %v instead of %v", e, ep11)
+	}
+
+	e, err = net1.EndpointByName("")
+	if err == nil {
+		t.Fatalf("EndpointByName() succeeded with invalid target name")
+	}
+	if _, ok := err.(libnetwork.ErrInvalidName); !ok {
+		t.Fatalf("Expected EndpointByName() to fail with ErrInvalidName error. Got: %v", err)
+	}
+
+	e, err = net1.EndpointByName("IamNotAnEndpoint")
+	if err == nil {
+		t.Fatalf("EndpointByName() succeeded with unknown target name")
+	}
+	if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
+		t.Fatal(err)
+	}
+	if e != nil {
+		t.Fatalf("EndpointByName(): expected nil, got %v", e)
+	}
+
+	e, err = net1.EndpointByID(ep12.ID())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ep12 != e {
+		t.Fatalf("EndpointByID() returned %v instead of %v", e, ep12)
+	}
+
+	e, err = net1.EndpointByID("")
+	if err == nil {
+		t.Fatalf("EndpointByID() succeeded with invalid target id")
+	}
+	if _, ok := err.(libnetwork.ErrInvalidID); !ok {
+		t.Fatalf("EndpointByID() failed with unexpected error: %v", err)
+	}
+}
+
+const containerID = "valid_container"
+
+func checkSandbox(t *testing.T, info libnetwork.EndpointInfo) {
+	origns, err := netns.Get()
+	if err != nil {
+		t.Fatalf("Could not get the current netns: %v", err)
+	}
+	defer origns.Close()
+
+	key := info.SandboxKey()
+	f, err := os.OpenFile(key, os.O_RDONLY, 0)
+	if err != nil {
+		t.Fatalf("Failed to open network namespace path %q: %v", key, err)
+	}
+	defer f.Close()
+
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	nsFD := f.Fd()
+	if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+		t.Fatalf("Setting to the namespace pointed to by the sandbox %s failed: %v", key, err)
+	}
+	defer netns.Set(origns)
+
+	_, err = netlink.LinkByName("eth0")
+	if err != nil {
+		t.Fatalf("Could not find the interface eth0 inside the sandbox: %v", err)
+	}
+}
+
+func TestEndpointJoin(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := n.CreateEndpoint("ep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Validate if ep.Info() only gives me IP address info and not names and gateway during CreateEndpoint()
+	info := ep.Info()
+
+	for _, iface := range info.InterfaceList() {
+		if iface.Address().IP.To4() == nil {
+			t.Fatalf("Invalid IP address returned: %v", iface.Address())
+		}
+	}
+
+	if info.Gateway().To4() != nil {
+		t.Fatalf("Expected empty gateway for an empty endpoint. Instead found a gateway: %v", info.Gateway())
+	}
+
+	if info.SandboxKey() != "" {
+		t.Fatalf("Expected an empty sandbox key for an empty endpoint. Instead found a non-empty sandbox key: %s", info.SandboxKey())
+	}
+
+	_, err = ep.Join(containerID,
+		libnetwork.JoinOptionHostname("test"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		err = ep.Leave(containerID)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	// Validate if ep.Info() only gives valid gateway and sandbox key after has container has joined.
+	info = ep.Info()
+	if info.Gateway().To4() == nil {
+		t.Fatalf("Expected a valid gateway for a joined endpoint. Instead found an invalid gateway: %v", info.Gateway())
+	}
+
+	if info.SandboxKey() == "" {
+		t.Fatalf("Expected an non-empty sandbox key for a joined endpoint. Instead found a empty sandbox key")
+	}
+
+	checkSandbox(t, info)
+}
+
+func TestEndpointJoinInvalidContainerId(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := n.CreateEndpoint("ep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep.Join("")
+	if err == nil {
+		t.Fatal("Expected to fail join with empty container id string")
+	}
+
+	if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+		t.Fatalf("Failed for unexpected reason: %v", err)
+	}
+}
+
+func TestEndpointDeleteWithActiveContainer(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := n.CreateEndpoint("ep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep.Join(containerID,
+		libnetwork.JoinOptionHostname("test"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = ep.Leave(containerID)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = ep.Delete()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	err = ep.Delete()
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if _, ok := err.(*libnetwork.ActiveContainerError); !ok {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+}
+
+func TestEndpointMultipleJoins(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := n.CreateEndpoint("ep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep.Join(containerID,
+		libnetwork.JoinOptionHostname("test"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = ep.Leave(containerID)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	_, err = ep.Join("container2")
+	if err == nil {
+		t.Fatal("Expected to fail multiple joins for the same endpoint")
+	}
+
+	if _, ok := err.(libnetwork.ErrInvalidJoin); !ok {
+		t.Fatalf("Failed for unexpected reason: %v", err)
+	}
+}
+
+func TestEndpointInvalidLeave(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork(bridgeNetType, "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep, err := n.CreateEndpoint("ep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep.Leave(containerID)
+	if err == nil {
+		t.Fatal("Expected to fail leave from an endpoint which has no active join")
+	}
+
+	if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+		if _, ok := err.(libnetwork.ErrNoContainer); !ok {
+			t.Fatalf("Failed for unexpected reason: %v", err)
+		}
+	}
+
+	_, err = ep.Join(containerID,
+		libnetwork.JoinOptionHostname("test"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = ep.Leave(containerID)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	err = ep.Leave("")
+	if err == nil {
+		t.Fatal("Expected to fail leave with empty container id")
+	}
+
+	if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+		t.Fatalf("Failed for unexpected reason: %v", err)
+	}
+
+	err = ep.Leave("container2")
+	if err == nil {
+		t.Fatal("Expected to fail leave with wrong container id")
+	}
+
+	if _, ok := err.(libnetwork.InvalidContainerIDError); !ok {
+		t.Fatalf("Failed for unexpected reason: %v", err)
+	}
+
+}
+
+func TestEndpointUpdateParent(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	n, err := createTestNetwork("bridge", "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep1, err := n.CreateEndpoint("ep1", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep1.Join(containerID,
+		libnetwork.JoinOptionHostname("test1"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionExtraHost("web", "192.168.0.1"))
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = ep1.Leave(containerID)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	ep2, err := n.CreateEndpoint("ep2", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep2.Join("container2",
+		libnetwork.JoinOptionHostname("test2"),
+		libnetwork.JoinOptionDomainname("docker.io"),
+		libnetwork.JoinOptionHostsPath("/var/lib/docker/test_network/container2/hosts"),
+		libnetwork.JoinOptionParentUpdate(ep1.ID(), "web", "192.168.0.2"))
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer func() {
+		err = ep2.Leave("container2")
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+}
+
+func TestEnableIPv6(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888")
+	//take a copy of resolv.conf for restoring after test completes
+	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		t.Fatal(err)
+	}
+	//cleanup
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	ip, cidrv6, err := net.ParseCIDR("fe80::1/64")
+	if err != nil {
+		t.Fatal(err)
+	}
+	cidrv6.IP = ip
+
+	netOption := options.Generic{
+		netlabel.EnableIPv6: true,
+		netlabel.GenericData: options.Generic{
+			"FixedCIDRv6": cidrv6,
+		},
+	}
+
+	n, err := createTestNetwork("bridge", "testnetwork", options.Generic{}, netOption)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep1, err := n.CreateEndpoint("ep1", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	resolvConfPath := "/tmp/libnetwork_test/resolv.conf"
+	defer os.Remove(resolvConfPath)
+
+	_, err = ep1.Join(containerID,
+		libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = ep1.Leave(containerID)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	content, err := ioutil.ReadFile(resolvConfPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(content, tmpResolvConf) {
+		t.Fatalf("Expected %s, Got %s", string(tmpResolvConf), string(content))
+	}
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestResolvConf(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		defer netutils.SetupTestNetNS(t)()
+	}
+
+	tmpResolvConf1 := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888")
+	expectedResolvConf1 := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n")
+	tmpResolvConf2 := []byte("search pommesfrites.fr\nnameserver 112.34.56.78\nnameserver 2001:4860:4860::8888")
+	expectedResolvConf2 := []byte("search pommesfrites.fr\nnameserver 112.34.56.78\n")
+	tmpResolvConf3 := []byte("search pommesfrites.fr\nnameserver 113.34.56.78\n")
+
+	//take a copy of resolv.conf for restoring after test completes
+	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		t.Fatal(err)
+	}
+	//cleanup
+	defer func() {
+		if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	n, err := createTestNetwork("bridge", "testnetwork", options.Generic{}, options.Generic{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ep1, err := n.CreateEndpoint("ep1", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf1, 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	resolvConfPath := "/tmp/libnetwork_test/resolv.conf"
+	defer os.Remove(resolvConfPath)
+
+	_, err = ep1.Join(containerID,
+		libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		err = ep1.Leave(containerID)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	finfo, err := os.Stat(resolvConfPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fmode := (os.FileMode)(0644)
+	if finfo.Mode() != fmode {
+		t.Fatalf("Expected file mode %s, got %s", fmode.String(), finfo.Mode().String())
+	}
+
+	content, err := ioutil.ReadFile(resolvConfPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(content, expectedResolvConf1) {
+		t.Fatalf("Expected %s, Got %s", string(expectedResolvConf1), string(content))
+	}
+
+	err = ep1.Leave(containerID)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf2, 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep1.Join(containerID,
+		libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err = ioutil.ReadFile(resolvConfPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(content, expectedResolvConf2) {
+		t.Fatalf("Expected %s, Got %s", string(expectedResolvConf2), string(content))
+	}
+
+	if err := ioutil.WriteFile(resolvConfPath, tmpResolvConf3, 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	err = ep1.Leave(containerID)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = ep1.Join(containerID,
+		libnetwork.JoinOptionResolvConfPath(resolvConfPath))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err = ioutil.ReadFile(resolvConfPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(content, tmpResolvConf3) {
+		t.Fatalf("Expected %s, Got %s", string(tmpResolvConf3), string(content))
+	}
+}
+
+func TestInvalidRemoteDriver(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		t.Skip("Skipping test when not running inside a Container")
+	}
+
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	if server == nil {
+		t.Fatal("Failed to start a HTTP Server")
+	}
+	defer server.Close()
+
+	type pluginRequest struct {
+		name string
+	}
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintln(w, `{"Implements": ["InvalidDriver"]}`)
+	})
+
+	if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	if err := ioutil.WriteFile("/usr/share/docker/plugins/invalid-network-driver.spec", []byte(server.URL), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	controller, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = controller.NewNetwork("invalid-network-driver", "dummy",
+		libnetwork.NetworkOptionGeneric(getEmptyGenericOption()))
+	if err == nil {
+		t.Fatal("Expected to fail. But instead succeeded")
+	}
+
+	if err != plugins.ErrNotImplements {
+		t.Fatalf("Did not fail with expected error. Actual error: %v", err)
+	}
+}
+
+func TestValidRemoteDriver(t *testing.T) {
+	if !netutils.IsRunningInContainer() {
+		t.Skip("Skipping test when not running inside a Container")
+	}
+
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	if server == nil {
+		t.Fatal("Failed to start a HTTP Server")
+	}
+	defer server.Close()
+
+	type pluginRequest struct {
+		name string
+	}
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintf(w, `{"Implements": ["%s"]}`, driverapi.NetworkPluginEndpointType)
+	})
+	mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json")
+		fmt.Fprintf(w, "null")
+	})
+
+	if err := os.MkdirAll("/usr/share/docker/plugins", 0755); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := os.RemoveAll("/usr/share/docker/plugins"); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	if err := ioutil.WriteFile("/usr/share/docker/plugins/valid-network-driver.spec", []byte(server.URL), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	controller, err := libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = controller.NewNetwork("valid-network-driver", "dummy",
+		libnetwork.NetworkOptionGeneric(getEmptyGenericOption()))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+var (
+	once   sync.Once
+	ctrlr  libnetwork.NetworkController
+	start  = make(chan struct{})
+	done   = make(chan chan struct{}, numThreads-1)
+	origns = netns.None()
+	testns = netns.None()
+)
+
+const (
+	iterCnt    = 25
+	numThreads = 3
+	first      = 1
+	last       = numThreads
+	debug      = false
+)
+
+func createGlobalInstance(t *testing.T) {
+	var err error
+	defer close(start)
+
+	origns, err = netns.Get()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if netutils.IsRunningInContainer() {
+		testns = origns
+	} else {
+		testns, err = netns.New()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	ctrlr, err = libnetwork.New()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ctrlr.ConfigureNetworkDriver(bridgeNetType, getEmptyGenericOption())
+	if err != nil {
+		t.Fatal("configure driver")
+	}
+
+	net, err := ctrlr.NewNetwork(bridgeNetType, "network1")
+	if err != nil {
+		t.Fatal("new network")
+	}
+
+	_, err = net.CreateEndpoint("ep1")
+	if err != nil {
+		t.Fatal("createendpoint")
+	}
+}
+
+func debugf(format string, a ...interface{}) (int, error) {
+	if debug {
+		return fmt.Printf(format, a...)
+	}
+
+	return 0, nil
+}
+
+func parallelJoin(t *testing.T, ep libnetwork.Endpoint, thrNumber int) {
+	debugf("J%d.", thrNumber)
+	_, err := ep.Join("racing_container")
+	runtime.LockOSThread()
+	if err != nil {
+		if _, ok := err.(libnetwork.ErrNoContainer); !ok {
+			if _, ok := err.(libnetwork.ErrInvalidJoin); !ok {
+				t.Fatal(err)
+			}
+		}
+		debugf("JE%d(%v).", thrNumber, err)
+	}
+	debugf("JD%d.", thrNumber)
+}
+
+func parallelLeave(t *testing.T, ep libnetwork.Endpoint, thrNumber int) {
+	debugf("L%d.", thrNumber)
+	err := ep.Leave("racing_container")
+	runtime.LockOSThread()
+	if err != nil {
+		if _, ok := err.(libnetwork.ErrNoContainer); !ok {
+			if _, ok := err.(libnetwork.ErrInvalidJoin); !ok {
+				t.Fatal(err)
+			}
+		}
+		debugf("LE%d(%v).", thrNumber, err)
+	}
+	debugf("LD%d.", thrNumber)
+}
+
+func runParallelTests(t *testing.T, thrNumber int) {
+	var err error
+
+	t.Parallel()
+
+	pTest := flag.Lookup("test.parallel")
+	if pTest == nil {
+		t.Skip("Skipped because test.parallel flag not set;")
+	}
+	numParallel, err := strconv.Atoi(pTest.Value.String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if numParallel < numThreads {
+		t.Skip("Skipped because t.parallel was less than ", numThreads)
+	}
+
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	if thrNumber == first {
+		createGlobalInstance(t)
+	}
+
+	if thrNumber != first {
+		select {
+		case <-start:
+		}
+
+		thrdone := make(chan struct{})
+		done <- thrdone
+		defer close(thrdone)
+
+		if thrNumber == last {
+			defer close(done)
+		}
+
+		err = netns.Set(testns)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	defer netns.Set(origns)
+
+	net, err := ctrlr.NetworkByName("network1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if net == nil {
+		t.Fatal("Could not find network1")
+	}
+
+	ep, err := net.EndpointByName("ep1")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if ep == nil {
+		t.Fatal("Got nil ep with no error")
+	}
+
+	for i := 0; i < iterCnt; i++ {
+		parallelJoin(t, ep, thrNumber)
+		parallelLeave(t, ep, thrNumber)
+	}
+
+	debugf("\n")
+
+	if thrNumber == first {
+		for thrdone := range done {
+			select {
+			case <-thrdone:
+			}
+		}
+
+		testns.Close()
+		err = ep.Delete()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
+func TestParallel1(t *testing.T) {
+	runParallelTests(t, 1)
+}
+
+func TestParallel2(t *testing.T) {
+	runParallelTests(t, 2)
+}
+
+func TestParallel3(t *testing.T) {
+	runParallelTests(t, 3)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/netlabel/labels.go b/vendor/src/github.com/docker/libnetwork/netlabel/labels.go
new file mode 100644
index 0000000..adbabbc
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/netlabel/labels.go
@@ -0,0 +1,18 @@
+package netlabel
+
+const (
+	// GenericData constant that helps to identify an option as a Generic constant
+	GenericData = "io.docker.network.generic"
+
+	// PortMap constant represents Port Mapping
+	PortMap = "io.docker.network.endpoint.portmap"
+
+	// MacAddress constant represents Mac Address config of a Container
+	MacAddress = "io.docker.network.endpoint.macaddress"
+
+	// ExposedPorts constant represents exposedports of a Container
+	ExposedPorts = "io.docker.network.endpoint.exposedports"
+
+	//EnableIPv6 constant represents enabling IPV6 at network level
+	EnableIPv6 = "io.docker.network.enable_ipv6"
+)
diff --git a/vendor/src/github.com/docker/libnetwork/netutils/test_utils.go b/vendor/src/github.com/docker/libnetwork/netutils/test_utils.go
new file mode 100644
index 0000000..d0a2fab
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/netutils/test_utils.go
@@ -0,0 +1,41 @@
+package netutils
+
+import (
+	"flag"
+	"runtime"
+	"syscall"
+	"testing"
+)
+
+var runningInContainer = flag.Bool("incontainer", false, "Indicates if the test is running in a container")
+
+// IsRunningInContainer returns whether the test is running inside a container.
+func IsRunningInContainer() bool {
+	return (*runningInContainer)
+}
+
+// SetupTestNetNS joins a new network namespace, and returns its associated
+// teardown function.
+//
+// Example usage:
+//
+//     defer SetupTestNetNS(t)()
+//
+func SetupTestNetNS(t *testing.T) func() {
+	runtime.LockOSThread()
+	if err := syscall.Unshare(syscall.CLONE_NEWNET); err != nil {
+		t.Fatalf("Failed to enter netns: %v", err)
+	}
+
+	fd, err := syscall.Open("/proc/self/ns/net", syscall.O_RDONLY, 0)
+	if err != nil {
+		t.Fatal("Failed to open netns file")
+	}
+
+	return func() {
+		if err := syscall.Close(fd); err != nil {
+			t.Logf("Warning: netns closing failed (%v)", err)
+		}
+		runtime.UnlockOSThread()
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/netutils/utils.go b/vendor/src/github.com/docker/libnetwork/netutils/utils.go
new file mode 100644
index 0000000..98da12e
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/netutils/utils.go
@@ -0,0 +1,149 @@
+// Network utility functions.
+
+package netutils
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+
+	"github.com/vishvananda/netlink"
+)
+
+var (
+	// ErrNetworkOverlapsWithNameservers preformatted error
+	ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver")
+	// ErrNetworkOverlaps preformatted error
+	ErrNetworkOverlaps = errors.New("requested network overlaps with existing network")
+	// ErrNoDefaultRoute preformatted error
+	ErrNoDefaultRoute = errors.New("no default route")
+
+	networkGetRoutesFct = netlink.RouteList
+)
+
+// CheckNameserverOverlaps checks whether the passed network overlaps with any of the nameservers
+func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error {
+	if len(nameservers) > 0 {
+		for _, ns := range nameservers {
+			_, nsNetwork, err := net.ParseCIDR(ns)
+			if err != nil {
+				return err
+			}
+			if NetworkOverlaps(toCheck, nsNetwork) {
+				return ErrNetworkOverlapsWithNameservers
+			}
+		}
+	}
+	return nil
+}
+
+// CheckRouteOverlaps checks whether the passed network overlaps with any existing routes
+func CheckRouteOverlaps(toCheck *net.IPNet) error {
+	networks, err := networkGetRoutesFct(nil, netlink.FAMILY_V4)
+	if err != nil {
+		return err
+	}
+
+	for _, network := range networks {
+		if network.Dst != nil && NetworkOverlaps(toCheck, network.Dst) {
+			return ErrNetworkOverlaps
+		}
+	}
+	return nil
+}
+
+// NetworkOverlaps detects overlap between one IPNet and another
+func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {
+	// Check if both netX and netY are ipv4 or ipv6
+	if (netX.IP.To4() != nil && netY.IP.To4() != nil) ||
+		(netX.IP.To4() == nil && netY.IP.To4() == nil) {
+		if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) {
+			return true
+		}
+		if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) {
+			return true
+		}
+	}
+	return false
+}
+
+// NetworkRange calculates the first and last IP addresses in an IPNet
+func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
+	var netIP net.IP
+	if network.IP.To4() != nil {
+		netIP = network.IP.To4()
+	} else if network.IP.To16() != nil {
+		netIP = network.IP.To16()
+	} else {
+		return nil, nil
+	}
+
+	lastIP := make([]byte, len(netIP), len(netIP))
+	for i := 0; i < len(netIP); i++ {
+		lastIP[i] = netIP[i] | ^network.Mask[i]
+	}
+	return netIP.Mask(network.Mask), net.IP(lastIP)
+}
+
+// GetIfaceAddr returns the first IPv4 address and slice of IPv6 addresses for the specified network interface
+func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) {
+	iface, err := net.InterfaceByName(name)
+	if err != nil {
+		return nil, nil, err
+	}
+	addrs, err := iface.Addrs()
+	if err != nil {
+		return nil, nil, err
+	}
+	var addrs4 []net.Addr
+	var addrs6 []net.Addr
+	for _, addr := range addrs {
+		ip := (addr.(*net.IPNet)).IP
+		if ip4 := ip.To4(); ip4 != nil {
+			addrs4 = append(addrs4, addr)
+		} else if ip6 := ip.To16(); len(ip6) == net.IPv6len {
+			addrs6 = append(addrs6, addr)
+		}
+	}
+	switch {
+	case len(addrs4) == 0:
+		return nil, nil, fmt.Errorf("Interface %v has no IPv4 addresses", name)
+	case len(addrs4) > 1:
+		fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n",
+			name, (addrs4[0].(*net.IPNet)).IP)
+	}
+	return addrs4[0], addrs6, nil
+}
+
+// GenerateRandomMAC returns a new 6-byte(48-bit) hardware address (MAC)
+func GenerateRandomMAC() net.HardwareAddr {
+	hw := make(net.HardwareAddr, 6)
+	// The first byte of the MAC address has to comply with these rules:
+	// 1. Unicast: Set the least-significant bit to 0.
+	// 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1.
+	// 3. As "small" as possible: The veth address has to be "smaller" than the bridge address.
+	hw[0] = 0x02
+	// The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI).
+	// Since this address is locally administered, we can do whatever we want as long as
+	// it doesn't conflict with other addresses.
+	hw[1] = 0x42
+	// Randomly generate the remaining 4 bytes (2^32)
+	_, err := rand.Read(hw[2:])
+	if err != nil {
+		return nil
+	}
+	return hw
+}
+
+// GenerateRandomName returns a new name joined with a prefix.  This size
+// specified is used to truncate the randomly generated value
+func GenerateRandomName(prefix string, size int) (string, error) {
+	id := make([]byte, 32)
+	if _, err := io.ReadFull(rand.Reader, id); err != nil {
+		return "", err
+	}
+	return prefix + hex.EncodeToString(id)[:size], nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/netutils/utils_test.go b/vendor/src/github.com/docker/libnetwork/netutils/utils_test.go
new file mode 100644
index 0000000..78de626
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/netutils/utils_test.go
@@ -0,0 +1,211 @@
+package netutils
+
+import (
+	"bytes"
+	"net"
+	"testing"
+
+	"github.com/vishvananda/netlink"
+)
+
+func TestNonOverlapingNameservers(t *testing.T) {
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	nameservers := []string{
+		"127.0.0.1/32",
+	}
+
+	if err := CheckNameserverOverlaps(nameservers, network); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestOverlapingNameservers(t *testing.T) {
+	network := &net.IPNet{
+		IP:   []byte{192, 168, 0, 1},
+		Mask: []byte{255, 255, 255, 0},
+	}
+	nameservers := []string{
+		"192.168.0.1/32",
+	}
+
+	if err := CheckNameserverOverlaps(nameservers, network); err == nil {
+		t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err)
+	}
+}
+
+func TestCheckRouteOverlaps(t *testing.T) {
+	orig := networkGetRoutesFct
+	defer func() {
+		networkGetRoutesFct = orig
+	}()
+	networkGetRoutesFct = func(netlink.Link, int) ([]netlink.Route, error) {
+		routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"}
+
+		routes := []netlink.Route{}
+		for _, addr := range routesData {
+			_, netX, _ := net.ParseCIDR(addr)
+			routes = append(routes, netlink.Route{Dst: netX})
+		}
+		return routes, nil
+	}
+
+	_, netX, _ := net.ParseCIDR("172.16.0.1/24")
+	if err := CheckRouteOverlaps(netX); err != nil {
+		t.Fatal(err)
+	}
+
+	_, netX, _ = net.ParseCIDR("10.0.2.0/24")
+	if err := CheckRouteOverlaps(netX); err == nil {
+		t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't")
+	}
+}
+
+func TestCheckNameserverOverlaps(t *testing.T) {
+	nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"}
+
+	_, netX, _ := net.ParseCIDR("10.0.2.3/32")
+
+	if err := CheckNameserverOverlaps(nameservers, netX); err == nil {
+		t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX)
+	}
+
+	_, netX, _ = net.ParseCIDR("192.168.102.2/32")
+
+	if err := CheckNameserverOverlaps(nameservers, netX); err != nil {
+		t.Fatalf("%s should not overlap %v but it does", netX, nameservers)
+	}
+}
+
+func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) {
+	_, netX, _ := net.ParseCIDR(CIDRx)
+	_, netY, _ := net.ParseCIDR(CIDRy)
+	if !NetworkOverlaps(netX, netY) {
+		t.Errorf("%v and %v should overlap", netX, netY)
+	}
+}
+
+func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) {
+	_, netX, _ := net.ParseCIDR(CIDRx)
+	_, netY, _ := net.ParseCIDR(CIDRy)
+	if NetworkOverlaps(netX, netY) {
+		t.Errorf("%v and %v should not overlap", netX, netY)
+	}
+}
+
+func TestNetworkOverlaps(t *testing.T) {
+	//netY starts at same IP and ends within netX
+	AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t)
+	//netY starts within netX and ends at same IP
+	AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t)
+	//netY starts and ends within netX
+	AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t)
+	//netY starts at same IP and ends outside of netX
+	AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t)
+	//netY starts before and ends at same IP of netX
+	AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t)
+	//netY starts before and ends outside of netX
+	AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t)
+	//netY starts and ends before netX
+	AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t)
+	//netX starts and ends before netY
+	AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t)
+}
+
+func TestNetworkRange(t *testing.T) {
+	// Simple class C test
+	_, network, _ := net.ParseCIDR("192.168.0.1/24")
+	first, last := NetworkRange(network)
+	if !first.Equal(net.ParseIP("192.168.0.0")) {
+		t.Error(first.String())
+	}
+	if !last.Equal(net.ParseIP("192.168.0.255")) {
+		t.Error(last.String())
+	}
+
+	// Class A test
+	_, network, _ = net.ParseCIDR("10.0.0.1/8")
+	first, last = NetworkRange(network)
+	if !first.Equal(net.ParseIP("10.0.0.0")) {
+		t.Error(first.String())
+	}
+	if !last.Equal(net.ParseIP("10.255.255.255")) {
+		t.Error(last.String())
+	}
+
+	// Class A, random IP address
+	_, network, _ = net.ParseCIDR("10.1.2.3/8")
+	first, last = NetworkRange(network)
+	if !first.Equal(net.ParseIP("10.0.0.0")) {
+		t.Error(first.String())
+	}
+	if !last.Equal(net.ParseIP("10.255.255.255")) {
+		t.Error(last.String())
+	}
+
+	// 32bit mask
+	_, network, _ = net.ParseCIDR("10.1.2.3/32")
+	first, last = NetworkRange(network)
+	if !first.Equal(net.ParseIP("10.1.2.3")) {
+		t.Error(first.String())
+	}
+	if !last.Equal(net.ParseIP("10.1.2.3")) {
+		t.Error(last.String())
+	}
+
+	// 31bit mask
+	_, network, _ = net.ParseCIDR("10.1.2.3/31")
+	first, last = NetworkRange(network)
+	if !first.Equal(net.ParseIP("10.1.2.2")) {
+		t.Error(first.String())
+	}
+	if !last.Equal(net.ParseIP("10.1.2.3")) {
+		t.Error(last.String())
+	}
+
+	// 26bit mask
+	_, network, _ = net.ParseCIDR("10.1.2.3/26")
+	first, last = NetworkRange(network)
+	if !first.Equal(net.ParseIP("10.1.2.0")) {
+		t.Error(first.String())
+	}
+	if !last.Equal(net.ParseIP("10.1.2.63")) {
+		t.Error(last.String())
+	}
+}
+
+// Test veth name generation "veth"+rand (e.g.veth0f60e2c)
+func TestGenerateRandomName(t *testing.T) {
+	name1, err := GenerateRandomName("veth", 7)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// veth plus generated append equals a len of 11
+	if len(name1) != 11 {
+		t.Fatalf("Expected 11 characters, instead received %d characters", len(name1))
+	}
+	name2, err := GenerateRandomName("veth", 7)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Fail if the random generated names equal one another
+	if name1 == name2 {
+		t.Fatalf("Expected differing values but received %s and %s", name1, name2)
+	}
+}
+
+// Test mac generation.
+func TestUtilGenerateRandomMAC(t *testing.T) {
+	mac1 := GenerateRandomMAC()
+	mac2 := GenerateRandomMAC()
+	// ensure bytes are unique
+	if bytes.Equal(mac1, mac2) {
+		t.Fatalf("mac1 %s should not equal mac2 %s", mac1, mac2)
+	}
+	// existing tests check string functionality so keeping the pattern
+	if mac1.String() == mac2.String() {
+		t.Fatalf("mac1 %s should not equal mac2 %s", mac1, mac2)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/network.go b/vendor/src/github.com/docker/libnetwork/network.go
new file mode 100644
index 0000000..36938a5
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/network.go
@@ -0,0 +1,207 @@
+package libnetwork
+
+import (
+	"sync"
+
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/options"
+	"github.com/docker/libnetwork/types"
+)
+
+// A Network represents a logical connectivity zone that containers may
+// join using the Link method. A Network is managed by a specific driver.
+type Network interface {
+	// A user chosen name for this network.
+	Name() string
+
+	// A system generated id for this network.
+	ID() string
+
+	// The type of network, which corresponds to its managing driver.
+	Type() string
+
+	// Create a new endpoint to this network symbolically identified by the
+	// specified unique name. The options parameter carry driver specific options.
+	// Labels support will be added in the near future.
+	CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error)
+
+	// Delete the network.
+	Delete() error
+
+	// Endpoints returns the list of Endpoint(s) in this network.
+	Endpoints() []Endpoint
+
+	// WalkEndpoints uses the provided function to walk the Endpoints
+	WalkEndpoints(walker EndpointWalker)
+
+	// EndpointByName returns the Endpoint which has the passed name. If not found, the error ErrNoSuchEndpoint is returned.
+	EndpointByName(name string) (Endpoint, error)
+
+	// EndpointByID returns the Endpoint which has the passed id. If not found, the error ErrNoSuchEndpoint is returned.
+	EndpointByID(id string) (Endpoint, error)
+}
+
+// EndpointWalker is a client provided function which will be used to walk the Endpoints.
+// When the function returns true, the walk will stop.
+type EndpointWalker func(ep Endpoint) bool
+
+type network struct {
+	ctrlr       *controller
+	name        string
+	networkType string
+	id          types.UUID
+	driver      driverapi.Driver
+	enableIPv6  bool
+	endpoints   endpointTable
+	generic     options.Generic
+	sync.Mutex
+}
+
+func (n *network) Name() string {
+	return n.name
+}
+
+func (n *network) ID() string {
+	return string(n.id)
+}
+
+func (n *network) Type() string {
+	if n.driver == nil {
+		return ""
+	}
+
+	return n.driver.Type()
+}
+
+// NetworkOption is a option setter function type used to pass varios options to
+// NewNetwork method. The various setter functions of type NetworkOption are
+// provided by libnetwork, they look like NetworkOptionXXXX(...)
+type NetworkOption func(n *network)
+
+// NetworkOptionGeneric function returns an option setter for a Generic option defined
+// in a Dictionary of Key-Value pair
+func NetworkOptionGeneric(generic map[string]interface{}) NetworkOption {
+	return func(n *network) {
+		n.generic = generic
+		if _, ok := generic[netlabel.EnableIPv6]; ok {
+			n.enableIPv6 = generic[netlabel.EnableIPv6].(bool)
+		}
+	}
+}
+
+func (n *network) processOptions(options ...NetworkOption) {
+	for _, opt := range options {
+		if opt != nil {
+			opt(n)
+		}
+	}
+}
+
+func (n *network) Delete() error {
+	var err error
+
+	n.ctrlr.Lock()
+	_, ok := n.ctrlr.networks[n.id]
+	if !ok {
+		n.ctrlr.Unlock()
+		return &UnknownNetworkError{name: n.name, id: string(n.id)}
+	}
+
+	n.Lock()
+	numEps := len(n.endpoints)
+	n.Unlock()
+	if numEps != 0 {
+		n.ctrlr.Unlock()
+		return &ActiveEndpointsError{name: n.name, id: string(n.id)}
+	}
+
+	delete(n.ctrlr.networks, n.id)
+	n.ctrlr.Unlock()
+	defer func() {
+		if err != nil {
+			n.ctrlr.Lock()
+			n.ctrlr.networks[n.id] = n
+			n.ctrlr.Unlock()
+		}
+	}()
+
+	err = n.driver.DeleteNetwork(n.id)
+	return err
+}
+
+func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) {
+	if name == "" {
+		return nil, ErrInvalidName(name)
+	}
+	ep := &endpoint{name: name, iFaces: []*endpointInterface{}, generic: make(map[string]interface{})}
+	ep.id = types.UUID(stringid.GenerateRandomID())
+	ep.network = n
+	ep.processOptions(options...)
+
+	d := n.driver
+	err := d.CreateEndpoint(n.id, ep.id, ep, ep.generic)
+	if err != nil {
+		return nil, err
+	}
+
+	n.Lock()
+	n.endpoints[ep.id] = ep
+	n.Unlock()
+	return ep, nil
+}
+
+func (n *network) Endpoints() []Endpoint {
+	n.Lock()
+	defer n.Unlock()
+	list := make([]Endpoint, 0, len(n.endpoints))
+	for _, e := range n.endpoints {
+		list = append(list, e)
+	}
+
+	return list
+}
+
+func (n *network) WalkEndpoints(walker EndpointWalker) {
+	for _, e := range n.Endpoints() {
+		if walker(e) {
+			return
+		}
+	}
+}
+
+func (n *network) EndpointByName(name string) (Endpoint, error) {
+	if name == "" {
+		return nil, ErrInvalidName(name)
+	}
+	var e Endpoint
+
+	s := func(current Endpoint) bool {
+		if current.Name() == name {
+			e = current
+			return true
+		}
+		return false
+	}
+
+	n.WalkEndpoints(s)
+
+	if e == nil {
+		return nil, ErrNoSuchEndpoint(name)
+	}
+
+	return e, nil
+}
+
+func (n *network) EndpointByID(id string) (Endpoint, error) {
+	if id == "" {
+		return nil, ErrInvalidID(id)
+	}
+	n.Lock()
+	defer n.Unlock()
+	if e, ok := n.endpoints[types.UUID(id)]; ok {
+		return e, nil
+	}
+	return nil, ErrNoSuchEndpoint(id)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/options/options.go b/vendor/src/github.com/docker/libnetwork/options/options.go
new file mode 100644
index 0000000..e0e93ff
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/options/options.go
@@ -0,0 +1,73 @@
+// Package options provides a way to pass unstructured sets of options to a
+// component expecting a strongly-typed configuration structure.
+package options
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// NoSuchFieldError is the error returned when the generic parameters hold a
+// value for a field absent from the destination structure.
+type NoSuchFieldError struct {
+	Field string
+	Type  string
+}
+
+func (e NoSuchFieldError) Error() string {
+	return fmt.Sprintf("no field %q in type %q", e.Field, e.Type)
+}
+
+// CannotSetFieldError is the error returned when the generic parameters hold a
+// value for a field that cannot be set in the destination structure.
+type CannotSetFieldError struct {
+	Field string
+	Type  string
+}
+
+func (e CannotSetFieldError) Error() string {
+	return fmt.Sprintf("cannot set field %q of type %q", e.Field, e.Type)
+}
+
+// Generic is an basic type to store arbitrary settings.
+type Generic map[string]interface{}
+
+// NewGeneric returns a new Generic instance.
+func NewGeneric() Generic {
+	return make(Generic)
+}
+
+// GenerateFromModel takes the generic options, and tries to build a new
+// instance of the model's type by matching keys from the generic options to
+// fields in the model.
+//
+// The return value is of the same type than the model (including a potential
+// pointer qualifier).
+func GenerateFromModel(options Generic, model interface{}) (interface{}, error) {
+	modType := reflect.TypeOf(model)
+
+	// If the model is of pointer type, we need to dereference for New.
+	resType := reflect.TypeOf(model)
+	if modType.Kind() == reflect.Ptr {
+		resType = resType.Elem()
+	}
+
+	// Populate the result structure with the generic layout content.
+	res := reflect.New(resType)
+	for name, value := range options {
+		field := res.Elem().FieldByName(name)
+		if !field.IsValid() {
+			return nil, NoSuchFieldError{name, resType.String()}
+		}
+		if !field.CanSet() {
+			return nil, CannotSetFieldError{name, resType.String()}
+		}
+		field.Set(reflect.ValueOf(value))
+	}
+
+	// If the model is not of pointer type, return content of the result.
+	if modType.Kind() == reflect.Ptr {
+		return res.Interface(), nil
+	}
+	return res.Elem().Interface(), nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/options/options_test.go b/vendor/src/github.com/docker/libnetwork/options/options_test.go
new file mode 100644
index 0000000..ecd3b3b
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/options/options_test.go
@@ -0,0 +1,97 @@
+package options
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+func TestGenerate(t *testing.T) {
+	gen := NewGeneric()
+	gen["Int"] = 1
+	gen["Rune"] = 'b'
+	gen["Float64"] = 2.0
+
+	type Model struct {
+		Int     int
+		Rune    rune
+		Float64 float64
+	}
+
+	result, err := GenerateFromModel(gen, Model{})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cast, ok := result.(Model)
+	if !ok {
+		t.Fatalf("result has unexpected type %s", reflect.TypeOf(result))
+	}
+	if expected := 1; cast.Int != expected {
+		t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Int)
+	}
+	if expected := 'b'; cast.Rune != expected {
+		t.Fatalf("wrong value for field Rune: expected %v, got %v", expected, cast.Rune)
+	}
+	if expected := 2.0; cast.Float64 != expected {
+		t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Float64)
+	}
+}
+
+func TestGeneratePtr(t *testing.T) {
+	gen := NewGeneric()
+	gen["Int"] = 1
+	gen["Rune"] = 'b'
+	gen["Float64"] = 2.0
+
+	type Model struct {
+		Int     int
+		Rune    rune
+		Float64 float64
+	}
+
+	result, err := GenerateFromModel(gen, &Model{})
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cast, ok := result.(*Model)
+	if !ok {
+		t.Fatalf("result has unexpected type %s", reflect.TypeOf(result))
+	}
+	if expected := 1; cast.Int != expected {
+		t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Int)
+	}
+	if expected := 'b'; cast.Rune != expected {
+		t.Fatalf("wrong value for field Rune: expected %v, got %v", expected, cast.Rune)
+	}
+	if expected := 2.0; cast.Float64 != expected {
+		t.Fatalf("wrong value for field Int: expected %v, got %v", expected, cast.Float64)
+	}
+}
+
+func TestGenerateMissingField(t *testing.T) {
+	type Model struct{}
+	_, err := GenerateFromModel(Generic{"foo": "bar"}, Model{})
+
+	if _, ok := err.(NoSuchFieldError); !ok {
+		t.Fatalf("expected NoSuchFieldError, got %#v", err)
+	} else if expected := "no field"; !strings.Contains(err.Error(), expected) {
+		t.Fatalf("expected %q in error message, got %s", expected, err.Error())
+	}
+}
+
+func TestFieldCannotBeSet(t *testing.T) {
+	type Model struct{ foo int }
+	_, err := GenerateFromModel(Generic{"foo": "bar"}, Model{})
+
+	if _, ok := err.(CannotSetFieldError); !ok {
+		t.Fatalf("expected CannotSetFieldError, got %#v", err)
+	} else if expected := "cannot set field"; !strings.Contains(err.Error(), expected) {
+		t.Fatalf("expected %q in error message, got %s", expected, err.Error())
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go
new file mode 100644
index 0000000..84b07b2
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go
@@ -0,0 +1,212 @@
+package portallocator
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+)
+
+const (
+	// DefaultPortRangeStart indicates the first port in port range
+	DefaultPortRangeStart = 49153
+	// DefaultPortRangeEnd indicates the last port in port range
+	DefaultPortRangeEnd = 65535
+)
+
+type ipMapping map[string]protoMap
+
+var (
+	// ErrAllPortsAllocated is returned when no more ports are available
+	ErrAllPortsAllocated = errors.New("all ports are allocated")
+	// ErrUnknownProtocol is returned when an unknown protocol was specified
+	ErrUnknownProtocol = errors.New("unknown protocol")
+	defaultIP          = net.ParseIP("0.0.0.0")
+	once               sync.Once
+	instance           *PortAllocator
+	createInstance     = func() { instance = newInstance() }
+)
+
+// ErrPortAlreadyAllocated is the returned error information when a requested port is already being used
+type ErrPortAlreadyAllocated struct {
+	ip   string
+	port int
+}
+
+func newErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
+	return ErrPortAlreadyAllocated{
+		ip:   ip,
+		port: port,
+	}
+}
+
+// IP returns the address to which the used port is associated
+func (e ErrPortAlreadyAllocated) IP() string {
+	return e.ip
+}
+
+// Port returns the value of the already used port
+func (e ErrPortAlreadyAllocated) Port() int {
+	return e.port
+}
+
+// IPPort returns the address and the port in the form ip:port
+func (e ErrPortAlreadyAllocated) IPPort() string {
+	return fmt.Sprintf("%s:%d", e.ip, e.port)
+}
+
+// Error is the implementation of error.Error interface
+func (e ErrPortAlreadyAllocated) Error() string {
+	return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
+}
+
+type (
+	// PortAllocator manages the transport ports database
+	PortAllocator struct {
+		mutex sync.Mutex
+		ipMap ipMapping
+		Begin int
+		End   int
+	}
+	portMap struct {
+		p          map[int]struct{}
+		begin, end int
+		last       int
+	}
+	protoMap map[string]*portMap
+)
+
+// Get returns the default instance of PortAllocator
+func Get() *PortAllocator {
+	// Port Allocator is a singleton
+	// Note: Long term solution will be each PortAllocator will have access to
+	// the OS so that it can have up to date view of the OS port allocation.
+	// When this happens singleton behavior will be removed. Clients do not
+	// need to worry about this, they will not see a change in behavior.
+	once.Do(createInstance)
+	return instance
+}
+
+func newInstance() *PortAllocator {
+	start, end, err := getDynamicPortRange()
+	if err != nil {
+		logrus.Warn(err)
+		start, end = DefaultPortRangeStart, DefaultPortRangeEnd
+	}
+	return &PortAllocator{
+		ipMap: ipMapping{},
+		Begin: start,
+		End:   end,
+	}
+}
+
+func getDynamicPortRange() (start int, end int, err error) {
+	const portRangeKernelParam = "/proc/sys/net/ipv4/ip_local_port_range"
+	portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", DefaultPortRangeStart, DefaultPortRangeEnd)
+	file, err := os.Open(portRangeKernelParam)
+	if err != nil {
+		return 0, 0, fmt.Errorf("port allocator - %s due to error: %v", portRangeFallback, err)
+	}
+	n, err := fmt.Fscanf(bufio.NewReader(file), "%d\t%d", &start, &end)
+	if n != 2 || err != nil {
+		if err == nil {
+			err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
+		}
+		return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range from %s - %s: %v", portRangeKernelParam, portRangeFallback, err)
+	}
+	return start, end, nil
+}
+
+// RequestPort requests new port from global ports pool for specified ip and proto.
+// If port is 0 it returns first free port. Otherwise it checks port availability
+// in pool and return that port or error if port is already busy.
+func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if proto != "tcp" && proto != "udp" {
+		return 0, ErrUnknownProtocol
+	}
+
+	if ip == nil {
+		ip = defaultIP
+	}
+	ipstr := ip.String()
+	protomap, ok := p.ipMap[ipstr]
+	if !ok {
+		protomap = protoMap{
+			"tcp": p.newPortMap(),
+			"udp": p.newPortMap(),
+		}
+
+		p.ipMap[ipstr] = protomap
+	}
+	mapping := protomap[proto]
+	if port > 0 {
+		if _, ok := mapping.p[port]; !ok {
+			mapping.p[port] = struct{}{}
+			return port, nil
+		}
+		return 0, newErrPortAlreadyAllocated(ipstr, port)
+	}
+
+	port, err := mapping.findPort()
+	if err != nil {
+		return 0, err
+	}
+	return port, nil
+}
+
+// ReleasePort releases port from global ports pool for specified ip and proto.
+func (p *PortAllocator) ReleasePort(ip net.IP, proto string, port int) error {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if ip == nil {
+		ip = defaultIP
+	}
+	protomap, ok := p.ipMap[ip.String()]
+	if !ok {
+		return nil
+	}
+	delete(protomap[proto].p, port)
+	return nil
+}
+
+func (p *PortAllocator) newPortMap() *portMap {
+	return &portMap{
+		p:     map[int]struct{}{},
+		begin: p.Begin,
+		end:   p.End,
+		last:  p.End,
+	}
+}
+
+// ReleaseAll releases all ports for all ips.
+func (p *PortAllocator) ReleaseAll() error {
+	p.mutex.Lock()
+	p.ipMap = ipMapping{}
+	p.mutex.Unlock()
+	return nil
+}
+
+func (pm *portMap) findPort() (int, error) {
+	port := pm.last
+	for i := 0; i <= pm.end-pm.begin; i++ {
+		port++
+		if port > pm.end {
+			port = pm.begin
+		}
+
+		if _, ok := pm.p[port]; !ok {
+			pm.p[port] = struct{}{}
+			pm.last = port
+			return port, nil
+		}
+	}
+	return 0, ErrAllPortsAllocated
+}
diff --git a/vendor/src/github.com/docker/libnetwork/portallocator/portallocator_test.go b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator_test.go
new file mode 100644
index 0000000..2075649
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/portallocator/portallocator_test.go
@@ -0,0 +1,254 @@
+package portallocator
+
+import (
+	"net"
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+func resetPortAllocator() {
+	instance = newInstance()
+}
+
+func TestRequestNewPort(t *testing.T) {
+	p := Get()
+	defer resetPortAllocator()
+
+	port, err := p.RequestPort(defaultIP, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := p.Begin; port != expected {
+		t.Fatalf("Expected port %d got %d", expected, port)
+	}
+}
+
+func TestRequestSpecificPort(t *testing.T) {
+	p := Get()
+	defer resetPortAllocator()
+
+	port, err := p.RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+}
+
+func TestReleasePort(t *testing.T) {
+	p := Get()
+
+	port, err := p.RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReuseReleasedPort(t *testing.T) {
+	p := Get()
+	defer resetPortAllocator()
+
+	port, err := p.RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil {
+		t.Fatal(err)
+	}
+
+	port, err = p.RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReleaseUnreadledPort(t *testing.T) {
+	p := Get()
+	defer resetPortAllocator()
+
+	port, err := p.RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	port, err = p.RequestPort(defaultIP, "tcp", 5000)
+
+	switch err.(type) {
+	case ErrPortAlreadyAllocated:
+	default:
+		t.Fatalf("Expected port allocation error got %s", err)
+	}
+}
+
+func TestUnknowProtocol(t *testing.T) {
+	if _, err := Get().RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
+		t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err)
+	}
+}
+
+func TestAllocateAllPorts(t *testing.T) {
+	p := Get()
+	defer resetPortAllocator()
+
+	for i := 0; i <= p.End-p.Begin; i++ {
+		port, err := p.RequestPort(defaultIP, "tcp", 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if expected := p.Begin + i; port != expected {
+			t.Fatalf("Expected port %d got %d", expected, port)
+		}
+	}
+
+	if _, err := p.RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated {
+		t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err)
+	}
+
+	_, err := p.RequestPort(defaultIP, "udp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// release a port in the middle and ensure we get another tcp port
+	port := p.Begin + 5
+	if err := p.ReleasePort(defaultIP, "tcp", port); err != nil {
+		t.Fatal(err)
+	}
+	newPort, err := p.RequestPort(defaultIP, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if newPort != port {
+		t.Fatalf("Expected port %d got %d", port, newPort)
+	}
+
+	// now pm.last == newPort, release it so that it's the only free port of
+	// the range, and ensure we get it back
+	if err := p.ReleasePort(defaultIP, "tcp", newPort); err != nil {
+		t.Fatal(err)
+	}
+	port, err = p.RequestPort(defaultIP, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if newPort != port {
+		t.Fatalf("Expected port %d got %d", newPort, port)
+	}
+}
+
+func BenchmarkAllocatePorts(b *testing.B) {
+	p := Get()
+	defer resetPortAllocator()
+
+	for i := 0; i < b.N; i++ {
+		for i := 0; i <= p.End-p.Begin; i++ {
+			port, err := p.RequestPort(defaultIP, "tcp", 0)
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			if expected := p.Begin + i; port != expected {
+				b.Fatalf("Expected port %d got %d", expected, port)
+			}
+		}
+		p.ReleaseAll()
+	}
+}
+
+func TestPortAllocation(t *testing.T) {
+	p := Get()
+	defer resetPortAllocator()
+
+	ip := net.ParseIP("192.168.0.1")
+	ip2 := net.ParseIP("192.168.0.2")
+	if port, err := p.RequestPort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	} else if port != 80 {
+		t.Fatalf("Acquire(80) should return 80, not %d", port)
+	}
+	port, err := p.RequestPort(ip, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port <= 0 {
+		t.Fatalf("Acquire(0) should return a non-zero port")
+	}
+
+	if _, err := p.RequestPort(ip, "tcp", port); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+
+	if newPort, err := p.RequestPort(ip, "tcp", 0); err != nil {
+		t.Fatal(err)
+	} else if newPort == port {
+		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
+	}
+
+	if _, err := p.RequestPort(ip, "tcp", 80); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+	if _, err := p.RequestPort(ip2, "tcp", 80); err != nil {
+		t.Fatalf("It should be possible to allocate the same port on a different interface")
+	}
+	if _, err := p.RequestPort(ip2, "tcp", 80); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+	if err := p.ReleasePort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := p.RequestPort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	}
+
+	port, err = p.RequestPort(ip, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	port2, err := p.RequestPort(ip, "tcp", port+1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	port3, err := p.RequestPort(ip, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port3 == port2 {
+		t.Fatal("Requesting a dynamic port should never allocate a used port")
+	}
+}
+
+func TestNoDuplicateBPR(t *testing.T) {
+	p := Get()
+	defer resetPortAllocator()
+
+	if port, err := p.RequestPort(defaultIP, "tcp", p.Begin); err != nil {
+		t.Fatal(err)
+	} else if port != p.Begin {
+		t.Fatalf("Expected port %d got %d", p.Begin, port)
+	}
+
+	if port, err := p.RequestPort(defaultIP, "tcp", 0); err != nil {
+		t.Fatal(err)
+	} else if port == p.Begin {
+		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/portmapper/mapper.go b/vendor/src/github.com/docker/libnetwork/portmapper/mapper.go
new file mode 100644
index 0000000..ac32f66
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/mapper.go
@@ -0,0 +1,207 @@
+package portmapper
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/libnetwork/iptables"
+	"github.com/docker/libnetwork/portallocator"
+)
+
+type mapping struct {
+	proto         string
+	userlandProxy userlandProxy
+	host          net.Addr
+	container     net.Addr
+}
+
+var newProxy = newProxyCommand
+
+var (
+	// ErrUnknownBackendAddressType refers to an unknown container or unsupported address type
+	ErrUnknownBackendAddressType = errors.New("unknown container address type not supported")
+	// ErrPortMappedForIP refers to a port already mapped to an ip address
+	ErrPortMappedForIP = errors.New("port is already mapped to ip")
+	// ErrPortNotMapped refers to an unmapped port
+	ErrPortNotMapped = errors.New("port is not mapped")
+)
+
+// PortMapper manages the network address translation
+type PortMapper struct {
+	chain *iptables.Chain
+
+	// udp:ip:port
+	currentMappings map[string]*mapping
+	lock            sync.Mutex
+
+	Allocator *portallocator.PortAllocator
+}
+
+// New returns a new instance of PortMapper
+func New() *PortMapper {
+	return NewWithPortAllocator(portallocator.Get())
+}
+
+// NewWithPortAllocator returns a new instance of PortMapper which will use the specified PortAllocator
+func NewWithPortAllocator(allocator *portallocator.PortAllocator) *PortMapper {
+	return &PortMapper{
+		currentMappings: make(map[string]*mapping),
+		Allocator:       allocator,
+	}
+}
+
+// SetIptablesChain sets the specified chain into portmapper
+func (pm *PortMapper) SetIptablesChain(c *iptables.Chain) {
+	pm.chain = c
+}
+
+// Map maps the specified container transport address to the host's network address and transport port
+func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int, useProxy bool) (host net.Addr, err error) {
+	pm.lock.Lock()
+	defer pm.lock.Unlock()
+
+	var (
+		m                 *mapping
+		proto             string
+		allocatedHostPort int
+	)
+
+	switch container.(type) {
+	case *net.TCPAddr:
+		proto = "tcp"
+		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
+			return nil, err
+		}
+
+		m = &mapping{
+			proto:     proto,
+			host:      &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
+			container: container,
+		}
+
+		if useProxy {
+			m.userlandProxy = newProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port)
+		} else {
+			m.userlandProxy = newDummyProxy(proto, hostIP, allocatedHostPort)
+		}
+	case *net.UDPAddr:
+		proto = "udp"
+		if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil {
+			return nil, err
+		}
+
+		m = &mapping{
+			proto:     proto,
+			host:      &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
+			container: container,
+		}
+
+		if useProxy {
+			m.userlandProxy = newProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port)
+		} else {
+			m.userlandProxy = newDummyProxy(proto, hostIP, allocatedHostPort)
+		}
+	default:
+		return nil, ErrUnknownBackendAddressType
+	}
+
+	// release the allocated port on any further error during return.
+	defer func() {
+		if err != nil {
+			pm.Allocator.ReleasePort(hostIP, proto, allocatedHostPort)
+		}
+	}()
+
+	key := getKey(m.host)
+	if _, exists := pm.currentMappings[key]; exists {
+		return nil, ErrPortMappedForIP
+	}
+
+	containerIP, containerPort := getIPAndPort(m.container)
+	if err := pm.forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
+		return nil, err
+	}
+
+	cleanup := func() error {
+		// need to undo the iptables rules before we return
+		m.userlandProxy.Stop()
+		pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
+		if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	if err := m.userlandProxy.Start(); err != nil {
+		if err := cleanup(); err != nil {
+			return nil, fmt.Errorf("Error during port allocation cleanup: %v", err)
+		}
+		return nil, err
+	}
+
+	pm.currentMappings[key] = m
+	return m.host, nil
+}
+
+// Unmap removes stored mapping for the specified host transport address
+func (pm *PortMapper) Unmap(host net.Addr) error {
+	pm.lock.Lock()
+	defer pm.lock.Unlock()
+
+	key := getKey(host)
+	data, exists := pm.currentMappings[key]
+	if !exists {
+		return ErrPortNotMapped
+	}
+
+	if data.userlandProxy != nil {
+		data.userlandProxy.Stop()
+	}
+
+	delete(pm.currentMappings, key)
+
+	containerIP, containerPort := getIPAndPort(data.container)
+	hostIP, hostPort := getIPAndPort(data.host)
+	if err := pm.forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
+		logrus.Errorf("Error on iptables delete: %s", err)
+	}
+
+	switch a := host.(type) {
+	case *net.TCPAddr:
+		return pm.Allocator.ReleasePort(a.IP, "tcp", a.Port)
+	case *net.UDPAddr:
+		return pm.Allocator.ReleasePort(a.IP, "udp", a.Port)
+	}
+	return nil
+}
+
+func getKey(a net.Addr) string {
+	switch t := a.(type) {
+	case *net.TCPAddr:
+		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp")
+	case *net.UDPAddr:
+		return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp")
+	}
+	return ""
+}
+
+func getIPAndPort(a net.Addr) (net.IP, int) {
+	switch t := a.(type) {
+	case *net.TCPAddr:
+		return t.IP, t.Port
+	case *net.UDPAddr:
+		return t.IP, t.Port
+	}
+	return nil, 0
+}
+
+func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
+	if pm.chain == nil {
+		return nil
+	}
+	return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/portmapper/mapper_test.go b/vendor/src/github.com/docker/libnetwork/portmapper/mapper_test.go
new file mode 100644
index 0000000..635723d
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/mapper_test.go
@@ -0,0 +1,271 @@
+package portmapper
+
+import (
+	"net"
+	"strings"
+	"testing"
+
+	"github.com/docker/libnetwork/iptables"
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+func init() {
+	// override this func to mock out the proxy server
+	newProxy = newMockProxyCommand
+}
+
+func TestSetIptablesChain(t *testing.T) {
+	pm := New()
+
+	c := &iptables.Chain{
+		Name:   "TEST",
+		Bridge: "192.168.1.1",
+	}
+
+	if pm.chain != nil {
+		t.Fatal("chain should be nil at init")
+	}
+
+	pm.SetIptablesChain(c)
+	if pm.chain == nil {
+		t.Fatal("chain should not be nil after set")
+	}
+}
+
+func TestMapTCPPorts(t *testing.T) {
+	pm := New()
+	dstIP1 := net.ParseIP("192.168.0.1")
+	dstIP2 := net.ParseIP("192.168.0.2")
+	dstAddr1 := &net.TCPAddr{IP: dstIP1, Port: 80}
+	dstAddr2 := &net.TCPAddr{IP: dstIP2, Port: 80}
+
+	srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+	srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
+
+	addrEqual := func(addr1, addr2 net.Addr) bool {
+		return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+	}
+
+	if host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	} else if !addrEqual(dstAddr1, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
+	}
+
+	if _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {
+		t.Fatalf("Port is in use - mapping should have failed")
+	}
+
+	if _, err := pm.Map(srcAddr2, dstIP1, 80, true); err == nil {
+		t.Fatalf("Port is in use - mapping should have failed")
+	}
+
+	if _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	}
+
+	if pm.Unmap(dstAddr1) != nil {
+		t.Fatalf("Failed to release port")
+	}
+
+	if pm.Unmap(dstAddr2) != nil {
+		t.Fatalf("Failed to release port")
+	}
+
+	if pm.Unmap(dstAddr2) == nil {
+		t.Fatalf("Port already released, but no error reported")
+	}
+}
+
+func TestGetUDPKey(t *testing.T) {
+	addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
+
+	key := getKey(addr)
+
+	if expected := "192.168.1.5:53/udp"; key != expected {
+		t.Fatalf("expected key %s got %s", expected, key)
+	}
+}
+
+func TestGetTCPKey(t *testing.T) {
+	addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80}
+
+	key := getKey(addr)
+
+	if expected := "192.168.1.5:80/tcp"; key != expected {
+		t.Fatalf("expected key %s got %s", expected, key)
+	}
+}
+
+func TestGetUDPIPAndPort(t *testing.T) {
+	addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53}
+
+	ip, port := getIPAndPort(addr)
+	if expected := "192.168.1.5"; ip.String() != expected {
+		t.Fatalf("expected ip %s got %s", expected, ip)
+	}
+
+	if ep := 53; port != ep {
+		t.Fatalf("expected port %d got %d", ep, port)
+	}
+}
+
+func TestMapUDPPorts(t *testing.T) {
+	pm := New()
+	dstIP1 := net.ParseIP("192.168.0.1")
+	dstIP2 := net.ParseIP("192.168.0.2")
+	dstAddr1 := &net.UDPAddr{IP: dstIP1, Port: 80}
+	dstAddr2 := &net.UDPAddr{IP: dstIP2, Port: 80}
+
+	srcAddr1 := &net.UDPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+	srcAddr2 := &net.UDPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
+
+	addrEqual := func(addr1, addr2 net.Addr) bool {
+		return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+	}
+
+	if host, err := pm.Map(srcAddr1, dstIP1, 80, true); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	} else if !addrEqual(dstAddr1, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
+	}
+
+	if _, err := pm.Map(srcAddr1, dstIP1, 80, true); err == nil {
+		t.Fatalf("Port is in use - mapping should have failed")
+	}
+
+	if _, err := pm.Map(srcAddr2, dstIP1, 80, true); err == nil {
+		t.Fatalf("Port is in use - mapping should have failed")
+	}
+
+	if _, err := pm.Map(srcAddr2, dstIP2, 80, true); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	}
+
+	if pm.Unmap(dstAddr1) != nil {
+		t.Fatalf("Failed to release port")
+	}
+
+	if pm.Unmap(dstAddr2) != nil {
+		t.Fatalf("Failed to release port")
+	}
+
+	if pm.Unmap(dstAddr2) == nil {
+		t.Fatalf("Port already released, but no error reported")
+	}
+}
+
+func TestMapAllPortsSingleInterface(t *testing.T) {
+	pm := New()
+	dstIP1 := net.ParseIP("0.0.0.0")
+	srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+
+	hosts := []net.Addr{}
+	var host net.Addr
+	var err error
+
+	defer func() {
+		for _, val := range hosts {
+			pm.Unmap(val)
+		}
+	}()
+
+	for i := 0; i < 10; i++ {
+		start, end := pm.Allocator.Begin, pm.Allocator.End
+		for i := start; i < end; i++ {
+			if host, err = pm.Map(srcAddr1, dstIP1, 0, true); err != nil {
+				t.Fatal(err)
+			}
+
+			hosts = append(hosts, host)
+		}
+
+		if _, err := pm.Map(srcAddr1, dstIP1, start, true); err == nil {
+			t.Fatalf("Port %d should be bound but is not", start)
+		}
+
+		for _, val := range hosts {
+			if err := pm.Unmap(val); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		hosts = []net.Addr{}
+	}
+}
+
+func TestMapTCPDummyListen(t *testing.T) {
+	pm := New()
+	dstIP := net.ParseIP("0.0.0.0")
+	dstAddr := &net.TCPAddr{IP: dstIP, Port: 80}
+
+	// no-op for dummy
+	srcAddr := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+
+	addrEqual := func(addr1, addr2 net.Addr) bool {
+		return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+	}
+
+	if host, err := pm.Map(srcAddr, dstIP, 80, false); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	} else if !addrEqual(dstAddr, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr.String(), dstAddr.Network(), host.String(), host.Network())
+	}
+	if _, err := net.Listen("tcp", "0.0.0.0:80"); err == nil {
+		t.Fatal("Listen on mapped port without proxy should fail")
+	} else {
+		if !strings.Contains(err.Error(), "address already in use") {
+			t.Fatalf("Error should be about address already in use, got %v", err)
+		}
+	}
+	if _, err := net.Listen("tcp", "0.0.0.0:81"); err != nil {
+		t.Fatal(err)
+	}
+	if host, err := pm.Map(srcAddr, dstIP, 81, false); err == nil {
+		t.Fatalf("Bound port shouldn't be allocated, but it was on: %v", host)
+	} else {
+		if !strings.Contains(err.Error(), "address already in use") {
+			t.Fatalf("Error should be about address already in use, got %v", err)
+		}
+	}
+}
+
+func TestMapUDPDummyListen(t *testing.T) {
+	pm := New()
+	dstIP := net.ParseIP("0.0.0.0")
+	dstAddr := &net.UDPAddr{IP: dstIP, Port: 80}
+
+	// no-op for dummy
+	srcAddr := &net.UDPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+
+	addrEqual := func(addr1, addr2 net.Addr) bool {
+		return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+	}
+
+	if host, err := pm.Map(srcAddr, dstIP, 80, false); err != nil {
+		t.Fatalf("Failed to allocate port: %s", err)
+	} else if !addrEqual(dstAddr, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr.String(), dstAddr.Network(), host.String(), host.Network())
+	}
+	if _, err := net.ListenUDP("udp", &net.UDPAddr{IP: dstIP, Port: 80}); err == nil {
+		t.Fatal("Listen on mapped port without proxy should fail")
+	} else {
+		if !strings.Contains(err.Error(), "address already in use") {
+			t.Fatalf("Error should be about address already in use, got %v", err)
+		}
+	}
+	if _, err := net.ListenUDP("udp", &net.UDPAddr{IP: dstIP, Port: 81}); err != nil {
+		t.Fatal(err)
+	}
+	if host, err := pm.Map(srcAddr, dstIP, 81, false); err == nil {
+		t.Fatalf("Bound port shouldn't be allocated, but it was on: %v", host)
+	} else {
+		if !strings.Contains(err.Error(), "address already in use") {
+			t.Fatalf("Error should be about address already in use, got %v", err)
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go b/vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go
new file mode 100644
index 0000000..29b1605
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go
@@ -0,0 +1,18 @@
+package portmapper
+
+import "net"
+
+func newMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) userlandProxy {
+	return &mockProxyCommand{}
+}
+
+type mockProxyCommand struct {
+}
+
+func (p *mockProxyCommand) Start() error {
+	return nil
+}
+
+func (p *mockProxyCommand) Stop() error {
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/portmapper/proxy.go b/vendor/src/github.com/docker/libnetwork/portmapper/proxy.go
new file mode 100644
index 0000000..530703b
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/portmapper/proxy.go
@@ -0,0 +1,209 @@
+package portmapper
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"os"
+	"os/exec"
+	"os/signal"
+	"strconv"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/proxy"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+const userlandProxyCommandName = "docker-proxy"
+
+func init() {
+	reexec.Register(userlandProxyCommandName, execProxy)
+}
+
+type userlandProxy interface {
+	Start() error
+	Stop() error
+}
+
+// proxyCommand wraps an exec.Cmd to run the userland TCP and UDP
+// proxies as separate processes.
+type proxyCommand struct {
+	cmd *exec.Cmd
+}
+
+// execProxy is the reexec function that is registered to start the userland proxies
+func execProxy() {
+	f := os.NewFile(3, "signal-parent")
+	host, container := parseHostContainerAddrs()
+
+	p, err := proxy.NewProxy(host, container)
+	if err != nil {
+		fmt.Fprintf(f, "1\n%s", err)
+		f.Close()
+		os.Exit(1)
+	}
+	go handleStopSignals(p)
+	fmt.Fprint(f, "0\n")
+	f.Close()
+
+	// Run will block until the proxy stops
+	p.Run()
+}
+
+// parseHostContainerAddrs parses the flags passed on reexec to create the TCP or UDP
+// net.Addrs to map the host and container ports
+func parseHostContainerAddrs() (host net.Addr, container net.Addr) {
+	var (
+		proto         = flag.String("proto", "tcp", "proxy protocol")
+		hostIP        = flag.String("host-ip", "", "host ip")
+		hostPort      = flag.Int("host-port", -1, "host port")
+		containerIP   = flag.String("container-ip", "", "container ip")
+		containerPort = flag.Int("container-port", -1, "container port")
+	)
+
+	flag.Parse()
+
+	switch *proto {
+	case "tcp":
+		host = &net.TCPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort}
+		container = &net.TCPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort}
+	case "udp":
+		host = &net.UDPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort}
+		container = &net.UDPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort}
+	default:
+		log.Fatalf("unsupported protocol %s", *proto)
+	}
+
+	return host, container
+}
+
+func handleStopSignals(p proxy.Proxy) {
+	s := make(chan os.Signal, 10)
+	signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP)
+
+	for _ = range s {
+		p.Close()
+
+		os.Exit(0)
+	}
+}
+
+func newProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) userlandProxy {
+	args := []string{
+		userlandProxyCommandName,
+		"-proto", proto,
+		"-host-ip", hostIP.String(),
+		"-host-port", strconv.Itoa(hostPort),
+		"-container-ip", containerIP.String(),
+		"-container-port", strconv.Itoa(containerPort),
+	}
+
+	return &proxyCommand{
+		cmd: &exec.Cmd{
+			Path: reexec.Self(),
+			Args: args,
+			SysProcAttr: &syscall.SysProcAttr{
+				Pdeathsig: syscall.SIGTERM, // send a sigterm to the proxy if the daemon process dies
+			},
+		},
+	}
+}
+
+func (p *proxyCommand) Start() error {
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("proxy unable to open os.Pipe %s", err)
+	}
+	defer r.Close()
+	p.cmd.ExtraFiles = []*os.File{w}
+	if err := p.cmd.Start(); err != nil {
+		return err
+	}
+	w.Close()
+
+	errchan := make(chan error, 1)
+	go func() {
+		buf := make([]byte, 2)
+		r.Read(buf)
+
+		if string(buf) != "0\n" {
+			errStr, err := ioutil.ReadAll(r)
+			if err != nil {
+				errchan <- fmt.Errorf("Error reading exit status from userland proxy: %v", err)
+				return
+			}
+
+			errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr)
+			return
+		}
+		errchan <- nil
+	}()
+
+	select {
+	case err := <-errchan:
+		return err
+	case <-time.After(16 * time.Second):
+		return fmt.Errorf("Timed out proxy starting the userland proxy")
+	}
+}
+
+func (p *proxyCommand) Stop() error {
+	if p.cmd.Process != nil {
+		if err := p.cmd.Process.Signal(os.Interrupt); err != nil {
+			return err
+		}
+		return p.cmd.Wait()
+	}
+	return nil
+}
+
+// dummyProxy just listen on some port, it is needed to prevent accidental
+// port allocations on bound port, because without userland proxy we using
+// iptables rules and not net.Listen
+type dummyProxy struct {
+	listener io.Closer
+	addr     net.Addr
+}
+
+func newDummyProxy(proto string, hostIP net.IP, hostPort int) userlandProxy {
+	switch proto {
+	case "tcp":
+		addr := &net.TCPAddr{IP: hostIP, Port: hostPort}
+		return &dummyProxy{addr: addr}
+	case "udp":
+		addr := &net.UDPAddr{IP: hostIP, Port: hostPort}
+		return &dummyProxy{addr: addr}
+	}
+	return nil
+}
+
+func (p *dummyProxy) Start() error {
+	switch addr := p.addr.(type) {
+	case *net.TCPAddr:
+		l, err := net.ListenTCP("tcp", addr)
+		if err != nil {
+			return err
+		}
+		p.listener = l
+	case *net.UDPAddr:
+		l, err := net.ListenUDP("udp", addr)
+		if err != nil {
+			return err
+		}
+		p.listener = l
+	default:
+		return fmt.Errorf("Unknown addr type: %T", p.addr)
+	}
+	return nil
+}
+
+func (p *dummyProxy) Stop() error {
+	if p.listener != nil {
+		return p.listener.Close()
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/resolvconf/README.md b/vendor/src/github.com/docker/libnetwork/resolvconf/README.md
new file mode 100644
index 0000000..cdda554
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/resolvconf/README.md
@@ -0,0 +1 @@
+Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf
diff --git a/vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
new file mode 100644
index 0000000..d581a19
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
@@ -0,0 +1,17 @@
+package dns
+
+import (
+	"regexp"
+)
+
+// IPLocalhost is a regex patter for localhost IP address range.
+const IPLocalhost = `((127\.([0-9]{1,3}.){2}[0-9]{1,3})|(::1))`
+
+var localhostIPRegexp = regexp.MustCompile(IPLocalhost)
+
+// IsLocalhost returns true if ip matches the localhost IP regular expression.
+// Used for determining if nameserver settings are being passed which are
+// localhost addresses
+func IsLocalhost(ip string) bool {
+	return localhostIPRegexp.MatchString(ip)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go
new file mode 100644
index 0000000..ebe3b71
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go
@@ -0,0 +1,187 @@
+// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf
+package resolvconf
+
+import (
+	"bytes"
+	"io/ioutil"
+	"regexp"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/libnetwork/resolvconf/dns"
+)
+
+var (
+	// Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS
+	defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"}
+	defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"}
+	ipv4NumBlock   = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`
+	ipv4Address    = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock
+	// This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also
+	// will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants
+	// -- e.g. other link-local types -- either won't work in containers or are unnecessary.
+	// For readability and sufficiency for Docker purposes this seemed more reasonable than a
+	// 1000+ character regexp with exact and complete IPv6 validation
+	ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})`
+
+	localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`)
+	nsIPv6Regexp      = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`)
+	nsRegexp          = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`)
+	searchRegexp      = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`)
+)
+
+var lastModified struct {
+	sync.Mutex
+	sha256   string
+	contents []byte
+}
+
+// Get returns the contents of /etc/resolv.conf
+func Get() ([]byte, error) {
+	resolv, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		return nil, err
+	}
+	return resolv, nil
+}
+
+// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash
+// and, if modified since last check, returns the bytes and new hash.
+// This feature is used by the resolv.conf updater for containers
+func GetIfChanged() ([]byte, string, error) {
+	lastModified.Lock()
+	defer lastModified.Unlock()
+
+	resolv, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		return nil, "", err
+	}
+	newHash, err := ioutils.HashData(bytes.NewReader(resolv))
+	if err != nil {
+		return nil, "", err
+	}
+	if lastModified.sha256 != newHash {
+		lastModified.sha256 = newHash
+		lastModified.contents = resolv
+		return resolv, newHash, nil
+	}
+	// nothing changed, so return no data
+	return nil, "", nil
+}
+
+// GetLastModified retrieves the last used contents and hash of the host resolv.conf.
+// Used by containers updating on restart
+func GetLastModified() ([]byte, string) {
+	lastModified.Lock()
+	defer lastModified.Unlock()
+
+	return lastModified.contents, lastModified.sha256
+}
+
+// FilterResolvDNS cleans up the config in resolvConf.  It has two main jobs:
+// 1. It looks for localhost (127.*|::1) entries in the provided
+//    resolv.conf, removing local nameserver entries, and, if the resulting
+//    cleaned config has no defined nameservers left, adds default DNS entries
+// 2. Given the caller provides the enable/disable state of IPv6, the filter
+//    code will remove all IPv6 nameservers if it is not enabled for containers
+//
+// It returns a boolean to notify the caller if changes were made at all
+func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) ([]byte, bool) {
+	changed := false
+	cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{})
+	// if IPv6 is not enabled, also clean out any IPv6 address nameserver
+	if !ipv6Enabled {
+		cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{})
+	}
+	// if the resulting resolvConf has no more nameservers defined, add appropriate
+	// default DNS servers for IPv4 and (optionally) IPv6
+	if len(GetNameservers(cleanedResolvConf)) == 0 {
+		logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers : %v", defaultIPv4Dns)
+		dns := defaultIPv4Dns
+		if ipv6Enabled {
+			logrus.Infof("IPv6 enabled; Adding default IPv6 external servers : %v", defaultIPv6Dns)
+			dns = append(dns, defaultIPv6Dns...)
+		}
+		cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...)
+	}
+	if !bytes.Equal(resolvConf, cleanedResolvConf) {
+		changed = true
+	}
+	return cleanedResolvConf, changed
+}
+
+// getLines parses input into lines and strips away comments.
+func getLines(input []byte, commentMarker []byte) [][]byte {
+	lines := bytes.Split(input, []byte("\n"))
+	var output [][]byte
+	for _, currentLine := range lines {
+		var commentIndex = bytes.Index(currentLine, commentMarker)
+		if commentIndex == -1 {
+			output = append(output, currentLine)
+		} else {
+			output = append(output, currentLine[:commentIndex])
+		}
+	}
+	return output
+}
+
+// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
+func GetNameservers(resolvConf []byte) []string {
+	nameservers := []string{}
+	for _, line := range getLines(resolvConf, []byte("#")) {
+		var ns = nsRegexp.FindSubmatch(line)
+		if len(ns) > 0 {
+			nameservers = append(nameservers, string(ns[1]))
+		}
+	}
+	return nameservers
+}
+
+// GetNameserversAsCIDR returns nameservers (if any) listed in
+// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
+// This function's output is intended for net.ParseCIDR
+func GetNameserversAsCIDR(resolvConf []byte) []string {
+	nameservers := []string{}
+	for _, nameserver := range GetNameservers(resolvConf) {
+		nameservers = append(nameservers, nameserver+"/32")
+	}
+	return nameservers
+}
+
+// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf
+// If more than one search line is encountered, only the contents of the last
+// one is returned.
+func GetSearchDomains(resolvConf []byte) []string {
+	domains := []string{}
+	for _, line := range getLines(resolvConf, []byte("#")) {
+		match := searchRegexp.FindSubmatch(line)
+		if match == nil {
+			continue
+		}
+		domains = strings.Fields(string(match[1]))
+	}
+	return domains
+}
+
+// Build writes a configuration file to path containing a "nameserver" entry
+// for every element in dns, and a "search" entry for every element in
+// dnsSearch.
+func Build(path string, dns, dnsSearch []string) error {
+	content := bytes.NewBuffer(nil)
+	for _, dns := range dns {
+		if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil {
+			return err
+		}
+	}
+	if len(dnsSearch) > 0 {
+		if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." {
+			if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
+				return err
+			}
+		}
+	}
+
+	return ioutil.WriteFile(path, content.Bytes(), 0644)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go
new file mode 100644
index 0000000..a21c7af
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf_test.go
@@ -0,0 +1,240 @@
+package resolvconf
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+func TestGet(t *testing.T) {
+	resolvConfUtils, err := Get()
+	if err != nil {
+		t.Fatal(err)
+	}
+	resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(resolvConfUtils) != string(resolvConfSystem) {
+		t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.")
+	}
+}
+
+func TestGetNameservers(t *testing.T) {
+	for resolv, result := range map[string][]string{`
+nameserver 1.2.3.4
+nameserver 40.3.200.10
+search example.com`: {"1.2.3.4", "40.3.200.10"},
+		`search example.com`: {},
+		`nameserver 1.2.3.4
+search example.com
+nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"},
+		``: {},
+		`  nameserver 1.2.3.4   `: {"1.2.3.4"},
+		`search example.com
+nameserver 1.2.3.4
+#nameserver 4.3.2.1`: {"1.2.3.4"},
+		`search example.com
+nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"},
+	} {
+		test := GetNameservers([]byte(resolv))
+		if !strSlicesEqual(test, result) {
+			t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
+		}
+	}
+}
+
+func TestGetNameserversAsCIDR(t *testing.T) {
+	for resolv, result := range map[string][]string{`
+nameserver 1.2.3.4
+nameserver 40.3.200.10
+search example.com`: {"1.2.3.4/32", "40.3.200.10/32"},
+		`search example.com`: {},
+		`nameserver 1.2.3.4
+search example.com
+nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"},
+		``: {},
+		`  nameserver 1.2.3.4   `: {"1.2.3.4/32"},
+		`search example.com
+nameserver 1.2.3.4
+#nameserver 4.3.2.1`: {"1.2.3.4/32"},
+		`search example.com
+nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"},
+	} {
+		test := GetNameserversAsCIDR([]byte(resolv))
+		if !strSlicesEqual(test, result) {
+			t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
+		}
+	}
+}
+
+func TestGetSearchDomains(t *testing.T) {
+	for resolv, result := range map[string][]string{
+		`search example.com`:           {"example.com"},
+		`search example.com # ignored`: {"example.com"},
+		` 	  search 	 example.com 	  `: {"example.com"},
+		` 	  search 	 example.com 	  # ignored`: {"example.com"},
+		`search foo.example.com example.com`: {"foo.example.com", "example.com"},
+		`	   search   	   foo.example.com 	 example.com 	`: {"foo.example.com", "example.com"},
+		`	   search   	   foo.example.com 	 example.com 	# ignored`: {"foo.example.com", "example.com"},
+		``:          {},
+		`# ignored`: {},
+		`nameserver 1.2.3.4
+search foo.example.com example.com`: {"foo.example.com", "example.com"},
+		`nameserver 1.2.3.4
+search dup1.example.com dup2.example.com
+search foo.example.com example.com`: {"foo.example.com", "example.com"},
+		`nameserver 1.2.3.4
+search foo.example.com example.com
+nameserver 4.30.20.100`: {"foo.example.com", "example.com"},
+	} {
+		test := GetSearchDomains([]byte(resolv))
+		if !strSlicesEqual(test, result) {
+			t.Fatalf("Wrong search domain string {%s} should be %v. Input: %s", test, result, resolv)
+		}
+	}
+}
+
+func strSlicesEqual(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	for i, v := range a {
+		if v != b[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func TestBuild(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err := ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\nsearch search1\n"; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+}
+
+func TestBuildWithZeroLengthDomainSearch(t *testing.T) {
+	file, err := ioutil.TempFile("", "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(file.Name())
+
+	err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	content, err := ioutil.ReadFile(file.Name())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) {
+		t.Fatalf("Expected to find '%s' got '%s'", expected, content)
+	}
+	if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) {
+		t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content)
+	}
+}
+
+func TestFilterResolvDns(t *testing.T) {
+	ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n"
+
+	if result, _ := FilterResolvDNS([]byte(ns0), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n"
+	if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n"
+	if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n"
+	if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	ns1 = "nameserver ::1\nnameserver 10.16.60.14\nnameserver 127.0.2.1\nnameserver 10.16.60.21\n"
+	if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	ns1 = "nameserver 10.16.60.14\nnameserver ::1\nnameserver 10.16.60.21\nnameserver ::1"
+	if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	// with IPv6 disabled (false param), the IPv6 nameserver should be removed
+	ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1"
+	if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed Localhost+IPv6 off: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	// with IPv6 enabled, the IPv6 nameserver should be preserved
+	ns0 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\n"
+	ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1"
+	if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed Localhost+IPv6 on: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	// with IPv6 enabled, and no non-localhost servers, Google defaults (both IPv4+IPv6) should be added
+	ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\nnameserver 2001:4860:4860::8888\nnameserver 2001:4860:4860::8844"
+	ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1"
+	if result, _ := FilterResolvDNS([]byte(ns1), true); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+
+	// with IPv6 disabled, and no non-localhost servers, Google defaults (only IPv4) should be added
+	ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4"
+	ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1"
+	if result, _ := FilterResolvDNS([]byte(ns1), false); result != nil {
+		if ns0 != string(result) {
+			t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result))
+		}
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/configure_linux.go b/vendor/src/github.com/docker/libnetwork/sandbox/configure_linux.go
new file mode 100644
index 0000000..cae7789
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/configure_linux.go
@@ -0,0 +1,81 @@
+package sandbox
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"runtime"
+
+	"github.com/vishvananda/netlink"
+	"github.com/vishvananda/netns"
+)
+
+func configureInterface(iface netlink.Link, settings *Interface) error {
+	ifaceName := iface.Attrs().Name
+	ifaceConfigurators := []struct {
+		Fn         func(netlink.Link, *Interface) error
+		ErrMessage string
+	}{
+		{setInterfaceName, fmt.Sprintf("error renaming interface %q to %q", ifaceName, settings.DstName)},
+		{setInterfaceIP, fmt.Sprintf("error setting interface %q IP to %q", ifaceName, settings.Address)},
+		{setInterfaceIPv6, fmt.Sprintf("error setting interface %q IPv6 to %q", ifaceName, settings.AddressIPv6)},
+	}
+
+	for _, config := range ifaceConfigurators {
+		if err := config.Fn(iface, settings); err != nil {
+			return fmt.Errorf("%s: %v", config.ErrMessage, err)
+		}
+	}
+	return nil
+}
+
+func programGateway(path string, gw net.IP) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	origns, err := netns.Get()
+	if err != nil {
+		return err
+	}
+	defer origns.Close()
+
+	f, err := os.OpenFile(path, os.O_RDONLY, 0)
+	if err != nil {
+		return fmt.Errorf("failed get network namespace %q: %v", path, err)
+	}
+	defer f.Close()
+
+	nsFD := f.Fd()
+	if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+		return err
+	}
+	defer netns.Set(origns)
+
+	gwRoutes, err := netlink.RouteGet(gw)
+	if err != nil {
+		return fmt.Errorf("route for the gateway could not be found: %v", err)
+	}
+
+	return netlink.RouteAdd(&netlink.Route{
+		Scope:     netlink.SCOPE_UNIVERSE,
+		LinkIndex: gwRoutes[0].LinkIndex,
+		Gw:        gw,
+	})
+}
+
+func setInterfaceIP(iface netlink.Link, settings *Interface) error {
+	ipAddr := &netlink.Addr{IPNet: settings.Address, Label: ""}
+	return netlink.AddrAdd(iface, ipAddr)
+}
+
+func setInterfaceIPv6(iface netlink.Link, settings *Interface) error {
+	if settings.AddressIPv6 == nil {
+		return nil
+	}
+	ipAddr := &netlink.Addr{IPNet: settings.AddressIPv6, Label: ""}
+	return netlink.AddrAdd(iface, ipAddr)
+}
+
+func setInterfaceName(iface netlink.Link, settings *Interface) error {
+	return netlink.LinkSetName(iface, settings.DstName)
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/namespace_linux.go b/vendor/src/github.com/docker/libnetwork/sandbox/namespace_linux.go
new file mode 100644
index 0000000..3912beb
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/namespace_linux.go
@@ -0,0 +1,340 @@
+package sandbox
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"runtime"
+	"sync"
+	"syscall"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/vishvananda/netlink"
+	"github.com/vishvananda/netns"
+)
+
+const prefix = "/var/run/docker/netns"
+
+var (
+	once             sync.Once
+	garbagePathMap   = make(map[string]bool)
+	gpmLock          sync.Mutex
+	gpmWg            sync.WaitGroup
+	gpmCleanupPeriod = 60 * time.Second
+)
+
+// The networkNamespace type is the linux implementation of the Sandbox
+// interface. It represents a linux network namespace, and moves an interface
+// into it when called on method AddInterface or sets the gateway etc.
+type networkNamespace struct {
+	path        string
+	sinfo       *Info
+	nextIfIndex int
+	sync.Mutex
+}
+
+func init() {
+	reexec.Register("netns-create", reexecCreateNamespace)
+}
+
+func createBasePath() {
+	err := os.MkdirAll(prefix, 0644)
+	if err != nil && !os.IsExist(err) {
+		panic("Could not create net namespace path directory")
+	}
+
+	// Start the garbage collection go routine
+	go removeUnusedPaths()
+}
+
+func removeUnusedPaths() {
+	for range time.Tick(gpmCleanupPeriod) {
+		gpmLock.Lock()
+		pathList := make([]string, 0, len(garbagePathMap))
+		for path := range garbagePathMap {
+			pathList = append(pathList, path)
+		}
+		garbagePathMap = make(map[string]bool)
+		gpmWg.Add(1)
+		gpmLock.Unlock()
+
+		for _, path := range pathList {
+			os.Remove(path)
+		}
+
+		gpmWg.Done()
+	}
+}
+
+func addToGarbagePaths(path string) {
+	gpmLock.Lock()
+	garbagePathMap[path] = true
+	gpmLock.Unlock()
+}
+
+func removeFromGarbagePaths(path string) {
+	gpmLock.Lock()
+	delete(garbagePathMap, path)
+	gpmLock.Unlock()
+}
+
+// GenerateKey generates a sandbox key based on the passed
+// container id.
+func GenerateKey(containerID string) string {
+	maxLen := 12
+	if len(containerID) < maxLen {
+		maxLen = len(containerID)
+	}
+
+	return prefix + "/" + containerID[:maxLen]
+}
+
+// NewSandbox provides a new sandbox instance created in an os specific way
+// provided a key which uniquely identifies the sandbox
+func NewSandbox(key string, osCreate bool) (Sandbox, error) {
+	info, err := createNetworkNamespace(key, osCreate)
+	if err != nil {
+		return nil, err
+	}
+
+	return &networkNamespace{path: key, sinfo: info}, nil
+}
+
+func reexecCreateNamespace() {
+	if len(os.Args) < 2 {
+		log.Fatal("no namespace path provided")
+	}
+
+	if err := syscall.Mount("/proc/self/ns/net", os.Args[1], "bind", syscall.MS_BIND, ""); err != nil {
+		log.Fatal(err)
+	}
+
+	if err := loopbackUp(); err != nil {
+		log.Fatal(err)
+	}
+}
+
+func createNetworkNamespace(path string, osCreate bool) (*Info, error) {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	origns, err := netns.Get()
+	if err != nil {
+		return nil, err
+	}
+	defer origns.Close()
+
+	if err := createNamespaceFile(path); err != nil {
+		return nil, err
+	}
+
+	cmd := &exec.Cmd{
+		Path:   reexec.Self(),
+		Args:   append([]string{"netns-create"}, path),
+		Stdout: os.Stdout,
+		Stderr: os.Stderr,
+	}
+	if osCreate {
+		cmd.SysProcAttr = &syscall.SysProcAttr{}
+		cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNET
+	}
+	if err := cmd.Run(); err != nil {
+		return nil, fmt.Errorf("namespace creation reexec command failed: %v", err)
+	}
+
+	interfaces := []*Interface{}
+	info := &Info{Interfaces: interfaces}
+	return info, nil
+}
+
+func unmountNamespaceFile(path string) {
+	if _, err := os.Stat(path); err == nil {
+		syscall.Unmount(path, syscall.MNT_DETACH)
+	}
+}
+
+func createNamespaceFile(path string) (err error) {
+	var f *os.File
+
+	once.Do(createBasePath)
+	// Remove it from garbage collection list if present
+	removeFromGarbagePaths(path)
+
+	// If the path is there unmount it first
+	unmountNamespaceFile(path)
+
+	// wait for garbage collection to complete if it is in progress
+	// before trying to create the file.
+	gpmWg.Wait()
+
+	if f, err = os.Create(path); err == nil {
+		f.Close()
+	}
+
+	return err
+}
+
+func loopbackUp() error {
+	iface, err := netlink.LinkByName("lo")
+	if err != nil {
+		return err
+	}
+	return netlink.LinkSetUp(iface)
+}
+
+func (n *networkNamespace) RemoveInterface(i *Interface) error {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	origns, err := netns.Get()
+	if err != nil {
+		return err
+	}
+	defer origns.Close()
+
+	f, err := os.OpenFile(n.path, os.O_RDONLY, 0)
+	if err != nil {
+		return fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+	}
+	defer f.Close()
+
+	nsFD := f.Fd()
+	if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+		return err
+	}
+	defer netns.Set(origns)
+
+	// Find the network inteerface identified by the DstName attribute.
+	iface, err := netlink.LinkByName(i.DstName)
+	if err != nil {
+		return err
+	}
+
+	// Down the interface before configuring
+	if err := netlink.LinkSetDown(iface); err != nil {
+		return err
+	}
+
+	err = netlink.LinkSetName(iface, i.SrcName)
+	if err != nil {
+		fmt.Println("LinkSetName failed: ", err)
+		return err
+	}
+
+	// Move the network interface to caller namespace.
+	if err := netlink.LinkSetNsFd(iface, int(origns)); err != nil {
+		fmt.Println("LinkSetNsPid failed: ", err)
+		return err
+	}
+
+	return nil
+}
+
+func (n *networkNamespace) AddInterface(i *Interface) error {
+	n.Lock()
+	i.DstName = fmt.Sprintf("%s%d", i.DstName, n.nextIfIndex)
+	n.nextIfIndex++
+	n.Unlock()
+
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	origns, err := netns.Get()
+	if err != nil {
+		return err
+	}
+	defer origns.Close()
+
+	f, err := os.OpenFile(n.path, os.O_RDONLY, 0)
+	if err != nil {
+		return fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+	}
+	defer f.Close()
+
+	// Find the network interface identified by the SrcName attribute.
+	iface, err := netlink.LinkByName(i.SrcName)
+	if err != nil {
+		return err
+	}
+
+	// Move the network interface to the destination namespace.
+	nsFD := f.Fd()
+	if err := netlink.LinkSetNsFd(iface, int(nsFD)); err != nil {
+		return err
+	}
+
+	if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+		return err
+	}
+	defer netns.Set(origns)
+
+	// Down the interface before configuring
+	if err := netlink.LinkSetDown(iface); err != nil {
+		return err
+	}
+
+	// Configure the interface now this is moved in the proper namespace.
+	if err := configureInterface(iface, i); err != nil {
+		return err
+	}
+
+	// Up the interface.
+	if err := netlink.LinkSetUp(iface); err != nil {
+		return err
+	}
+
+	n.Lock()
+	n.sinfo.Interfaces = append(n.sinfo.Interfaces, i)
+	n.Unlock()
+
+	return nil
+}
+
+func (n *networkNamespace) SetGateway(gw net.IP) error {
+	if len(gw) == 0 {
+		return nil
+	}
+
+	err := programGateway(n.path, gw)
+	if err == nil {
+		n.sinfo.Gateway = gw
+	}
+
+	return err
+}
+
+func (n *networkNamespace) SetGatewayIPv6(gw net.IP) error {
+	if len(gw) == 0 {
+		return nil
+	}
+
+	err := programGateway(n.path, gw)
+	if err == nil {
+		n.sinfo.GatewayIPv6 = gw
+	}
+
+	return err
+}
+
+func (n *networkNamespace) Interfaces() []*Interface {
+	return n.sinfo.Interfaces
+}
+
+func (n *networkNamespace) Key() string {
+	return n.path
+}
+
+func (n *networkNamespace) Destroy() error {
+	// Assuming no running process is executing in this network namespace,
+	// unmounting is sufficient to destroy it.
+	if err := syscall.Unmount(n.path, syscall.MNT_DETACH); err != nil {
+		return err
+	}
+
+	// Stash it into the garbage collection list
+	addToGarbagePaths(n.path)
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox.go
new file mode 100644
index 0000000..9e104ca
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox.go
@@ -0,0 +1,159 @@
+package sandbox
+
+import (
+	"net"
+
+	"github.com/docker/libnetwork/types"
+)
+
+// Sandbox represents a network sandbox, identified by a specific key.  It
+// holds a list of Interfaces, routes etc, and more can be added dynamically.
+type Sandbox interface {
+	// The path where the network namespace is mounted.
+	Key() string
+
+	// The collection of Interface previously added with the AddInterface
+	// method. Note that this doesn't incude network interfaces added in any
+	// other way (such as the default loopback interface which are automatically
+	// created on creation of a sandbox).
+	Interfaces() []*Interface
+
+	// Add an existing Interface to this sandbox. The operation will rename
+	// from the Interface SrcName to DstName as it moves, and reconfigure the
+	// interface according to the specified settings. The caller is expected
+	// to only provide a prefix for DstName. The AddInterface api will auto-generate
+	// an appropriate suffix for the DstName to disambiguate.
+	AddInterface(*Interface) error
+
+	// Remove an interface from the sandbox by renamin to original name
+	// and moving it out of the sandbox.
+	RemoveInterface(*Interface) error
+
+	// Set default IPv4 gateway for the sandbox
+	SetGateway(gw net.IP) error
+
+	// Set default IPv6 gateway for the sandbox
+	SetGatewayIPv6(gw net.IP) error
+
+	// Destroy the sandbox
+	Destroy() error
+}
+
+// Info represents all possible information that
+// the driver wants to place in the sandbox which includes
+// interfaces, routes and gateway
+type Info struct {
+	Interfaces []*Interface
+
+	// IPv4 gateway for the sandbox.
+	Gateway net.IP
+
+	// IPv6 gateway for the sandbox.
+	GatewayIPv6 net.IP
+
+	// TODO: Add routes and ip tables etc.
+}
+
+// Interface represents the settings and identity of a network device. It is
+// used as a return type for Network.Link, and it is common practice for the
+// caller to use this information when moving interface SrcName from host
+// namespace to DstName in a different net namespace with the appropriate
+// network settings.
+type Interface struct {
+	// The name of the interface in the origin network namespace.
+	SrcName string
+
+	// The name that will be assigned to the interface once moves inside a
+	// network namespace. When the caller passes in a DstName, it is only
+	// expected to pass a prefix. The name will modified with an appropriately
+	// auto-generated suffix.
+	DstName string
+
+	// IPv4 address for the interface.
+	Address *net.IPNet
+
+	// IPv6 address for the interface.
+	AddressIPv6 *net.IPNet
+}
+
+// GetCopy returns a copy of this Interface structure
+func (i *Interface) GetCopy() *Interface {
+	return &Interface{
+		SrcName:     i.SrcName,
+		DstName:     i.DstName,
+		Address:     types.GetIPNetCopy(i.Address),
+		AddressIPv6: types.GetIPNetCopy(i.AddressIPv6),
+	}
+}
+
+// Equal checks if this instance of Interface is equal to the passed one
+func (i *Interface) Equal(o *Interface) bool {
+	if i == o {
+		return true
+	}
+
+	if o == nil {
+		return false
+	}
+
+	if i.SrcName != o.SrcName || i.DstName != o.DstName {
+		return false
+	}
+
+	if !types.CompareIPNet(i.Address, o.Address) {
+		return false
+	}
+
+	if !types.CompareIPNet(i.AddressIPv6, o.AddressIPv6) {
+		return false
+	}
+
+	return true
+}
+
+// GetCopy returns a copy of this SandboxInfo structure
+func (s *Info) GetCopy() *Info {
+	list := make([]*Interface, len(s.Interfaces))
+	for i, iface := range s.Interfaces {
+		list[i] = iface.GetCopy()
+	}
+	gw := types.GetIPCopy(s.Gateway)
+	gw6 := types.GetIPCopy(s.GatewayIPv6)
+
+	return &Info{Interfaces: list, Gateway: gw, GatewayIPv6: gw6}
+}
+
+// Equal checks if this instance of SandboxInfo is equal to the passed one
+func (s *Info) Equal(o *Info) bool {
+	if s == o {
+		return true
+	}
+
+	if o == nil {
+		return false
+	}
+
+	if !s.Gateway.Equal(o.Gateway) {
+		return false
+	}
+
+	if !s.GatewayIPv6.Equal(o.GatewayIPv6) {
+		return false
+	}
+
+	if (s.Interfaces == nil && o.Interfaces != nil) ||
+		(s.Interfaces != nil && o.Interfaces == nil) ||
+		(len(s.Interfaces) != len(o.Interfaces)) {
+		return false
+	}
+
+	// Note: At the moment, the two lists must be in the same order
+	for i := 0; i < len(s.Interfaces); i++ {
+		if !s.Interfaces[i].Equal(o.Interfaces[i]) {
+			return false
+		}
+	}
+
+	return true
+
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_linux_test.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_linux_test.go
new file mode 100644
index 0000000..7fda707
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_linux_test.go
@@ -0,0 +1,150 @@
+package sandbox
+
+import (
+	"net"
+	"os"
+	"path/filepath"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
+	"github.com/vishvananda/netns"
+)
+
+const (
+	vethName1     = "wierdlongname1"
+	vethName2     = "wierdlongname2"
+	vethName3     = "wierdlongname3"
+	vethName4     = "wierdlongname4"
+	sboxIfaceName = "containername"
+)
+
+func newKey(t *testing.T) (string, error) {
+	name, err := netutils.GenerateRandomName("netns", 12)
+	if err != nil {
+		return "", err
+	}
+
+	name = filepath.Join("/tmp", name)
+	if _, err := os.Create(name); err != nil {
+		return "", err
+	}
+
+	// Set the rpmCleanupPeriod to be low to make the test run quicker
+	gpmCleanupPeriod = 2 * time.Second
+
+	return name, nil
+}
+
+func newInfo(t *testing.T) (*Info, error) {
+	veth := &netlink.Veth{
+		LinkAttrs: netlink.LinkAttrs{Name: vethName1, TxQLen: 0},
+		PeerName:  vethName2}
+	if err := netlink.LinkAdd(veth); err != nil {
+		return nil, err
+	}
+
+	// Store the sandbox side pipe interface
+	// This is needed for cleanup on DeleteEndpoint()
+	intf1 := &Interface{}
+	intf1.SrcName = vethName2
+	intf1.DstName = sboxIfaceName
+
+	ip4, addr, err := net.ParseCIDR("192.168.1.100/24")
+	if err != nil {
+		return nil, err
+	}
+	intf1.Address = addr
+	intf1.Address.IP = ip4
+
+	// ip6, addrv6, err := net.ParseCIDR("2001:DB8::ABCD/48")
+	ip6, addrv6, err := net.ParseCIDR("fe80::2/64")
+	if err != nil {
+		return nil, err
+	}
+	intf1.AddressIPv6 = addrv6
+	intf1.AddressIPv6.IP = ip6
+
+	veth = &netlink.Veth{
+		LinkAttrs: netlink.LinkAttrs{Name: vethName3, TxQLen: 0},
+		PeerName:  vethName4}
+
+	if err := netlink.LinkAdd(veth); err != nil {
+		return nil, err
+	}
+
+	intf2 := &Interface{}
+	intf2.SrcName = vethName4
+	intf2.DstName = sboxIfaceName
+
+	ip4, addr, err = net.ParseCIDR("192.168.2.100/24")
+	if err != nil {
+		return nil, err
+	}
+	intf2.Address = addr
+	intf2.Address.IP = ip4
+
+	// ip6, addrv6, err := net.ParseCIDR("2001:DB8::ABCD/48")
+	ip6, addrv6, err = net.ParseCIDR("fe80::3/64")
+	if err != nil {
+		return nil, err
+	}
+	intf2.AddressIPv6 = addrv6
+	intf2.AddressIPv6.IP = ip6
+
+	sinfo := &Info{Interfaces: []*Interface{intf1, intf2}}
+	sinfo.Gateway = net.ParseIP("192.168.1.1")
+	// sinfo.GatewayIPv6 = net.ParseIP("2001:DB8::1")
+	sinfo.GatewayIPv6 = net.ParseIP("fe80::1")
+
+	return sinfo, nil
+}
+
+func verifySandbox(t *testing.T, s Sandbox) {
+	_, ok := s.(*networkNamespace)
+	if !ok {
+		t.Fatalf("The sandox interface returned is not of type networkNamespace")
+	}
+
+	origns, err := netns.Get()
+	if err != nil {
+		t.Fatalf("Could not get the current netns: %v", err)
+	}
+	defer origns.Close()
+
+	f, err := os.OpenFile(s.Key(), os.O_RDONLY, 0)
+	if err != nil {
+		t.Fatalf("Failed top open network namespace path %q: %v", s.Key(), err)
+	}
+	defer f.Close()
+
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	nsFD := f.Fd()
+	if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
+		t.Fatalf("Setting to the namespace pointed to by the sandbox %s failed: %v", s.Key(), err)
+	}
+	defer netns.Set(origns)
+
+	_, err = netlink.LinkByName(sboxIfaceName + "0")
+	if err != nil {
+		t.Fatalf("Could not find the interface %s inside the sandbox: %v", sboxIfaceName,
+			err)
+	}
+
+	_, err = netlink.LinkByName(sboxIfaceName + "1")
+	if err != nil {
+		t.Fatalf("Could not find the interface %s inside the sandbox: %v", sboxIfaceName,
+			err)
+	}
+}
+
+func verifyCleanup(t *testing.T, s Sandbox) {
+	time.Sleep(time.Duration(gpmCleanupPeriod * 2))
+	if _, err := os.Stat(s.Key()); err == nil {
+		t.Fatalf("The sandbox path %s is not getting cleanup event after twice the cleanup period", s.Key())
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_test.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_test.go
new file mode 100644
index 0000000..258616a
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_test.go
@@ -0,0 +1,160 @@
+package sandbox
+
+import (
+	"net"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/pkg/reexec"
+)
+
+func TestMain(m *testing.M) {
+	if reexec.Init() {
+		return
+	}
+	os.Exit(m.Run())
+}
+
+func TestSandboxCreate(t *testing.T) {
+	key, err := newKey(t)
+	if err != nil {
+		t.Fatalf("Failed to obtain a key: %v", err)
+	}
+
+	s, err := NewSandbox(key, true)
+	if err != nil {
+		t.Fatalf("Failed to create a new sandbox: %v", err)
+	}
+
+	if s.Key() != key {
+		t.Fatalf("s.Key() returned %s. Expected %s", s.Key(), key)
+	}
+
+	info, err := newInfo(t)
+	if err != nil {
+		t.Fatalf("Failed to generate new sandbox info: %v", err)
+	}
+
+	for _, i := range info.Interfaces {
+		err = s.AddInterface(i)
+		if err != nil {
+			t.Fatalf("Failed to add interfaces to sandbox: %v", err)
+		}
+	}
+
+	err = s.SetGateway(info.Gateway)
+	if err != nil {
+		t.Fatalf("Failed to set gateway to sandbox: %v", err)
+	}
+
+	err = s.SetGatewayIPv6(info.GatewayIPv6)
+	if err != nil {
+		t.Fatalf("Failed to set ipv6 gateway to sandbox: %v", err)
+	}
+
+	verifySandbox(t, s)
+	s.Destroy()
+	verifyCleanup(t, s)
+}
+
+func TestSandboxCreateTwice(t *testing.T) {
+	key, err := newKey(t)
+	if err != nil {
+		t.Fatalf("Failed to obtain a key: %v", err)
+	}
+
+	_, err = NewSandbox(key, true)
+	if err != nil {
+		t.Fatalf("Failed to create a new sandbox: %v", err)
+	}
+
+	// Create another sandbox with the same key to see if we handle it
+	// gracefully.
+	s, err := NewSandbox(key, true)
+	if err != nil {
+		t.Fatalf("Failed to create a new sandbox: %v", err)
+	}
+	s.Destroy()
+}
+
+func TestInterfaceEqual(t *testing.T) {
+	list := getInterfaceList()
+
+	if !list[0].Equal(list[0]) {
+		t.Fatalf("Interface.Equal() returned false negative")
+	}
+
+	if list[0].Equal(list[1]) {
+		t.Fatalf("Interface.Equal() returned false positive")
+	}
+
+	if list[0].Equal(list[1]) != list[1].Equal(list[0]) {
+		t.Fatalf("Interface.Equal() failed commutative check")
+	}
+}
+
+func TestSandboxInfoEqual(t *testing.T) {
+	si1 := &Info{Interfaces: getInterfaceList(), Gateway: net.ParseIP("192.168.1.254"), GatewayIPv6: net.ParseIP("2001:2345::abcd:8889")}
+	si2 := &Info{Interfaces: getInterfaceList(), Gateway: net.ParseIP("172.18.255.254"), GatewayIPv6: net.ParseIP("2001:2345::abcd:8888")}
+
+	if !si1.Equal(si1) {
+		t.Fatalf("Info.Equal() returned false negative")
+	}
+
+	if si1.Equal(si2) {
+		t.Fatalf("Info.Equal() returned false positive")
+	}
+
+	if si1.Equal(si2) != si2.Equal(si1) {
+		t.Fatalf("Info.Equal() failed commutative check")
+	}
+}
+
+func TestInterfaceCopy(t *testing.T) {
+	for _, iface := range getInterfaceList() {
+		cp := iface.GetCopy()
+
+		if !iface.Equal(cp) {
+			t.Fatalf("Failed to return a copy of Interface")
+		}
+
+		if iface == cp {
+			t.Fatalf("Failed to return a true copy of Interface")
+		}
+	}
+}
+
+func TestSandboxInfoCopy(t *testing.T) {
+	si := Info{Interfaces: getInterfaceList(), Gateway: net.ParseIP("192.168.1.254"), GatewayIPv6: net.ParseIP("2001:2345::abcd:8889")}
+	cp := si.GetCopy()
+
+	if !si.Equal(cp) {
+		t.Fatalf("Failed to return a copy of Info")
+	}
+
+	if &si == cp {
+		t.Fatalf("Failed to return a true copy of Info")
+	}
+}
+
+func getInterfaceList() []*Interface {
+	_, netv4a, _ := net.ParseCIDR("192.168.30.1/24")
+	_, netv4b, _ := net.ParseCIDR("172.18.255.2/23")
+	_, netv6a, _ := net.ParseCIDR("2001:2345::abcd:8888/80")
+	_, netv6b, _ := net.ParseCIDR("2001:2345::abcd:8889/80")
+
+	return []*Interface{
+		&Interface{
+			SrcName:     "veth1234567",
+			DstName:     "eth0",
+			Address:     netv4a,
+			AddressIPv6: netv6a,
+		},
+		&Interface{
+			SrcName:     "veth7654321",
+			DstName:     "eth1",
+			Address:     netv4b,
+			AddressIPv6: netv6b,
+		},
+	}
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported.go
new file mode 100644
index 0000000..aa116fd
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package sandbox
+
+import "errors"
+
+var (
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+// NewSandbox provides a new sandbox instance created in an os specific way
+// provided a key which uniquely identifies the sandbox
+func NewSandbox(key string) (Sandbox, error) {
+	return nil, ErrNotImplemented
+}
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported_test.go b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported_test.go
new file mode 100644
index 0000000..48dc2aa
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/sandbox/sandbox_unsupported_test.go
@@ -0,0 +1,20 @@
+// +build !linux
+
+package sandbox
+
+import (
+	"errors"
+	"testing"
+)
+
+var (
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+func newKey(t *testing.T) (string, error) {
+	return nil, ErrNotImplemented
+}
+
+func verifySandbox(t *testing.T, s Sandbox) {
+	return
+}
diff --git a/vendor/src/github.com/docker/libnetwork/system.go b/vendor/src/github.com/docker/libnetwork/system.go
new file mode 100644
index 0000000..7beec28
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/system.go
@@ -0,0 +1,34 @@
+package libnetwork
+
+import (
+	"fmt"
+	"runtime"
+	"syscall"
+)
+
+// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092
+//
+// We need different setns values for the different platforms and arch
+// We are declaring the macro here because the SETNS syscall does not exist in th stdlib
+var setNsMap = map[string]uintptr{
+	"linux/386":     346,
+	"linux/amd64":   308,
+	"linux/arm":     374,
+	"linux/ppc64":   350,
+	"linux/ppc64le": 350,
+	"linux/s390x":   339,
+}
+
+func setns(fd uintptr, flags uintptr) error {
+	ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)]
+	if !exists {
+		return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+	}
+
+	_, _, err := syscall.RawSyscall(ns, fd, flags, 0)
+	if err != 0 {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/README.md b/vendor/src/github.com/docker/libnetwork/test/integration/README.md
new file mode 100644
index 0000000..777b1cf
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/README.md
@@ -0,0 +1,34 @@
+# LibNetwork Integration Tests
+
+Integration tests provide end-to-end testing of LibNetwork and Drivers.
+
+While unit tests verify the code is working as expected by relying on mocks and
+artificially created fixtures, integration tests actually use real docker
+engines and communicate to it through the CLI.
+
+Note that integration tests do **not** replace unit tests and Docker is used as a good use-case.
+
+As a rule of thumb, code should be tested thoroughly with unit tests.
+Integration tests on the other hand are meant to test a specific feature end to end.
+
+Integration tests are written in *bash* using the
+[bats](https://github.com/sstephenson/bats) framework.
+
+## Pre-Requisites
+
+1. Bats (https://github.com/sstephenson/bats#installing-bats-from-source)
+2. Docker Machine (https://github.com/docker/machine)
+3. Virtualbox (as a Docker machine driver)
+
+## Running integration tests
+
+* Start by [installing] (https://github.com/sstephenson/bats#installing-bats-from-source) *bats* on your system.
+* If not done already, [install](https://docs.docker.com/machine/) *docker-machine* into /usr/bin
+* Make sure Virtualbox is installed as well, which will be used by docker-machine as a driver to launch VMs
+
+In order to run all integration tests, pass *bats* the test path:
+```
+$ bats test/integration/daemon-configs.bats
+```
+
+
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/daemon-configs.bats b/vendor/src/github.com/docker/libnetwork/test/integration/daemon-configs.bats
new file mode 100644
index 0000000..fd48fbe
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/daemon-configs.bats
@@ -0,0 +1,104 @@
+#!/usr/bin/env bats
+
+load helpers
+
+export DRIVER=virtualbox
+export NAME="bats-$DRIVER-daemon-configs"
+export MACHINE_STORAGE_PATH=/tmp/machine-bats-daemon-test-$DRIVER
+# Default memsize is 1024MB and disksize is 20000MB
+# These values are defined in drivers/virtualbox/virtualbox.go
+export DEFAULT_MEMSIZE=1024
+export DEFAULT_DISKSIZE=20000
+export CUSTOM_MEMSIZE=1536
+export CUSTOM_DISKSIZE=10000
+export CUSTOM_CPUCOUNT=1
+export BAD_URL="http://dev.null:9111/bad.iso"
+
+function setup() {
+  # add sleep because vbox; ugh
+  sleep 1
+}
+
+findDiskSize() {
+  # SATA-0-0 is usually the boot2disk.iso image
+  # We assume that SATA 1-0 is root disk VMDK and grab this UUID
+  # e.g. "SATA-ImageUUID-1-0"="fb5f33a7-e4e3-4cb9-877c-f9415ae2adea"
+  # TODO(slashk): does this work on Windows ?
+  run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep SATA-ImageUUID-1-0 | cut -d'=' -f2"
+  run bash -c "VBoxManage showhdinfo $output | grep "Capacity:" | awk -F' ' '{ print $2 }'"
+}
+
+findMemorySize() {
+  run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep memory= | cut -d'=' -f2"
+}
+
+findCPUCount() {
+  run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep cpus= | cut -d'=' -f2"
+}
+
+buildMachineWithOldIsoCheckUpgrade() {
+  run wget https://github.com/boot2docker/boot2docker/releases/download/v1.4.1/boot2docker.iso -O $MACHINE_STORAGE_PATH/cache/boot2docker.iso
+  run machine create -d virtualbox $NAME
+  run machine upgrade $NAME
+}
+
+@test "$DRIVER: machine should not exist" {
+  run machine active $NAME
+  [ "$status" -eq 1  ]
+}
+
+@test "$DRIVER: VM should not exist" {
+  run VBoxManage showvminfo $NAME
+  [ "$status" -eq 1  ]
+}
+
+@test "$DRIVER: create" {
+  run machine create -d $DRIVER $NAME
+  [ "$status" -eq 0  ]
+}
+
+@test "$DRIVER: active" {
+  run machine active $NAME
+  [ "$status" -eq 0  ]
+}
+
+@test "$DRIVER: check default machine memory size" {
+  findMemorySize
+  [[ ${output} == "${DEFAULT_MEMSIZE}"  ]]
+}
+
+@test "$DRIVER: check default machine disksize" {
+  findDiskSize
+  [[ ${output} == *"$DEFAULT_DISKSIZE"* ]]
+}
+
+@test "$DRIVER: test bridge-ip" {
+  run machine ssh $NAME sudo /etc/init.d/docker stop
+  run machine ssh $NAME sudo ifconfig docker0 down
+  run machine ssh $NAME sudo ip link delete docker0
+  BIP='--bip=172.168.45.1/24'
+  set_extra_config $BIP
+  cat ${TMP_EXTRA_ARGS_FILE} | machine ssh $NAME sudo tee /var/lib/boot2docker/profile
+  cat ${DAEMON_CFG_FILE} | machine ssh $NAME "sudo tee -a /var/lib/boot2docker/profile"
+  run machine ssh $NAME sudo /etc/init.d/docker start
+  run machine ssh $NAME ifconfig docker0
+  [ "$status" -eq 0  ]
+  [[ ${lines[1]} =~ "172.168.45.1"  ]]
+}
+
+@test "$DRIVER: run busybox container" {
+  run machine ssh $NAME sudo cat /var/lib/boot2docker/profile
+  run docker $(machine config $NAME) run busybox echo hello world
+  [ "$status" -eq 0  ]
+}
+
+@test "$DRIVER: remove machine" {
+  run machine rm -f $NAME
+}
+
+# Cleanup of machine store should always be the last 'test'
+@test "$DRIVER: cleanup" {
+  run rm -rf $MACHINE_STORAGE_PATH
+  [ "$status" -eq 0  ]
+}
+
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/daemon.cfg b/vendor/src/github.com/docker/libnetwork/test/integration/daemon.cfg
new file mode 100644
index 0000000..fc93dbd
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/daemon.cfg
@@ -0,0 +1,4 @@
+CACERT=/var/lib/boot2docker/ca.pem
+SERVERCERT=/var/lib/boot2docker/server-key.pem
+SERVERKEY=/var/lib/boot2docker/server.pem
+DOCKER_TLS=no
diff --git a/vendor/src/github.com/docker/libnetwork/test/integration/helpers.bash b/vendor/src/github.com/docker/libnetwork/test/integration/helpers.bash
new file mode 100644
index 0000000..ec18e5d
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/test/integration/helpers.bash
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Root directory of the repository.
+MACHINE_ROOT=/usr/bin
+
+PLATFORM=`uname -s | tr '[:upper:]' '[:lower:]'`
+ARCH=`uname -m`
+
+if [ "$ARCH" = "x86_64" ]; then
+    ARCH="amd64"
+else
+    ARCH="386"
+fi
+MACHINE_BIN_NAME=docker-machine_$PLATFORM-$ARCH
+BATS_LOG=/tmp/bats.log
+
+touch ${BATS_LOG}
+rm ${BATS_LOG}
+
+teardown() {
+  echo "$BATS_TEST_NAME
+----------
+$output
+----------
+
+" >> ${BATS_LOG}
+}
+
+EXTRA_ARGS_CFG='EXTRA_ARGS'
+EXTRA_ARGS='--tlsverify --tlscacert=/var/lib/boot2docker/ca.pem --tlskey=/var/lib/boot2docker/server-key.pem --tlscert=/var/lib/boot2docker/server.pem --label=provider=virtualbox -H tcp://0.0.0.0:2376'
+TMP_EXTRA_ARGS_FILE=/tmp/tmp_extra_args
+DAEMON_CFG_FILE=${BATS_TEST_DIRNAME}/daemon.cfg
+set_extra_config() {
+  if [ -f ${TMP_EXTRA_ARGS_FILE} ];
+  then
+    rm ${TMP_EXTRA_ARGS_FILE}
+  fi
+  echo -n "${EXTRA_ARGS_CFG}='"  > ${TMP_EXTRA_ARGS_FILE}
+  echo -n "$1 "  >> ${TMP_EXTRA_ARGS_FILE}
+  echo "${EXTRA_ARGS}'"  >> ${TMP_EXTRA_ARGS_FILE}
+}
+
+if [ ! -e $MACHINE_ROOT/$MACHINE_BIN_NAME ]; then
+  echo "${MACHINE_ROOT}/${MACHINE_BIN_NAME} not found"
+  exit 1
+fi
+
+function machine() {
+    ${MACHINE_ROOT}/$MACHINE_BIN_NAME "$@"
+}
diff --git a/vendor/src/github.com/docker/libnetwork/types/types.go b/vendor/src/github.com/docker/libnetwork/types/types.go
new file mode 100644
index 0000000..3b83485
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/types/types.go
@@ -0,0 +1,345 @@
+// Package types contains types that are common across libnetwork project
+package types
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"strings"
+)
+
+// UUID represents a globally unique ID of various resources like network and endpoint
+type UUID string
+
+// TransportPort represent a local Layer 4 endpoint
+type TransportPort struct {
+	Proto Protocol
+	Port  uint16
+}
+
+// GetCopy returns a copy of this TransportPort structure instance
+func (t *TransportPort) GetCopy() TransportPort {
+	return TransportPort{Proto: t.Proto, Port: t.Port}
+}
+
+// PortBinding represent a port binding between the container an the host
+type PortBinding struct {
+	Proto    Protocol
+	IP       net.IP
+	Port     uint16
+	HostIP   net.IP
+	HostPort uint16
+}
+
+// HostAddr returns the host side transport address
+func (p PortBinding) HostAddr() (net.Addr, error) {
+	switch p.Proto {
+	case UDP:
+		return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+	case TCP:
+		return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+	default:
+		return nil, ErrInvalidProtocolBinding(p.Proto.String())
+	}
+}
+
+// ContainerAddr returns the container side transport address
+func (p PortBinding) ContainerAddr() (net.Addr, error) {
+	switch p.Proto {
+	case UDP:
+		return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil
+	case TCP:
+		return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil
+	default:
+		return nil, ErrInvalidProtocolBinding(p.Proto.String())
+	}
+}
+
+// GetCopy returns a copy of this PortBinding structure instance
+func (p *PortBinding) GetCopy() PortBinding {
+	return PortBinding{
+		Proto:    p.Proto,
+		IP:       GetIPCopy(p.IP),
+		Port:     p.Port,
+		HostIP:   GetIPCopy(p.HostIP),
+		HostPort: p.HostPort,
+	}
+}
+
+// Equal checks if this instance of PortBinding is equal to the passed one
+func (p *PortBinding) Equal(o *PortBinding) bool {
+	if p == o {
+		return true
+	}
+
+	if o == nil {
+		return false
+	}
+
+	if p.Proto != o.Proto || p.Port != o.Port || p.HostPort != o.HostPort {
+		return false
+	}
+
+	if p.IP != nil {
+		if !p.IP.Equal(o.IP) {
+			return false
+		}
+	} else {
+		if o.IP != nil {
+			return false
+		}
+	}
+
+	if p.HostIP != nil {
+		if !p.HostIP.Equal(o.HostIP) {
+			return false
+		}
+	} else {
+		if o.HostIP != nil {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid.
+type ErrInvalidProtocolBinding string
+
+func (ipb ErrInvalidProtocolBinding) Error() string {
+	return fmt.Sprintf("invalid transport protocol: %s", string(ipb))
+}
+
+const (
+	// ICMP is for the ICMP ip protocol
+	ICMP = 1
+	// TCP is for the TCP ip protocol
+	TCP = 6
+	// UDP is for the UDP ip protocol
+	UDP = 17
+)
+
+// Protocol represents a IP protocol number
+type Protocol uint8
+
+func (p Protocol) String() string {
+	switch p {
+	case ICMP:
+		return "icmp"
+	case TCP:
+		return "tcp"
+	case UDP:
+		return "udp"
+	default:
+		return fmt.Sprintf("%d", p)
+	}
+}
+
+// ParseProtocol returns the respective Protocol type for the passed string
+func ParseProtocol(s string) Protocol {
+	switch strings.ToLower(s) {
+	case "icmp":
+		return ICMP
+	case "udp":
+		return UDP
+	case "tcp":
+		return TCP
+	default:
+		return 0
+	}
+}
+
+// GetMacCopy returns a copy of the passed MAC address
+func GetMacCopy(from net.HardwareAddr) net.HardwareAddr {
+	to := make(net.HardwareAddr, len(from))
+	copy(to, from)
+	return to
+}
+
+// GetIPCopy returns a copy of the passed IP address
+func GetIPCopy(from net.IP) net.IP {
+	to := make(net.IP, len(from))
+	copy(to, from)
+	return to
+}
+
+// GetIPNetCopy returns a copy of the passed IP Network
+func GetIPNetCopy(from *net.IPNet) *net.IPNet {
+	if from == nil {
+		return nil
+	}
+	bm := make(net.IPMask, len(from.Mask))
+	copy(bm, from.Mask)
+	return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm}
+}
+
+// CompareIPNet returns equal if the two IP Networks are equal
+func CompareIPNet(a, b *net.IPNet) bool {
+	if a == b {
+		return true
+	}
+	if a == nil || b == nil {
+		return false
+	}
+	return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask)
+}
+
+/******************************
+ * Well-known Error Interfaces
+ ******************************/
+
+// MaskableError is an interface for errors which can be ignored by caller
+type MaskableError interface {
+	// Maskable makes implementer into MaskableError type
+	Maskable()
+}
+
+// BadRequestError is an interface for errors originated by a bad request
+type BadRequestError interface {
+	// BadRequest makes implementer into BadRequestError type
+	BadRequest()
+}
+
+// NotFoundError is an interface for errors raised because a needed resource is not available
+type NotFoundError interface {
+	// NotFound makes implementer into NotFoundError type
+	NotFound()
+}
+
+// ForbiddenError is an interface for errors which denote an valid request that cannot be honored
+type ForbiddenError interface {
+	// Forbidden makes implementer into ForbiddenError type
+	Forbidden()
+}
+
+// NoServiceError  is an interface for errors returned when the required service is not available
+type NoServiceError interface {
+	// NoService makes implementer into NoServiceError type
+	NoService()
+}
+
+// TimeoutError  is an interface for errors raised because of timeout
+type TimeoutError interface {
+	// Timeout makes implementer into TimeoutError type
+	Timeout()
+}
+
+// NotImplementedError  is an interface for errors raised because of requested functionality is not yet implemented
+type NotImplementedError interface {
+	// NotImplemented makes implementer into NotImplementedError type
+	NotImplemented()
+}
+
+// InternalError is an interface for errors raised because of an internal error
+type InternalError interface {
+	// Internal makes implementer into InternalError type
+	Internal()
+}
+
+/******************************
+ * Weel-known Error Formatters
+ ******************************/
+
+// BadRequestErrorf creates an instance of BadRequestError
+func BadRequestErrorf(format string, params ...interface{}) error {
+	return badRequest(fmt.Sprintf(format, params...))
+}
+
+// NotFoundErrorf creates an instance of NotFoundError
+func NotFoundErrorf(format string, params ...interface{}) error {
+	return notFound(fmt.Sprintf(format, params...))
+}
+
+// ForbiddenErrorf creates an instance of ForbiddenError
+func ForbiddenErrorf(format string, params ...interface{}) error {
+	return forbidden(fmt.Sprintf(format, params...))
+}
+
+// NoServiceErrorf creates an instance of NoServiceError
+func NoServiceErrorf(format string, params ...interface{}) error {
+	return noService(fmt.Sprintf(format, params...))
+}
+
+// NotImplementedErrorf creates an instance of NotImplementedError
+func NotImplementedErrorf(format string, params ...interface{}) error {
+	return notImpl(fmt.Sprintf(format, params...))
+}
+
+// TimeoutErrorf creates an instance of TimeoutError
+func TimeoutErrorf(format string, params ...interface{}) error {
+	return timeout(fmt.Sprintf(format, params...))
+}
+
+// InternalErrorf creates an instance of InternalError
+func InternalErrorf(format string, params ...interface{}) error {
+	return internal(fmt.Sprintf(format, params...))
+}
+
+// InternalMaskableErrorf creates an instance of InternalError and MaskableError
+func InternalMaskableErrorf(format string, params ...interface{}) error {
+	return maskInternal(fmt.Sprintf(format, params...))
+}
+
+/***********************
+ * Internal Error Types
+ ***********************/
+type badRequest string
+
+func (br badRequest) Error() string {
+	return string(br)
+}
+func (br badRequest) BadRequest() {}
+
+type maskBadRequest string
+
+type notFound string
+
+func (nf notFound) Error() string {
+	return string(nf)
+}
+func (nf notFound) NotFound() {}
+
+type forbidden string
+
+func (frb forbidden) Error() string {
+	return string(frb)
+}
+func (frb forbidden) Forbidden() {}
+
+type noService string
+
+func (ns noService) Error() string {
+	return string(ns)
+}
+func (ns noService) NoService() {}
+
+type maskNoService string
+
+type timeout string
+
+func (to timeout) Error() string {
+	return string(to)
+}
+func (to timeout) Timeout() {}
+
+type notImpl string
+
+func (ni notImpl) Error() string {
+	return string(ni)
+}
+func (ni notImpl) NotImplemented() {}
+
+type internal string
+
+func (nt internal) Error() string {
+	return string(nt)
+}
+func (nt internal) Internal() {}
+
+type maskInternal string
+
+func (mnt maskInternal) Error() string {
+	return string(mnt)
+}
+func (mnt maskInternal) Internal() {}
+func (mnt maskInternal) Maskable() {}
diff --git a/vendor/src/github.com/docker/libnetwork/types/types_test.go b/vendor/src/github.com/docker/libnetwork/types/types_test.go
new file mode 100644
index 0000000..9e96ea8
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/types/types_test.go
@@ -0,0 +1,99 @@
+package types
+
+import (
+	"testing"
+
+	_ "github.com/docker/libnetwork/netutils"
+)
+
+func TestErrorConstructors(t *testing.T) {
+	var err error
+
+	err = BadRequestErrorf("Io ho %d uccello", 1)
+	if err.Error() != "Io ho 1 uccello" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(BadRequestError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); ok {
+		t.Fatal(err)
+	}
+
+	err = NotFoundErrorf("Can't find the %s", "keys")
+	if err.Error() != "Can't find the keys" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(NotFoundError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); ok {
+		t.Fatal(err)
+	}
+
+	err = ForbiddenErrorf("Can't open door %d", 2)
+	if err.Error() != "Can't open door 2" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(ForbiddenError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); ok {
+		t.Fatal(err)
+	}
+
+	err = NotImplementedErrorf("Functionality %s is not implemented", "x")
+	if err.Error() != "Functionality x is not implemented" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(NotImplementedError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); ok {
+		t.Fatal(err)
+	}
+
+	err = TimeoutErrorf("Process %s timed out", "abc")
+	if err.Error() != "Process abc timed out" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(TimeoutError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); ok {
+		t.Fatal(err)
+	}
+
+	err = NoServiceErrorf("Driver %s is not available", "mh")
+	if err.Error() != "Driver mh is not available" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(NoServiceError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); ok {
+		t.Fatal(err)
+	}
+
+	err = InternalErrorf("Not sure what happened")
+	if err.Error() != "Not sure what happened" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(InternalError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); ok {
+		t.Fatal(err)
+	}
+
+	err = InternalMaskableErrorf("Minor issue, it can be ignored")
+	if err.Error() != "Minor issue, it can be ignored" {
+		t.Fatal(err)
+	}
+	if _, ok := err.(InternalError); !ok {
+		t.Fatal(err)
+	}
+	if _, ok := err.(MaskableError); !ok {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/src/github.com/go-check/check/.gitignore b/vendor/src/github.com/go-check/check/.gitignore
new file mode 100644
index 0000000..191a536
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/.gitignore
@@ -0,0 +1,4 @@
+_*
+*.swp
+*.[568]
+[568].out
diff --git a/vendor/src/github.com/go-check/check/LICENSE b/vendor/src/github.com/go-check/check/LICENSE
new file mode 100644
index 0000000..545cf2d
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/LICENSE
@@ -0,0 +1,25 @@
+Gocheck - A rich testing framework for Go
+ 
+Copyright (c) 2010-2013 Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/go-check/check/README.md b/vendor/src/github.com/go-check/check/README.md
new file mode 100644
index 0000000..0ca9e57
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/README.md
@@ -0,0 +1,20 @@
+Instructions
+============
+
+Install the package with:
+
+    go get gopkg.in/check.v1
+    
+Import it with:
+
+    import "gopkg.in/check.v1"
+
+and use _check_ as the package name inside the code.
+
+For more details, visit the project page:
+
+* http://labix.org/gocheck
+
+and the API documentation:
+
+* https://gopkg.in/check.v1
diff --git a/vendor/src/github.com/go-check/check/TODO b/vendor/src/github.com/go-check/check/TODO
new file mode 100644
index 0000000..3349827
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/TODO
@@ -0,0 +1,2 @@
+- Assert(slice, Contains, item)
+- Parallel test support
diff --git a/vendor/src/github.com/go-check/check/benchmark.go b/vendor/src/github.com/go-check/check/benchmark.go
new file mode 100644
index 0000000..48cb8c8
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/benchmark.go
@@ -0,0 +1,163 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package check
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+)
+
+var memStats runtime.MemStats
+
+// testingB is a type passed to Benchmark functions to manage benchmark
+// timing and to specify the number of iterations to run.
+type timer struct {
+	start     time.Time // Time test or benchmark started
+	duration  time.Duration
+	N         int
+	bytes     int64
+	timerOn   bool
+	benchTime time.Duration
+	// The initial states of memStats.Mallocs and memStats.TotalAlloc.
+	startAllocs uint64
+	startBytes  uint64
+	// The net total of this test after being run.
+	netAllocs uint64
+	netBytes  uint64
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also used to resume timing after
+// a call to StopTimer.
+func (c *C) StartTimer() {
+	if !c.timerOn {
+		c.start = time.Now()
+		c.timerOn = true
+
+		runtime.ReadMemStats(&memStats)
+		c.startAllocs = memStats.Mallocs
+		c.startBytes = memStats.TotalAlloc
+	}
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (c *C) StopTimer() {
+	if c.timerOn {
+		c.duration += time.Now().Sub(c.start)
+		c.timerOn = false
+		runtime.ReadMemStats(&memStats)
+		c.netAllocs += memStats.Mallocs - c.startAllocs
+		c.netBytes += memStats.TotalAlloc - c.startBytes
+	}
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (c *C) ResetTimer() {
+	if c.timerOn {
+		c.start = time.Now()
+		runtime.ReadMemStats(&memStats)
+		c.startAllocs = memStats.Mallocs
+		c.startBytes = memStats.TotalAlloc
+	}
+	c.duration = 0
+	c.netAllocs = 0
+	c.netBytes = 0
+}
+
+// SetBytes informs the number of bytes that the benchmark processes
+// on each iteration. If this is called in a benchmark it will also
+// report MB/s.
+func (c *C) SetBytes(n int64) {
+	c.bytes = n
+}
+
+func (c *C) nsPerOp() int64 {
+	if c.N <= 0 {
+		return 0
+	}
+	return c.duration.Nanoseconds() / int64(c.N)
+}
+
+func (c *C) mbPerSec() float64 {
+	if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
+		return 0
+	}
+	return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
+}
+
+func (c *C) timerString() string {
+	if c.N <= 0 {
+		return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
+	}
+	mbs := c.mbPerSec()
+	mb := ""
+	if mbs != 0 {
+		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+	}
+	nsop := c.nsPerOp()
+	ns := fmt.Sprintf("%10d ns/op", nsop)
+	if c.N > 0 && nsop < 100 {
+		// The format specifiers here make sure that
+		// the ones digits line up for all three possible formats.
+		if nsop < 10 {
+			ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		} else {
+			ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		}
+	}
+	memStats := ""
+	if c.benchMem {
+		allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
+		allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
+		memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
+	}
+	return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
+}
+
+func min(x, y int) int {
+	if x > y {
+		return y
+	}
+	return x
+}
+
+func max(x, y int) int {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+	var tens = 0
+	// tens = floor(log_10(n))
+	for n > 10 {
+		n = n / 10
+		tens++
+	}
+	// result = 10^tens
+	result := 1
+	for i := 0; i < tens; i++ {
+		result *= 10
+	}
+	return result
+}
+
+// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+	base := roundDown10(n)
+	if n < (2 * base) {
+		return 2 * base
+	}
+	if n < (5 * base) {
+		return 5 * base
+	}
+	return 10 * base
+}
diff --git a/vendor/src/github.com/go-check/check/benchmark_test.go b/vendor/src/github.com/go-check/check/benchmark_test.go
new file mode 100644
index 0000000..4dd827c
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/benchmark_test.go
@@ -0,0 +1,91 @@
+// These tests verify the test running logic.
+
+package check_test
+
+import (
+	"time"
+	. "gopkg.in/check.v1"
+)
+
+var benchmarkS = Suite(&BenchmarkS{})
+
+type BenchmarkS struct{}
+
+func (s *BenchmarkS) TestCountSuite(c *C) {
+	suitesRun += 1
+}
+
+func (s *BenchmarkS) TestBasicTestTiming(c *C) {
+	helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond}
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true}
+	Run(&helper, &runConf)
+
+	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" +
+		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestStreamTestTiming(c *C) {
+	helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond}
+	output := String{}
+	runConf := RunConf{Output: &output, Stream: true}
+	Run(&helper, &runConf)
+
+	expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmark(c *C) {
+	helper := FixtureHelper{sleep: 100000}
+	output := String{}
+	runConf := RunConf{
+		Output:        &output,
+		Benchmark:     true,
+		BenchmarkTime: 10000000,
+		Filter:        "Benchmark1",
+	}
+	Run(&helper, &runConf)
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Benchmark1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "SetUpTest")
+	c.Check(helper.calls[5], Equals, "Benchmark1")
+	c.Check(helper.calls[6], Equals, "TearDownTest")
+	// ... and more.
+
+	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmarkBytes(c *C) {
+	helper := FixtureHelper{sleep: 100000}
+	output := String{}
+	runConf := RunConf{
+		Output:        &output,
+		Benchmark:     true,
+		BenchmarkTime: 10000000,
+		Filter:        "Benchmark2",
+	}
+	Run(&helper, &runConf)
+
+	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmarkMem(c *C) {
+	helper := FixtureHelper{sleep: 100000}
+	output := String{}
+	runConf := RunConf{
+		Output:        &output,
+		Benchmark:     true,
+		BenchmarkMem:  true,
+		BenchmarkTime: 10000000,
+		Filter:        "Benchmark3",
+	}
+	Run(&helper, &runConf)
+
+	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *[0-9]+ B/op\t *[1-9] allocs/op\n"
+	c.Assert(output.value, Matches, expected)
+}
diff --git a/vendor/src/github.com/go-check/check/bootstrap_test.go b/vendor/src/github.com/go-check/check/bootstrap_test.go
new file mode 100644
index 0000000..e55f327
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/bootstrap_test.go
@@ -0,0 +1,82 @@
+// These initial tests are for bootstrapping.  They verify that we can
+// basically use the testing infrastructure itself to check if the test
+// system is working.
+//
+// These tests use will break down the test runner badly in case of
+// errors because if they simply fail, we can't be sure the developer
+// will ever see anything (because failing means the failing system
+// somehow isn't working! :-)
+//
+// Do not assume *any* internal functionality works as expected besides
+// what's actually tested here.
+
+package check_test
+
+import (
+	"fmt"
+	"gopkg.in/check.v1"
+	"strings"
+)
+
+type BootstrapS struct{}
+
+var boostrapS = check.Suite(&BootstrapS{})
+
+func (s *BootstrapS) TestCountSuite(c *check.C) {
+	suitesRun += 1
+}
+
+func (s *BootstrapS) TestFailedAndFail(c *check.C) {
+	if c.Failed() {
+		critical("c.Failed() must be false first!")
+	}
+	c.Fail()
+	if !c.Failed() {
+		critical("c.Fail() didn't put the test in a failed state!")
+	}
+	c.Succeed()
+}
+
+func (s *BootstrapS) TestFailedAndSucceed(c *check.C) {
+	c.Fail()
+	c.Succeed()
+	if c.Failed() {
+		critical("c.Succeed() didn't put the test back in a non-failed state")
+	}
+}
+
+func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) {
+	c.Log("Hello there!")
+	log := c.GetTestLog()
+	if log != "Hello there!\n" {
+		critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log))
+	}
+}
+
+func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) {
+	c.Logf("Hello %v", "there!")
+	log := c.GetTestLog()
+	if log != "Hello there!\n" {
+		critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log))
+	}
+}
+
+func (s *BootstrapS) TestRunShowsErrors(c *check.C) {
+	output := String{}
+	check.Run(&FailHelper{}, &check.RunConf{Output: &output})
+	if strings.Index(output.value, "Expected failure!") == -1 {
+		critical(fmt.Sprintf("RunWithWriter() output did not contain the "+
+			"expected failure! Got: %#v",
+			output.value))
+	}
+}
+
+func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) {
+	output := String{}
+	check.Run(&SuccessHelper{}, &check.RunConf{Output: &output})
+	if strings.Index(output.value, "Expected success!") != -1 {
+		critical(fmt.Sprintf("RunWithWriter() output contained a successful "+
+			"test! Got: %#v",
+			output.value))
+	}
+}
diff --git a/vendor/src/github.com/go-check/check/check.go b/vendor/src/github.com/go-check/check/check.go
new file mode 100644
index 0000000..ca8c0f9
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/check.go
@@ -0,0 +1,945 @@
+// Package check is a rich testing extension for Go's testing package.
+//
+// For details about the project, see:
+//
+//     http://labix.org/gocheck
+//
+package check
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// -----------------------------------------------------------------------
+// Internal type which deals with suite method calling.
+
+const (
+	fixtureKd = iota
+	testKd
+)
+
+type funcKind int
+
+const (
+	succeededSt = iota
+	failedSt
+	skippedSt
+	panickedSt
+	fixturePanickedSt
+	missedSt
+)
+
+type funcStatus int
+
+// A method value can't reach its own Method structure.
+type methodType struct {
+	reflect.Value
+	Info reflect.Method
+}
+
+func newMethod(receiver reflect.Value, i int) *methodType {
+	return &methodType{receiver.Method(i), receiver.Type().Method(i)}
+}
+
+func (method *methodType) PC() uintptr {
+	return method.Info.Func.Pointer()
+}
+
+func (method *methodType) suiteName() string {
+	t := method.Info.Type.In(0)
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t.Name()
+}
+
+func (method *methodType) String() string {
+	return method.suiteName() + "." + method.Info.Name
+}
+
+func (method *methodType) matches(re *regexp.Regexp) bool {
+	return (re.MatchString(method.Info.Name) ||
+		re.MatchString(method.suiteName()) ||
+		re.MatchString(method.String()))
+}
+
+type C struct {
+	method    *methodType
+	kind      funcKind
+	testName  string
+	status    funcStatus
+	logb      *logger
+	logw      io.Writer
+	done      chan *C
+	reason    string
+	mustFail  bool
+	tempDir   *tempDir
+	benchMem  bool
+	startTime time.Time
+	timer
+}
+
+func (c *C) stopNow() {
+	runtime.Goexit()
+}
+
+// logger is a concurrency safe byte.Buffer
+type logger struct {
+	sync.Mutex
+	writer bytes.Buffer
+}
+
+func (l *logger) Write(buf []byte) (int, error) {
+	l.Lock()
+	defer l.Unlock()
+	return l.writer.Write(buf)
+}
+
+func (l *logger) WriteTo(w io.Writer) (int64, error) {
+	l.Lock()
+	defer l.Unlock()
+	return l.writer.WriteTo(w)
+}
+
+func (l *logger) String() string {
+	l.Lock()
+	defer l.Unlock()
+	return l.writer.String()
+}
+
+// -----------------------------------------------------------------------
+// Handling of temporary files and directories.
+
+type tempDir struct {
+	sync.Mutex
+	path    string
+	counter int
+}
+
+func (td *tempDir) newPath() string {
+	td.Lock()
+	defer td.Unlock()
+	if td.path == "" {
+		var err error
+		for i := 0; i != 100; i++ {
+			path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int())
+			if err = os.Mkdir(path, 0700); err == nil {
+				td.path = path
+				break
+			}
+		}
+		if td.path == "" {
+			panic("Couldn't create temporary directory: " + err.Error())
+		}
+	}
+	result := filepath.Join(td.path, strconv.Itoa(td.counter))
+	td.counter += 1
+	return result
+}
+
+func (td *tempDir) removeAll() {
+	td.Lock()
+	defer td.Unlock()
+	if td.path != "" {
+		err := os.RemoveAll(td.path)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error())
+		}
+	}
+}
+
+// Create a new temporary directory which is automatically removed after
+// the suite finishes running.
+func (c *C) MkDir() string {
+	path := c.tempDir.newPath()
+	if err := os.Mkdir(path, 0700); err != nil {
+		panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
+	}
+	return path
+}
+
+// -----------------------------------------------------------------------
+// Low-level logging functions.
+
+func (c *C) log(args ...interface{}) {
+	c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
+}
+
+func (c *C) logf(format string, args ...interface{}) {
+	c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
+}
+
+func (c *C) logNewLine() {
+	c.writeLog([]byte{'\n'})
+}
+
+func (c *C) writeLog(buf []byte) {
+	c.logb.Write(buf)
+	if c.logw != nil {
+		c.logw.Write(buf)
+	}
+}
+
+func hasStringOrError(x interface{}) (ok bool) {
+	_, ok = x.(fmt.Stringer)
+	if ok {
+		return
+	}
+	_, ok = x.(error)
+	return
+}
+
+func (c *C) logValue(label string, value interface{}) {
+	if label == "" {
+		if hasStringOrError(value) {
+			c.logf("... %#v (%q)", value, value)
+		} else {
+			c.logf("... %#v", value)
+		}
+	} else if value == nil {
+		c.logf("... %s = nil", label)
+	} else {
+		if hasStringOrError(value) {
+			fv := fmt.Sprintf("%#v", value)
+			qv := fmt.Sprintf("%q", value)
+			if fv != qv {
+				c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
+				return
+			}
+		}
+		if s, ok := value.(string); ok && isMultiLine(s) {
+			c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
+			c.logMultiLine(s)
+		} else {
+			c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
+		}
+	}
+}
+
+func (c *C) logMultiLine(s string) {
+	b := make([]byte, 0, len(s)*2)
+	i := 0
+	n := len(s)
+	for i < n {
+		j := i + 1
+		for j < n && s[j-1] != '\n' {
+			j++
+		}
+		b = append(b, "...     "...)
+		b = strconv.AppendQuote(b, s[i:j])
+		if j < n {
+			b = append(b, " +"...)
+		}
+		b = append(b, '\n')
+		i = j
+	}
+	c.writeLog(b)
+}
+
+func isMultiLine(s string) bool {
+	for i := 0; i+1 < len(s); i++ {
+		if s[i] == '\n' {
+			return true
+		}
+	}
+	return false
+}
+
+func (c *C) logString(issue string) {
+	c.log("... ", issue)
+}
+
+func (c *C) logCaller(skip int) {
+	// This is a bit heavier than it ought to be.
+	skip += 1 // Our own frame.
+	pc, callerFile, callerLine, ok := runtime.Caller(skip)
+	if !ok {
+		return
+	}
+	var testFile string
+	var testLine int
+	testFunc := runtime.FuncForPC(c.method.PC())
+	if runtime.FuncForPC(pc) != testFunc {
+		for {
+			skip += 1
+			if pc, file, line, ok := runtime.Caller(skip); ok {
+				// Note that the test line may be different on
+				// distinct calls for the same test.  Showing
+				// the "internal" line is helpful when debugging.
+				if runtime.FuncForPC(pc) == testFunc {
+					testFile, testLine = file, line
+					break
+				}
+			} else {
+				break
+			}
+		}
+	}
+	if testFile != "" && (testFile != callerFile || testLine != callerLine) {
+		c.logCode(testFile, testLine)
+	}
+	c.logCode(callerFile, callerLine)
+}
+
+func (c *C) logCode(path string, line int) {
+	c.logf("%s:%d:", nicePath(path), line)
+	code, err := printLine(path, line)
+	if code == "" {
+		code = "..." // XXX Open the file and take the raw line.
+		if err != nil {
+			code += err.Error()
+		}
+	}
+	c.log(indent(code, "    "))
+}
+
+var valueGo = filepath.Join("reflect", "value.go")
+var asmGo = filepath.Join("runtime", "asm_")
+
+func (c *C) logPanic(skip int, value interface{}) {
+	skip++ // Our own frame.
+	initialSkip := skip
+	for ; ; skip++ {
+		if pc, file, line, ok := runtime.Caller(skip); ok {
+			if skip == initialSkip {
+				c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
+			}
+			name := niceFuncName(pc)
+			path := nicePath(file)
+			if strings.Contains(path, "/gopkg.in/check.v") {
+				continue
+			}
+			if name == "Value.call" && strings.HasSuffix(path, valueGo) {
+				continue
+			}
+			if name == "call16" && strings.Contains(path, asmGo) {
+				continue
+			}
+			c.logf("%s:%d\n  in %s", nicePath(file), line, name)
+		} else {
+			break
+		}
+	}
+}
+
+func (c *C) logSoftPanic(issue string) {
+	c.log("... Panic: ", issue)
+}
+
+func (c *C) logArgPanic(method *methodType, expectedType string) {
+	c.logf("... Panic: %s argument should be %s",
+		niceFuncName(method.PC()), expectedType)
+}
+
+// -----------------------------------------------------------------------
+// Some simple formatting helpers.
+
+var initWD, initWDErr = os.Getwd()
+
+func init() {
+	if initWDErr == nil {
+		initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
+	}
+}
+
+func nicePath(path string) string {
+	if initWDErr == nil {
+		if strings.HasPrefix(path, initWD) {
+			return path[len(initWD):]
+		}
+	}
+	return path
+}
+
+func niceFuncPath(pc uintptr) string {
+	function := runtime.FuncForPC(pc)
+	if function != nil {
+		filename, line := function.FileLine(pc)
+		return fmt.Sprintf("%s:%d", nicePath(filename), line)
+	}
+	return "<unknown path>"
+}
+
+func niceFuncName(pc uintptr) string {
+	function := runtime.FuncForPC(pc)
+	if function != nil {
+		name := path.Base(function.Name())
+		if i := strings.Index(name, "."); i > 0 {
+			name = name[i+1:]
+		}
+		if strings.HasPrefix(name, "(*") {
+			if i := strings.Index(name, ")"); i > 0 {
+				name = name[2:i] + name[i+1:]
+			}
+		}
+		if i := strings.LastIndex(name, ".*"); i != -1 {
+			name = name[:i] + "." + name[i+2:]
+		}
+		if i := strings.LastIndex(name, "·"); i != -1 {
+			name = name[:i] + "." + name[i+2:]
+		}
+		return name
+	}
+	return "<unknown function>"
+}
+
+// -----------------------------------------------------------------------
+// Result tracker to aggregate call results.
+
+type Result struct {
+	Succeeded        int
+	Failed           int
+	Skipped          int
+	Panicked         int
+	FixturePanicked  int
+	ExpectedFailures int
+	Missed           int    // Not even tried to run, related to a panic in the fixture.
+	RunError         error  // Houston, we've got a problem.
+	WorkDir          string // If KeepWorkDir is true
+}
+
+type resultTracker struct {
+	result          Result
+	_lastWasProblem bool
+	_waiting        int
+	_missed         int
+	_expectChan     chan *C
+	_doneChan       chan *C
+	_stopChan       chan bool
+}
+
+func newResultTracker() *resultTracker {
+	return &resultTracker{_expectChan: make(chan *C), // Synchronous
+		_doneChan: make(chan *C, 32), // Asynchronous
+		_stopChan: make(chan bool)}   // Synchronous
+}
+
+func (tracker *resultTracker) start() {
+	go tracker._loopRoutine()
+}
+
+func (tracker *resultTracker) waitAndStop() {
+	<-tracker._stopChan
+}
+
+func (tracker *resultTracker) expectCall(c *C) {
+	tracker._expectChan <- c
+}
+
+func (tracker *resultTracker) callDone(c *C) {
+	tracker._doneChan <- c
+}
+
+func (tracker *resultTracker) _loopRoutine() {
+	for {
+		var c *C
+		if tracker._waiting > 0 {
+			// Calls still running. Can't stop.
+			select {
+			// XXX Reindent this (not now to make diff clear)
+			case c = <-tracker._expectChan:
+				tracker._waiting += 1
+			case c = <-tracker._doneChan:
+				tracker._waiting -= 1
+				switch c.status {
+				case succeededSt:
+					if c.kind == testKd {
+						if c.mustFail {
+							tracker.result.ExpectedFailures++
+						} else {
+							tracker.result.Succeeded++
+						}
+					}
+				case failedSt:
+					tracker.result.Failed++
+				case panickedSt:
+					if c.kind == fixtureKd {
+						tracker.result.FixturePanicked++
+					} else {
+						tracker.result.Panicked++
+					}
+				case fixturePanickedSt:
+					// Track it as missed, since the panic
+					// was on the fixture, not on the test.
+					tracker.result.Missed++
+				case missedSt:
+					tracker.result.Missed++
+				case skippedSt:
+					if c.kind == testKd {
+						tracker.result.Skipped++
+					}
+				}
+			}
+		} else {
+			// No calls.  Can stop, but no done calls here.
+			select {
+			case tracker._stopChan <- true:
+				return
+			case c = <-tracker._expectChan:
+				tracker._waiting += 1
+			case c = <-tracker._doneChan:
+				panic("Tracker got an unexpected done call.")
+			}
+		}
+	}
+}
+
+// -----------------------------------------------------------------------
+// The underlying suite runner.
+
+type suiteRunner struct {
+	suite                     interface{}
+	setUpSuite, tearDownSuite *methodType
+	setUpTest, tearDownTest   *methodType
+	tests                     []*methodType
+	tracker                   *resultTracker
+	tempDir                   *tempDir
+	keepDir                   bool
+	output                    *outputWriter
+	reportedProblemLast       bool
+	benchTime                 time.Duration
+	benchMem                  bool
+}
+
+type RunConf struct {
+	Output        io.Writer
+	Stream        bool
+	Verbose       bool
+	Filter        string
+	Benchmark     bool
+	BenchmarkTime time.Duration // Defaults to 1 second
+	BenchmarkMem  bool
+	KeepWorkDir   bool
+}
+
+// Create a new suiteRunner able to run all methods in the given suite.
+func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
+	var conf RunConf
+	if runConf != nil {
+		conf = *runConf
+	}
+	if conf.Output == nil {
+		conf.Output = os.Stdout
+	}
+	if conf.Benchmark {
+		conf.Verbose = true
+	}
+
+	suiteType := reflect.TypeOf(suite)
+	suiteNumMethods := suiteType.NumMethod()
+	suiteValue := reflect.ValueOf(suite)
+
+	runner := &suiteRunner{
+		suite:     suite,
+		output:    newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
+		tracker:   newResultTracker(),
+		benchTime: conf.BenchmarkTime,
+		benchMem:  conf.BenchmarkMem,
+		tempDir:   &tempDir{},
+		keepDir:   conf.KeepWorkDir,
+		tests:     make([]*methodType, 0, suiteNumMethods),
+	}
+	if runner.benchTime == 0 {
+		runner.benchTime = 1 * time.Second
+	}
+
+	var filterRegexp *regexp.Regexp
+	if conf.Filter != "" {
+		if regexp, err := regexp.Compile(conf.Filter); err != nil {
+			msg := "Bad filter expression: " + err.Error()
+			runner.tracker.result.RunError = errors.New(msg)
+			return runner
+		} else {
+			filterRegexp = regexp
+		}
+	}
+
+	for i := 0; i != suiteNumMethods; i++ {
+		method := newMethod(suiteValue, i)
+		switch method.Info.Name {
+		case "SetUpSuite":
+			runner.setUpSuite = method
+		case "TearDownSuite":
+			runner.tearDownSuite = method
+		case "SetUpTest":
+			runner.setUpTest = method
+		case "TearDownTest":
+			runner.tearDownTest = method
+		default:
+			prefix := "Test"
+			if conf.Benchmark {
+				prefix = "Benchmark"
+			}
+			if !strings.HasPrefix(method.Info.Name, prefix) {
+				continue
+			}
+			if filterRegexp == nil || method.matches(filterRegexp) {
+				runner.tests = append(runner.tests, method)
+			}
+		}
+	}
+	return runner
+}
+
+// Run all methods in the given suite.
+func (runner *suiteRunner) run() *Result {
+	if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
+		runner.tracker.start()
+		if runner.checkFixtureArgs() {
+			c := runner.runFixture(runner.setUpSuite, "", nil)
+			if c == nil || c.status == succeededSt {
+				for i := 0; i != len(runner.tests); i++ {
+					c := runner.runTest(runner.tests[i])
+					if c.status == fixturePanickedSt {
+						runner.skipTests(missedSt, runner.tests[i+1:])
+						break
+					}
+				}
+			} else if c != nil && c.status == skippedSt {
+				runner.skipTests(skippedSt, runner.tests)
+			} else {
+				runner.skipTests(missedSt, runner.tests)
+			}
+			runner.runFixture(runner.tearDownSuite, "", nil)
+		} else {
+			runner.skipTests(missedSt, runner.tests)
+		}
+		runner.tracker.waitAndStop()
+		if runner.keepDir {
+			runner.tracker.result.WorkDir = runner.tempDir.path
+		} else {
+			runner.tempDir.removeAll()
+		}
+	}
+	return &runner.tracker.result
+}
+
+// Create a call object with the given suite method, and fork a
+// goroutine with the provided dispatcher for running it.
+func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
+	var logw io.Writer
+	if runner.output.Stream {
+		logw = runner.output
+	}
+	if logb == nil {
+		logb = new(logger)
+	}
+	c := &C{
+		method:    method,
+		kind:      kind,
+		testName:  testName,
+		logb:      logb,
+		logw:      logw,
+		tempDir:   runner.tempDir,
+		done:      make(chan *C, 1),
+		timer:     timer{benchTime: runner.benchTime},
+		startTime: time.Now(),
+		benchMem:  runner.benchMem,
+	}
+	runner.tracker.expectCall(c)
+	go (func() {
+		runner.reportCallStarted(c)
+		defer runner.callDone(c)
+		dispatcher(c)
+	})()
+	return c
+}
+
+// Same as forkCall(), but wait for call to finish before returning.
+func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
+	c := runner.forkCall(method, kind, testName, logb, dispatcher)
+	<-c.done
+	return c
+}
+
+// Handle a finished call.  If there were any panics, update the call status
+// accordingly.  Then, mark the call as done and report to the tracker.
+func (runner *suiteRunner) callDone(c *C) {
+	value := recover()
+	if value != nil {
+		switch v := value.(type) {
+		case *fixturePanic:
+			if v.status == skippedSt {
+				c.status = skippedSt
+			} else {
+				c.logSoftPanic("Fixture has panicked (see related PANIC)")
+				c.status = fixturePanickedSt
+			}
+		default:
+			c.logPanic(1, value)
+			c.status = panickedSt
+		}
+	}
+	if c.mustFail {
+		switch c.status {
+		case failedSt:
+			c.status = succeededSt
+		case succeededSt:
+			c.status = failedSt
+			c.logString("Error: Test succeeded, but was expected to fail")
+			c.logString("Reason: " + c.reason)
+		}
+	}
+
+	runner.reportCallDone(c)
+	c.done <- c
+}
+
+// Runs a fixture call synchronously.  The fixture will still be run in a
+// goroutine like all suite methods, but this method will not return
+// while the fixture goroutine is not done, because the fixture must be
+// run in a desired order.
+func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
+	if method != nil {
+		c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
+			c.ResetTimer()
+			c.StartTimer()
+			defer c.StopTimer()
+			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+		})
+		return c
+	}
+	return nil
+}
+
+// Run the fixture method with runFixture(), but panic with a fixturePanic{}
+// in case the fixture method panics.  This makes it easier to track the
+// fixture panic together with other call panics within forkTest().
+func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
+	if skipped != nil && *skipped {
+		return nil
+	}
+	c := runner.runFixture(method, testName, logb)
+	if c != nil && c.status != succeededSt {
+		if skipped != nil {
+			*skipped = c.status == skippedSt
+		}
+		panic(&fixturePanic{c.status, method})
+	}
+	return c
+}
+
+type fixturePanic struct {
+	status funcStatus
+	method *methodType
+}
+
+// Run the suite test method, together with the test-specific fixture,
+// asynchronously.
+func (runner *suiteRunner) forkTest(method *methodType) *C {
+	testName := method.String()
+	return runner.forkCall(method, testKd, testName, nil, func(c *C) {
+		var skipped bool
+		defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
+		defer c.StopTimer()
+		benchN := 1
+		for {
+			runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
+			mt := c.method.Type()
+			if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
+				// Rather than a plain panic, provide a more helpful message when
+				// the argument type is incorrect.
+				c.status = panickedSt
+				c.logArgPanic(c.method, "*check.C")
+				return
+			}
+			if strings.HasPrefix(c.method.Info.Name, "Test") {
+				c.ResetTimer()
+				c.StartTimer()
+				c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+				return
+			}
+			if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
+				panic("unexpected method prefix: " + c.method.Info.Name)
+			}
+
+			runtime.GC()
+			c.N = benchN
+			c.ResetTimer()
+			c.StartTimer()
+			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+			c.StopTimer()
+			if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
+				return
+			}
+			perOpN := int(1e9)
+			if c.nsPerOp() != 0 {
+				perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
+			}
+
+			// Logic taken from the stock testing package:
+			// - Run more iterations than we think we'll need for a second (1.5x).
+			// - Don't grow too fast in case we had timing errors previously.
+			// - Be sure to run at least one more than last time.
+			benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
+			benchN = roundUp(benchN)
+
+			skipped = true // Don't run the deferred one if this panics.
+			runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
+			skipped = false
+		}
+	})
+}
+
+// Same as forkTest(), but wait for the test to finish before returning.
+func (runner *suiteRunner) runTest(method *methodType) *C {
+	c := runner.forkTest(method)
+	<-c.done
+	return c
+}
+
+// Helper to mark tests as skipped or missed.  A bit heavy for what
+// it does, but it enables homogeneous handling of tracking, including
+// nice verbose output.
+func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
+	for _, method := range methods {
+		runner.runFunc(method, testKd, "", nil, func(c *C) {
+			c.status = status
+		})
+	}
+}
+
+// Verify if the fixture arguments are *check.C.  In case of errors,
+// log the error as a panic in the fixture method call, and return false.
+func (runner *suiteRunner) checkFixtureArgs() bool {
+	succeeded := true
+	argType := reflect.TypeOf(&C{})
+	for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} {
+		if method != nil {
+			mt := method.Type()
+			if mt.NumIn() != 1 || mt.In(0) != argType {
+				succeeded = false
+				runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
+					c.logArgPanic(method, "*check.C")
+					c.status = panickedSt
+				})
+			}
+		}
+	}
+	return succeeded
+}
+
+func (runner *suiteRunner) reportCallStarted(c *C) {
+	runner.output.WriteCallStarted("START", c)
+}
+
+func (runner *suiteRunner) reportCallDone(c *C) {
+	runner.tracker.callDone(c)
+	switch c.status {
+	case succeededSt:
+		if c.mustFail {
+			runner.output.WriteCallSuccess("FAIL EXPECTED", c)
+		} else {
+			runner.output.WriteCallSuccess("PASS", c)
+		}
+	case skippedSt:
+		runner.output.WriteCallSuccess("SKIP", c)
+	case failedSt:
+		runner.output.WriteCallProblem("FAIL", c)
+	case panickedSt:
+		runner.output.WriteCallProblem("PANIC", c)
+	case fixturePanickedSt:
+		// That's a testKd call reporting that its fixture
+		// has panicked. The fixture call which caused the
+		// panic itself was tracked above. We'll report to
+		// aid debugging.
+		runner.output.WriteCallProblem("PANIC", c)
+	case missedSt:
+		runner.output.WriteCallSuccess("MISS", c)
+	}
+}
+
+// -----------------------------------------------------------------------
+// Output writer manages atomic output writing according to settings.
+
+type outputWriter struct {
+	m                    sync.Mutex
+	writer               io.Writer
+	wroteCallProblemLast bool
+	Stream               bool
+	Verbose              bool
+}
+
+func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
+	return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
+}
+
+func (ow *outputWriter) Write(content []byte) (n int, err error) {
+	ow.m.Lock()
+	n, err = ow.writer.Write(content)
+	ow.m.Unlock()
+	return
+}
+
+func (ow *outputWriter) WriteCallStarted(label string, c *C) {
+	if ow.Stream {
+		header := renderCallHeader(label, c, "", "\n")
+		ow.m.Lock()
+		ow.writer.Write([]byte(header))
+		ow.m.Unlock()
+	}
+}
+
+func (ow *outputWriter) WriteCallProblem(label string, c *C) {
+	var prefix string
+	if !ow.Stream {
+		prefix = "\n-----------------------------------" +
+			"-----------------------------------\n"
+	}
+	header := renderCallHeader(label, c, prefix, "\n\n")
+	ow.m.Lock()
+	ow.wroteCallProblemLast = true
+	ow.writer.Write([]byte(header))
+	if !ow.Stream {
+		c.logb.WriteTo(ow.writer)
+	}
+	ow.m.Unlock()
+}
+
+func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
+	if ow.Stream || (ow.Verbose && c.kind == testKd) {
+		// TODO Use a buffer here.
+		var suffix string
+		if c.reason != "" {
+			suffix = " (" + c.reason + ")"
+		}
+		if c.status == succeededSt {
+			suffix += "\t" + c.timerString()
+		}
+		suffix += "\n"
+		if ow.Stream {
+			suffix += "\n"
+		}
+		header := renderCallHeader(label, c, "", suffix)
+		ow.m.Lock()
+		// Resist temptation of using line as prefix above due to race.
+		if !ow.Stream && ow.wroteCallProblemLast {
+			header = "\n-----------------------------------" +
+				"-----------------------------------\n" +
+				header
+		}
+		ow.wroteCallProblemLast = false
+		ow.writer.Write([]byte(header))
+		ow.m.Unlock()
+	}
+}
+
+func renderCallHeader(label string, c *C, prefix, suffix string) string {
+	pc := c.method.PC()
+	return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
+		niceFuncName(pc), suffix)
+}
diff --git a/vendor/src/github.com/go-check/check/check_test.go b/vendor/src/github.com/go-check/check/check_test.go
new file mode 100644
index 0000000..871b325
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/check_test.go
@@ -0,0 +1,207 @@
+// This file contains just a few generic helpers which are used by the
+// other test files.
+
+package check_test
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"regexp"
+	"runtime"
+	"testing"
+	"time"
+
+	"gopkg.in/check.v1"
+)
+
+// We count the number of suites run at least to get a vague hint that the
+// test suite is behaving as it should.  Otherwise a bug introduced at the
+// very core of the system could go unperceived.
+const suitesRunExpected = 8
+
+var suitesRun int = 0
+
+func Test(t *testing.T) {
+	check.TestingT(t)
+	if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" {
+		critical(fmt.Sprintf("Expected %d suites to run rather than %d",
+			suitesRunExpected, suitesRun))
+	}
+}
+
+// -----------------------------------------------------------------------
+// Helper functions.
+
+// Break down badly.  This is used in test cases which can't yet assume
+// that the fundamental bits are working.
+func critical(error string) {
+	fmt.Fprintln(os.Stderr, "CRITICAL: "+error)
+	os.Exit(1)
+}
+
+// Return the file line where it's called.
+func getMyLine() int {
+	if _, _, line, ok := runtime.Caller(1); ok {
+		return line
+	}
+	return -1
+}
+
+// -----------------------------------------------------------------------
+// Helper type implementing a basic io.Writer for testing output.
+
+// Type implementing the io.Writer interface for analyzing output.
+type String struct {
+	value string
+}
+
+// The only function required by the io.Writer interface.  Will append
+// written data to the String.value string.
+func (s *String) Write(p []byte) (n int, err error) {
+	s.value += string(p)
+	return len(p), nil
+}
+
+// Trivial wrapper to test errors happening on a different file
+// than the test itself.
+func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) {
+	return c.Check(obtained, check.Equals, expected), getMyLine()
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing basic fail behavior.
+
+type FailHelper struct {
+	testLine int
+}
+
+func (s *FailHelper) TestLogAndFail(c *check.C) {
+	s.testLine = getMyLine() - 1
+	c.Log("Expected failure!")
+	c.Fail()
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing basic success behavior.
+
+type SuccessHelper struct{}
+
+func (s *SuccessHelper) TestLogAndSucceed(c *check.C) {
+	c.Log("Expected success!")
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing ordering and behavior of fixture.
+
+type FixtureHelper struct {
+	calls   []string
+	panicOn string
+	skip    bool
+	skipOnN int
+	sleepOn string
+	sleep   time.Duration
+	bytes   int64
+}
+
+func (s *FixtureHelper) trace(name string, c *check.C) {
+	s.calls = append(s.calls, name)
+	if name == s.panicOn {
+		panic(name)
+	}
+	if s.sleep > 0 && s.sleepOn == name {
+		time.Sleep(s.sleep)
+	}
+	if s.skip && s.skipOnN == len(s.calls)-1 {
+		c.Skip("skipOnN == n")
+	}
+}
+
+func (s *FixtureHelper) SetUpSuite(c *check.C) {
+	s.trace("SetUpSuite", c)
+}
+
+func (s *FixtureHelper) TearDownSuite(c *check.C) {
+	s.trace("TearDownSuite", c)
+}
+
+func (s *FixtureHelper) SetUpTest(c *check.C) {
+	s.trace("SetUpTest", c)
+}
+
+func (s *FixtureHelper) TearDownTest(c *check.C) {
+	s.trace("TearDownTest", c)
+}
+
+func (s *FixtureHelper) Test1(c *check.C) {
+	s.trace("Test1", c)
+}
+
+func (s *FixtureHelper) Test2(c *check.C) {
+	s.trace("Test2", c)
+}
+
+func (s *FixtureHelper) Benchmark1(c *check.C) {
+	s.trace("Benchmark1", c)
+	for i := 0; i < c.N; i++ {
+		time.Sleep(s.sleep)
+	}
+}
+
+func (s *FixtureHelper) Benchmark2(c *check.C) {
+	s.trace("Benchmark2", c)
+	c.SetBytes(1024)
+	for i := 0; i < c.N; i++ {
+		time.Sleep(s.sleep)
+	}
+}
+
+func (s *FixtureHelper) Benchmark3(c *check.C) {
+	var x []int64
+	s.trace("Benchmark3", c)
+	for i := 0; i < c.N; i++ {
+		time.Sleep(s.sleep)
+		x = make([]int64, 5)
+		_ = x
+	}
+}
+
+// -----------------------------------------------------------------------
+// Helper which checks the state of the test and ensures that it matches
+// the given expectations.  Depends on c.Errorf() working, so shouldn't
+// be used to test this one function.
+
+type expectedState struct {
+	name   string
+	result interface{}
+	failed bool
+	log    string
+}
+
+// Verify the state of the test.  Note that since this also verifies if
+// the test is supposed to be in a failed state, no other checks should
+// be done in addition to what is being tested.
+func checkState(c *check.C, result interface{}, expected *expectedState) {
+	failed := c.Failed()
+	c.Succeed()
+	log := c.GetTestLog()
+	matched, matchError := regexp.MatchString("^"+expected.log+"$", log)
+	if matchError != nil {
+		c.Errorf("Error in matching expression used in testing %s",
+			expected.name)
+	} else if !matched {
+		c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------",
+			expected.name, log, expected.log)
+	}
+	if result != expected.result {
+		c.Errorf("%s returned %#v rather than %#v",
+			expected.name, result, expected.result)
+	}
+	if failed != expected.failed {
+		if failed {
+			c.Errorf("%s has failed when it shouldn't", expected.name)
+		} else {
+			c.Errorf("%s has not failed when it should", expected.name)
+		}
+	}
+}
diff --git a/vendor/src/github.com/go-check/check/checkers.go b/vendor/src/github.com/go-check/check/checkers.go
new file mode 100644
index 0000000..bac3387
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/checkers.go
@@ -0,0 +1,458 @@
+package check
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+)
+
+// -----------------------------------------------------------------------
+// CommentInterface and Commentf helper, to attach extra information to checks.
+
+type comment struct {
+	format string
+	args   []interface{}
+}
+
+// Commentf returns an infomational value to use with Assert or Check calls.
+// If the checker test fails, the provided arguments will be passed to
+// fmt.Sprintf, and will be presented next to the logged failure.
+//
+// For example:
+//
+//     c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
+//
+// Note that if the comment is constant, a better option is to
+// simply use a normal comment right above or next to the line, as
+// it will also get printed with any errors:
+//
+//     c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
+//
+func Commentf(format string, args ...interface{}) CommentInterface {
+	return &comment{format, args}
+}
+
+// CommentInterface must be implemented by types that attach extra
+// information to failed checks. See the Commentf function for details.
+type CommentInterface interface {
+	CheckCommentString() string
+}
+
+func (c *comment) CheckCommentString() string {
+	return fmt.Sprintf(c.format, c.args...)
+}
+
+// -----------------------------------------------------------------------
+// The Checker interface.
+
+// The Checker interface must be provided by checkers used with
+// the Assert and Check verification methods.
+type Checker interface {
+	Info() *CheckerInfo
+	Check(params []interface{}, names []string) (result bool, error string)
+}
+
+// See the Checker interface.
+type CheckerInfo struct {
+	Name   string
+	Params []string
+}
+
+func (info *CheckerInfo) Info() *CheckerInfo {
+	return info
+}
+
+// -----------------------------------------------------------------------
+// Not checker logic inverter.
+
+// The Not checker inverts the logic of the provided checker.  The
+// resulting checker will succeed where the original one failed, and
+// vice-versa.
+//
+// For example:
+//
+//     c.Assert(a, Not(Equals), b)
+//
+func Not(checker Checker) Checker {
+	return &notChecker{checker}
+}
+
+type notChecker struct {
+	sub Checker
+}
+
+func (checker *notChecker) Info() *CheckerInfo {
+	info := *checker.sub.Info()
+	info.Name = "Not(" + info.Name + ")"
+	return &info
+}
+
+func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	result, error = checker.sub.Check(params, names)
+	result = !result
+	return
+}
+
+// -----------------------------------------------------------------------
+// IsNil checker.
+
+type isNilChecker struct {
+	*CheckerInfo
+}
+
+// The IsNil checker tests whether the obtained value is nil.
+//
+// For example:
+//
+//    c.Assert(err, IsNil)
+//
+var IsNil Checker = &isNilChecker{
+	&CheckerInfo{Name: "IsNil", Params: []string{"value"}},
+}
+
+func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	return isNil(params[0]), ""
+}
+
+func isNil(obtained interface{}) (result bool) {
+	if obtained == nil {
+		result = true
+	} else {
+		switch v := reflect.ValueOf(obtained); v.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+			return v.IsNil()
+		}
+	}
+	return
+}
+
+// -----------------------------------------------------------------------
+// NotNil checker. Alias for Not(IsNil), since it's so common.
+
+type notNilChecker struct {
+	*CheckerInfo
+}
+
+// The NotNil checker verifies that the obtained value is not nil.
+//
+// For example:
+//
+//     c.Assert(iface, NotNil)
+//
+// This is an alias for Not(IsNil), made available since it's a
+// fairly common check.
+//
+var NotNil Checker = &notNilChecker{
+	&CheckerInfo{Name: "NotNil", Params: []string{"value"}},
+}
+
+func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	return !isNil(params[0]), ""
+}
+
+// -----------------------------------------------------------------------
+// Equals checker.
+
+type equalsChecker struct {
+	*CheckerInfo
+}
+
+// The Equals checker verifies that the obtained value is equal to
+// the expected value, according to usual Go semantics for ==.
+//
+// For example:
+//
+//     c.Assert(value, Equals, 42)
+//
+var Equals Checker = &equalsChecker{
+	&CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	defer func() {
+		if v := recover(); v != nil {
+			result = false
+			error = fmt.Sprint(v)
+		}
+	}()
+	return params[0] == params[1], ""
+}
+
+// -----------------------------------------------------------------------
+// DeepEquals checker.
+
+type deepEqualsChecker struct {
+	*CheckerInfo
+}
+
+// The DeepEquals checker verifies that the obtained value is deep-equal to
+// the expected value.  The check will work correctly even when facing
+// slices, interfaces, and values of different types (which always fail
+// the test).
+//
+// For example:
+//
+//     c.Assert(value, DeepEquals, 42)
+//     c.Assert(array, DeepEquals, []string{"hi", "there"})
+//
+var DeepEquals Checker = &deepEqualsChecker{
+	&CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	return reflect.DeepEqual(params[0], params[1]), ""
+}
+
+// -----------------------------------------------------------------------
+// HasLen checker.
+
+type hasLenChecker struct {
+	*CheckerInfo
+}
+
+// The HasLen checker verifies that the obtained value has the
+// provided length. In many cases this is superior to using Equals
+// in conjuction with the len function because in case the check
+// fails the value itself will be printed, instead of its length,
+// providing more details for figuring the problem.
+//
+// For example:
+//
+//     c.Assert(list, HasLen, 5)
+//
+var HasLen Checker = &hasLenChecker{
+	&CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
+}
+
+func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	n, ok := params[1].(int)
+	if !ok {
+		return false, "n must be an int"
+	}
+	value := reflect.ValueOf(params[0])
+	switch value.Kind() {
+	case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
+	default:
+		return false, "obtained value type has no length"
+	}
+	return value.Len() == n, ""
+}
+
+// -----------------------------------------------------------------------
+// ErrorMatches checker.
+
+type errorMatchesChecker struct {
+	*CheckerInfo
+}
+
+// The ErrorMatches checker verifies that the error value
+// is non nil and matches the regular expression provided.
+//
+// For example:
+//
+//     c.Assert(err, ErrorMatches, "perm.*denied")
+//
+var ErrorMatches Checker = errorMatchesChecker{
+	&CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
+}
+
+func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
+	if params[0] == nil {
+		return false, "Error value is nil"
+	}
+	err, ok := params[0].(error)
+	if !ok {
+		return false, "Value is not an error"
+	}
+	params[0] = err.Error()
+	names[0] = "error"
+	return matches(params[0], params[1])
+}
+
+// -----------------------------------------------------------------------
+// Matches checker.
+
+type matchesChecker struct {
+	*CheckerInfo
+}
+
+// The Matches checker verifies that the string provided as the obtained
+// value (or the string resulting from obtained.String()) matches the
+// regular expression provided.
+//
+// For example:
+//
+//     c.Assert(err, Matches, "perm.*denied")
+//
+var Matches Checker = &matchesChecker{
+	&CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
+}
+
+func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	return matches(params[0], params[1])
+}
+
+func matches(value, regex interface{}) (result bool, error string) {
+	reStr, ok := regex.(string)
+	if !ok {
+		return false, "Regex must be a string"
+	}
+	valueStr, valueIsStr := value.(string)
+	if !valueIsStr {
+		if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
+			valueStr, valueIsStr = valueWithStr.String(), true
+		}
+	}
+	if valueIsStr {
+		matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
+		if err != nil {
+			return false, "Can't compile regex: " + err.Error()
+		}
+		return matches, ""
+	}
+	return false, "Obtained value is not a string and has no .String()"
+}
+
+// -----------------------------------------------------------------------
+// Panics checker.
+
+type panicsChecker struct {
+	*CheckerInfo
+}
+
+// The Panics checker verifies that calling the provided zero-argument
+// function will cause a panic which is deep-equal to the provided value.
+//
+// For example:
+//
+//     c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
+//
+//
+var Panics Checker = &panicsChecker{
+	&CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
+}
+
+func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	f := reflect.ValueOf(params[0])
+	if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
+		return false, "Function must take zero arguments"
+	}
+	defer func() {
+		// If the function has not panicked, then don't do the check.
+		if error != "" {
+			return
+		}
+		params[0] = recover()
+		names[0] = "panic"
+		result = reflect.DeepEqual(params[0], params[1])
+	}()
+	f.Call(nil)
+	return false, "Function has not panicked"
+}
+
+type panicMatchesChecker struct {
+	*CheckerInfo
+}
+
+// The PanicMatches checker verifies that calling the provided zero-argument
+// function will cause a panic with an error value matching
+// the regular expression provided.
+//
+// For example:
+//
+//     c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
+//
+//
+var PanicMatches Checker = &panicMatchesChecker{
+	&CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
+}
+
+func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
+	f := reflect.ValueOf(params[0])
+	if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
+		return false, "Function must take zero arguments"
+	}
+	defer func() {
+		// If the function has not panicked, then don't do the check.
+		if errmsg != "" {
+			return
+		}
+		obtained := recover()
+		names[0] = "panic"
+		if e, ok := obtained.(error); ok {
+			params[0] = e.Error()
+		} else if _, ok := obtained.(string); ok {
+			params[0] = obtained
+		} else {
+			errmsg = "Panic value is not a string or an error"
+			return
+		}
+		result, errmsg = matches(params[0], params[1])
+	}()
+	f.Call(nil)
+	return false, "Function has not panicked"
+}
+
+// -----------------------------------------------------------------------
+// FitsTypeOf checker.
+
+type fitsTypeChecker struct {
+	*CheckerInfo
+}
+
+// The FitsTypeOf checker verifies that the obtained value is
+// assignable to a variable with the same type as the provided
+// sample value.
+//
+// For example:
+//
+//     c.Assert(value, FitsTypeOf, int64(0))
+//     c.Assert(value, FitsTypeOf, os.Error(nil))
+//
+var FitsTypeOf Checker = &fitsTypeChecker{
+	&CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
+}
+
+func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	obtained := reflect.ValueOf(params[0])
+	sample := reflect.ValueOf(params[1])
+	if !obtained.IsValid() {
+		return false, ""
+	}
+	if !sample.IsValid() {
+		return false, "Invalid sample value"
+	}
+	return obtained.Type().AssignableTo(sample.Type()), ""
+}
+
+// -----------------------------------------------------------------------
+// Implements checker.
+
+type implementsChecker struct {
+	*CheckerInfo
+}
+
+// The Implements checker verifies that the obtained value
+// implements the interface specified via a pointer to an interface
+// variable.
+//
+// For example:
+//
+//     var e os.Error
+//     c.Assert(err, Implements, &e)
+//
+var Implements Checker = &implementsChecker{
+	&CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
+}
+
+func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+	obtained := reflect.ValueOf(params[0])
+	ifaceptr := reflect.ValueOf(params[1])
+	if !obtained.IsValid() {
+		return false, ""
+	}
+	if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
+		return false, "ifaceptr should be a pointer to an interface variable"
+	}
+	return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
+}
diff --git a/vendor/src/github.com/go-check/check/checkers_test.go b/vendor/src/github.com/go-check/check/checkers_test.go
new file mode 100644
index 0000000..5c69747
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/checkers_test.go
@@ -0,0 +1,272 @@
+package check_test
+
+import (
+	"errors"
+	"gopkg.in/check.v1"
+	"reflect"
+	"runtime"
+)
+
+type CheckersS struct{}
+
+var _ = check.Suite(&CheckersS{})
+
+func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) {
+	info := checker.Info()
+	if info.Name != name {
+		c.Fatalf("Got name %s, expected %s", info.Name, name)
+	}
+	if !reflect.DeepEqual(info.Params, paramNames) {
+		c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames)
+	}
+}
+
+func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) {
+	info := checker.Info()
+	if len(params) != len(info.Params) {
+		c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params))
+	}
+	names := append([]string{}, info.Params...)
+	result_, error_ := checker.Check(params, names)
+	if result_ != result || error_ != error {
+		c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)",
+			info.Name, params, result_, error_, result, error)
+	}
+	return params, names
+}
+
+func (s *CheckersS) TestComment(c *check.C) {
+	bug := check.Commentf("a %d bc", 42)
+	comment := bug.CheckCommentString()
+	if comment != "a 42 bc" {
+		c.Fatalf("Commentf returned %#v", comment)
+	}
+}
+
+func (s *CheckersS) TestIsNil(c *check.C) {
+	testInfo(c, check.IsNil, "IsNil", []string{"value"})
+
+	testCheck(c, check.IsNil, true, "", nil)
+	testCheck(c, check.IsNil, false, "", "a")
+
+	testCheck(c, check.IsNil, true, "", (chan int)(nil))
+	testCheck(c, check.IsNil, false, "", make(chan int))
+	testCheck(c, check.IsNil, true, "", (error)(nil))
+	testCheck(c, check.IsNil, false, "", errors.New(""))
+	testCheck(c, check.IsNil, true, "", ([]int)(nil))
+	testCheck(c, check.IsNil, false, "", make([]int, 1))
+	testCheck(c, check.IsNil, false, "", int(0))
+}
+
+func (s *CheckersS) TestNotNil(c *check.C) {
+	testInfo(c, check.NotNil, "NotNil", []string{"value"})
+
+	testCheck(c, check.NotNil, false, "", nil)
+	testCheck(c, check.NotNil, true, "", "a")
+
+	testCheck(c, check.NotNil, false, "", (chan int)(nil))
+	testCheck(c, check.NotNil, true, "", make(chan int))
+	testCheck(c, check.NotNil, false, "", (error)(nil))
+	testCheck(c, check.NotNil, true, "", errors.New(""))
+	testCheck(c, check.NotNil, false, "", ([]int)(nil))
+	testCheck(c, check.NotNil, true, "", make([]int, 1))
+}
+
+func (s *CheckersS) TestNot(c *check.C) {
+	testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"})
+
+	testCheck(c, check.Not(check.IsNil), false, "", nil)
+	testCheck(c, check.Not(check.IsNil), true, "", "a")
+}
+
+type simpleStruct struct {
+	i int
+}
+
+func (s *CheckersS) TestEquals(c *check.C) {
+	testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"})
+
+	// The simplest.
+	testCheck(c, check.Equals, true, "", 42, 42)
+	testCheck(c, check.Equals, false, "", 42, 43)
+
+	// Different native types.
+	testCheck(c, check.Equals, false, "", int32(42), int64(42))
+
+	// With nil.
+	testCheck(c, check.Equals, false, "", 42, nil)
+
+	// Slices
+	testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2})
+
+	// Struct values
+	testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1})
+	testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2})
+
+	// Struct pointers
+	testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1})
+	testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2})
+}
+
+func (s *CheckersS) TestDeepEquals(c *check.C) {
+	testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"})
+
+	// The simplest.
+	testCheck(c, check.DeepEquals, true, "", 42, 42)
+	testCheck(c, check.DeepEquals, false, "", 42, 43)
+
+	// Different native types.
+	testCheck(c, check.DeepEquals, false, "", int32(42), int64(42))
+
+	// With nil.
+	testCheck(c, check.DeepEquals, false, "", 42, nil)
+
+	// Slices
+	testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2})
+	testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3})
+
+	// Struct values
+	testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1})
+	testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2})
+
+	// Struct pointers
+	testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1})
+	testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2})
+}
+
+func (s *CheckersS) TestHasLen(c *check.C) {
+	testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"})
+
+	testCheck(c, check.HasLen, true, "", "abcd", 4)
+	testCheck(c, check.HasLen, true, "", []int{1, 2}, 2)
+	testCheck(c, check.HasLen, false, "", []int{1, 2}, 3)
+
+	testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2")
+	testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2)
+}
+
+func (s *CheckersS) TestErrorMatches(c *check.C) {
+	testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"})
+
+	testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error")
+	testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error")
+	testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error")
+	testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or")
+
+	// Verify params mutation
+	params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error")
+	c.Assert(params[0], check.Equals, "some error")
+	c.Assert(names[0], check.Equals, "error")
+}
+
+func (s *CheckersS) TestMatches(c *check.C) {
+	testInfo(c, check.Matches, "Matches", []string{"value", "regex"})
+
+	// Simple matching
+	testCheck(c, check.Matches, true, "", "abc", "abc")
+	testCheck(c, check.Matches, true, "", "abc", "a.c")
+
+	// Must match fully
+	testCheck(c, check.Matches, false, "", "abc", "ab")
+	testCheck(c, check.Matches, false, "", "abc", "bc")
+
+	// String()-enabled values accepted
+	testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c")
+	testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d")
+
+	// Some error conditions.
+	testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c")
+	testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c")
+}
+
+func (s *CheckersS) TestPanics(c *check.C) {
+	testInfo(c, check.Panics, "Panics", []string{"function", "expected"})
+
+	// Some errors.
+	testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM")
+	testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM")
+
+	// Plain strings.
+	testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM")
+	testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM")
+	testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM")
+
+	// Error values.
+	testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM"))
+	testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))
+
+	type deep struct{ i int }
+	// Deep value
+	testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99})
+
+	// Verify params/names mutation
+	params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))
+	c.Assert(params[0], check.ErrorMatches, "KABOOM")
+	c.Assert(names[0], check.Equals, "panic")
+
+	// Verify a nil panic
+	testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil)
+	testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE")
+}
+
+func (s *CheckersS) TestPanicMatches(c *check.C) {
+	testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"})
+
+	// Error matching.
+	testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M")
+	testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M")
+
+	// Some errors.
+	testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM")
+	testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM")
+
+	// Plain strings.
+	testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M")
+	testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM")
+	testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M")
+
+	// Verify params/names mutation
+	params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM")
+	c.Assert(params[0], check.Equals, "KABOOM")
+	c.Assert(names[0], check.Equals, "panic")
+
+	// Verify a nil panic
+	testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "")
+}
+
+func (s *CheckersS) TestFitsTypeOf(c *check.C) {
+	testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"})
+
+	// Basic types
+	testCheck(c, check.FitsTypeOf, true, "", 1, 0)
+	testCheck(c, check.FitsTypeOf, false, "", 1, int64(0))
+
+	// Aliases
+	testCheck(c, check.FitsTypeOf, false, "", 1, errors.New(""))
+	testCheck(c, check.FitsTypeOf, false, "", "error", errors.New(""))
+	testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New(""))
+
+	// Structures
+	testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{})
+	testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{})
+	testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{})
+	testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{})
+
+	// Some bad values
+	testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil))
+	testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0)
+}
+
+func (s *CheckersS) TestImplements(c *check.C) {
+	testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"})
+
+	var e error
+	var re runtime.Error
+	testCheck(c, check.Implements, true, "", errors.New(""), &e)
+	testCheck(c, check.Implements, false, "", errors.New(""), &re)
+
+	// Some bad values
+	testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, errors.New(""))
+	testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil))
+	testCheck(c, check.Implements, false, "", interface{}(nil), &e)
+}
diff --git a/vendor/src/github.com/go-check/check/export_test.go b/vendor/src/github.com/go-check/check/export_test.go
new file mode 100644
index 0000000..0e6cfe0
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/export_test.go
@@ -0,0 +1,9 @@
+package check
+
+func PrintLine(filename string, line int) (string, error) {
+	return printLine(filename, line)
+}
+
+func Indent(s, with string) string {
+	return indent(s, with)
+}
diff --git a/vendor/src/github.com/go-check/check/fixture_test.go b/vendor/src/github.com/go-check/check/fixture_test.go
new file mode 100644
index 0000000..2bff9e1
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/fixture_test.go
@@ -0,0 +1,484 @@
+// Tests for the behavior of the test fixture system.
+
+package check_test
+
+import (
+	. "gopkg.in/check.v1"
+)
+
+// -----------------------------------------------------------------------
+// Fixture test suite.
+
+type FixtureS struct{}
+
+var fixtureS = Suite(&FixtureS{})
+
+func (s *FixtureS) TestCountSuite(c *C) {
+	suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Basic fixture ordering verification.
+
+func (s *FixtureS) TestOrder(c *C) {
+	helper := FixtureHelper{}
+	Run(&helper, nil)
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "SetUpTest")
+	c.Check(helper.calls[5], Equals, "Test2")
+	c.Check(helper.calls[6], Equals, "TearDownTest")
+	c.Check(helper.calls[7], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 8)
+}
+
+// -----------------------------------------------------------------------
+// Check the behavior when panics occur within tests and fixtures.
+
+func (s *FixtureS) TestPanicOnTest(c *C) {
+	helper := FixtureHelper{panicOn: "Test1"}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "SetUpTest")
+	c.Check(helper.calls[5], Equals, "Test2")
+	c.Check(helper.calls[6], Equals, "TearDownTest")
+	c.Check(helper.calls[7], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 8)
+
+	expected := "^\n-+\n" +
+		"PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" +
+		"\\.\\.\\. Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" +
+		".+:[0-9]+\n" +
+		"  in (go)?panic\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.trace\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.Test1\n" +
+		"(.|\n)*$"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnSetUpTest(c *C) {
+	helper := FixtureHelper{panicOn: "SetUpTest"}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "TearDownTest")
+	c.Check(helper.calls[3], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 4)
+
+	expected := "^\n-+\n" +
+		"PANIC: check_test\\.go:[0-9]+: " +
+		"FixtureHelper\\.SetUpTest\n\n" +
+		"\\.\\.\\. Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" +
+		".+:[0-9]+\n" +
+		"  in (go)?panic\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.trace\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.SetUpTest\n" +
+		"(.|\n)*" +
+		"\n-+\n" +
+		"PANIC: check_test\\.go:[0-9]+: " +
+		"FixtureHelper\\.Test1\n\n" +
+		"\\.\\.\\. Panic: Fixture has panicked " +
+		"\\(see related PANIC\\)\n$"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnTearDownTest(c *C) {
+	helper := FixtureHelper{panicOn: "TearDownTest"}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 5)
+
+	expected := "^\n-+\n" +
+		"PANIC: check_test\\.go:[0-9]+: " +
+		"FixtureHelper.TearDownTest\n\n" +
+		"\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" +
+		".+:[0-9]+\n" +
+		"  in (go)?panic\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.trace\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.TearDownTest\n" +
+		"(.|\n)*" +
+		"\n-+\n" +
+		"PANIC: check_test\\.go:[0-9]+: " +
+		"FixtureHelper\\.Test1\n\n" +
+		"\\.\\.\\. Panic: Fixture has panicked " +
+		"\\(see related PANIC\\)\n$"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnSetUpSuite(c *C) {
+	helper := FixtureHelper{panicOn: "SetUpSuite"}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 2)
+
+	expected := "^\n-+\n" +
+		"PANIC: check_test\\.go:[0-9]+: " +
+		"FixtureHelper.SetUpSuite\n\n" +
+		"\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" +
+		".+:[0-9]+\n" +
+		"  in (go)?panic\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.trace\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.SetUpSuite\n" +
+		"(.|\n)*$"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnTearDownSuite(c *C) {
+	helper := FixtureHelper{panicOn: "TearDownSuite"}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "SetUpTest")
+	c.Check(helper.calls[5], Equals, "Test2")
+	c.Check(helper.calls[6], Equals, "TearDownTest")
+	c.Check(helper.calls[7], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 8)
+
+	expected := "^\n-+\n" +
+		"PANIC: check_test\\.go:[0-9]+: " +
+		"FixtureHelper.TearDownSuite\n\n" +
+		"\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" +
+		".+:[0-9]+\n" +
+		"  in (go)?panic\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.trace\n" +
+		".*check_test.go:[0-9]+\n" +
+		"  in FixtureHelper.TearDownSuite\n" +
+		"(.|\n)*$"
+
+	c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// A wrong argument on a test or fixture will produce a nice error.
+
+func (s *FixtureS) TestPanicOnWrongTestArg(c *C) {
+	helper := WrongTestArgHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "TearDownTest")
+	c.Check(helper.calls[3], Equals, "SetUpTest")
+	c.Check(helper.calls[4], Equals, "Test2")
+	c.Check(helper.calls[5], Equals, "TearDownTest")
+	c.Check(helper.calls[6], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 7)
+
+	expected := "^\n-+\n" +
+		"PANIC: fixture_test\\.go:[0-9]+: " +
+		"WrongTestArgHelper\\.Test1\n\n" +
+		"\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " +
+		"should be \\*check\\.C\n"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) {
+	helper := WrongSetUpTestArgHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(len(helper.calls), Equals, 0)
+
+	expected :=
+		"^\n-+\n" +
+			"PANIC: fixture_test\\.go:[0-9]+: " +
+			"WrongSetUpTestArgHelper\\.SetUpTest\n\n" +
+			"\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " +
+			"should be \\*check\\.C\n"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) {
+	helper := WrongSetUpSuiteArgHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(len(helper.calls), Equals, 0)
+
+	expected :=
+		"^\n-+\n" +
+			"PANIC: fixture_test\\.go:[0-9]+: " +
+			"WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" +
+			"\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " +
+			"should be \\*check\\.C\n"
+
+	c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Nice errors also when tests or fixture have wrong arg count.
+
+func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) {
+	helper := WrongTestArgCountHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "TearDownTest")
+	c.Check(helper.calls[3], Equals, "SetUpTest")
+	c.Check(helper.calls[4], Equals, "Test2")
+	c.Check(helper.calls[5], Equals, "TearDownTest")
+	c.Check(helper.calls[6], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 7)
+
+	expected := "^\n-+\n" +
+		"PANIC: fixture_test\\.go:[0-9]+: " +
+		"WrongTestArgCountHelper\\.Test1\n\n" +
+		"\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " +
+		"should be \\*check\\.C\n"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) {
+	helper := WrongSetUpTestArgCountHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(len(helper.calls), Equals, 0)
+
+	expected :=
+		"^\n-+\n" +
+			"PANIC: fixture_test\\.go:[0-9]+: " +
+			"WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" +
+			"\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " +
+			"should be \\*check\\.C\n"
+
+	c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) {
+	helper := WrongSetUpSuiteArgCountHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(len(helper.calls), Equals, 0)
+
+	expected :=
+		"^\n-+\n" +
+			"PANIC: fixture_test\\.go:[0-9]+: " +
+			"WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" +
+			"\\.\\.\\. Panic: WrongSetUpSuiteArgCountHelper" +
+			"\\.SetUpSuite argument should be \\*check\\.C\n"
+
+	c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Helper test suites with wrong function arguments.
+
+type WrongTestArgHelper struct {
+	FixtureHelper
+}
+
+func (s *WrongTestArgHelper) Test1(t int) {
+}
+
+type WrongSetUpTestArgHelper struct {
+	FixtureHelper
+}
+
+func (s *WrongSetUpTestArgHelper) SetUpTest(t int) {
+}
+
+type WrongSetUpSuiteArgHelper struct {
+	FixtureHelper
+}
+
+func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) {
+}
+
+type WrongTestArgCountHelper struct {
+	FixtureHelper
+}
+
+func (s *WrongTestArgCountHelper) Test1(c *C, i int) {
+}
+
+type WrongSetUpTestArgCountHelper struct {
+	FixtureHelper
+}
+
+func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) {
+}
+
+type WrongSetUpSuiteArgCountHelper struct {
+	FixtureHelper
+}
+
+func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) {
+}
+
+// -----------------------------------------------------------------------
+// Ensure fixture doesn't run without tests.
+
+type NoTestsHelper struct {
+	hasRun bool
+}
+
+func (s *NoTestsHelper) SetUpSuite(c *C) {
+	s.hasRun = true
+}
+
+func (s *NoTestsHelper) TearDownSuite(c *C) {
+	s.hasRun = true
+}
+
+func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) {
+	helper := NoTestsHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Check(helper.hasRun, Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Verify that checks and assertions work correctly inside the fixture.
+
+type FixtureCheckHelper struct {
+	fail      string
+	completed bool
+}
+
+func (s *FixtureCheckHelper) SetUpSuite(c *C) {
+	switch s.fail {
+	case "SetUpSuiteAssert":
+		c.Assert(false, Equals, true)
+	case "SetUpSuiteCheck":
+		c.Check(false, Equals, true)
+	}
+	s.completed = true
+}
+
+func (s *FixtureCheckHelper) SetUpTest(c *C) {
+	switch s.fail {
+	case "SetUpTestAssert":
+		c.Assert(false, Equals, true)
+	case "SetUpTestCheck":
+		c.Check(false, Equals, true)
+	}
+	s.completed = true
+}
+
+func (s *FixtureCheckHelper) Test(c *C) {
+	// Do nothing.
+}
+
+func (s *FixtureS) TestSetUpSuiteCheck(c *C) {
+	helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Assert(output.value, Matches,
+		"\n---+\n"+
+			"FAIL: fixture_test\\.go:[0-9]+: "+
+			"FixtureCheckHelper\\.SetUpSuite\n\n"+
+			"fixture_test\\.go:[0-9]+:\n"+
+			"    c\\.Check\\(false, Equals, true\\)\n"+
+			"\\.+ obtained bool = false\n"+
+			"\\.+ expected bool = true\n\n")
+	c.Assert(helper.completed, Equals, true)
+}
+
+func (s *FixtureS) TestSetUpSuiteAssert(c *C) {
+	helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Assert(output.value, Matches,
+		"\n---+\n"+
+			"FAIL: fixture_test\\.go:[0-9]+: "+
+			"FixtureCheckHelper\\.SetUpSuite\n\n"+
+			"fixture_test\\.go:[0-9]+:\n"+
+			"    c\\.Assert\\(false, Equals, true\\)\n"+
+			"\\.+ obtained bool = false\n"+
+			"\\.+ expected bool = true\n\n")
+	c.Assert(helper.completed, Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Verify that logging within SetUpTest() persists within the test log itself.
+
+type FixtureLogHelper struct {
+	c *C
+}
+
+func (s *FixtureLogHelper) SetUpTest(c *C) {
+	s.c = c
+	c.Log("1")
+}
+
+func (s *FixtureLogHelper) Test(c *C) {
+	c.Log("2")
+	s.c.Log("3")
+	c.Log("4")
+	c.Fail()
+}
+
+func (s *FixtureLogHelper) TearDownTest(c *C) {
+	s.c.Log("5")
+}
+
+func (s *FixtureS) TestFixtureLogging(c *C) {
+	helper := FixtureLogHelper{}
+	output := String{}
+	Run(&helper, &RunConf{Output: &output})
+	c.Assert(output.value, Matches,
+		"\n---+\n"+
+			"FAIL: fixture_test\\.go:[0-9]+: "+
+			"FixtureLogHelper\\.Test\n\n"+
+			"1\n2\n3\n4\n5\n")
+}
+
+// -----------------------------------------------------------------------
+// Skip() within fixture methods.
+
+func (s *FixtureS) TestSkipSuite(c *C) {
+	helper := FixtureHelper{skip: true, skipOnN: 0}
+	output := String{}
+	result := Run(&helper, &RunConf{Output: &output})
+	c.Assert(output.value, Equals, "")
+	c.Assert(helper.calls[0], Equals, "SetUpSuite")
+	c.Assert(helper.calls[1], Equals, "TearDownSuite")
+	c.Assert(len(helper.calls), Equals, 2)
+	c.Assert(result.Skipped, Equals, 2)
+}
+
+func (s *FixtureS) TestSkipTest(c *C) {
+	helper := FixtureHelper{skip: true, skipOnN: 1}
+	output := String{}
+	result := Run(&helper, &RunConf{Output: &output})
+	c.Assert(helper.calls[0], Equals, "SetUpSuite")
+	c.Assert(helper.calls[1], Equals, "SetUpTest")
+	c.Assert(helper.calls[2], Equals, "SetUpTest")
+	c.Assert(helper.calls[3], Equals, "Test2")
+	c.Assert(helper.calls[4], Equals, "TearDownTest")
+	c.Assert(helper.calls[5], Equals, "TearDownSuite")
+	c.Assert(len(helper.calls), Equals, 6)
+	c.Assert(result.Skipped, Equals, 1)
+}
diff --git a/vendor/src/github.com/go-check/check/foundation_test.go b/vendor/src/github.com/go-check/check/foundation_test.go
new file mode 100644
index 0000000..8ecf791
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/foundation_test.go
@@ -0,0 +1,335 @@
+// These tests check that the foundations of gocheck are working properly.
+// They already assume that fundamental failing is working already, though,
+// since this was tested in bootstrap_test.go. Even then, some care may
+// still have to be taken when using external functions, since they should
+// of course not rely on functionality tested here.
+
+package check_test
+
+import (
+	"fmt"
+	"gopkg.in/check.v1"
+	"log"
+	"os"
+	"regexp"
+	"strings"
+)
+
+// -----------------------------------------------------------------------
+// Foundation test suite.
+
+type FoundationS struct{}
+
+var foundationS = check.Suite(&FoundationS{})
+
+func (s *FoundationS) TestCountSuite(c *check.C) {
+	suitesRun += 1
+}
+
+func (s *FoundationS) TestErrorf(c *check.C) {
+	// Do not use checkState() here.  It depends on Errorf() working.
+	expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
+		"    c.Errorf(\"Error %%v!\", \"message\")\n"+
+		"... Error: Error message!\n\n",
+		getMyLine()+1)
+	c.Errorf("Error %v!", "message")
+	failed := c.Failed()
+	c.Succeed()
+	if log := c.GetTestLog(); log != expectedLog {
+		c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog)
+		c.Fail()
+	}
+	if !failed {
+		c.Logf("Errorf() didn't put the test in a failed state")
+		c.Fail()
+	}
+}
+
+func (s *FoundationS) TestError(c *check.C) {
+	expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
+		"    c\\.Error\\(\"Error \", \"message!\"\\)\n"+
+		"\\.\\.\\. Error: Error message!\n\n",
+		getMyLine()+1)
+	c.Error("Error ", "message!")
+	checkState(c, nil,
+		&expectedState{
+			name:   "Error(`Error `, `message!`)",
+			failed: true,
+			log:    expectedLog,
+		})
+}
+
+func (s *FoundationS) TestFailNow(c *check.C) {
+	defer (func() {
+		if !c.Failed() {
+			c.Error("FailNow() didn't fail the test")
+		} else {
+			c.Succeed()
+			if c.GetTestLog() != "" {
+				c.Error("Something got logged:\n" + c.GetTestLog())
+			}
+		}
+	})()
+
+	c.FailNow()
+	c.Log("FailNow() didn't stop the test")
+}
+
+func (s *FoundationS) TestSucceedNow(c *check.C) {
+	defer (func() {
+		if c.Failed() {
+			c.Error("SucceedNow() didn't succeed the test")
+		}
+		if c.GetTestLog() != "" {
+			c.Error("Something got logged:\n" + c.GetTestLog())
+		}
+	})()
+
+	c.Fail()
+	c.SucceedNow()
+	c.Log("SucceedNow() didn't stop the test")
+}
+
+func (s *FoundationS) TestFailureHeader(c *check.C) {
+	output := String{}
+	failHelper := FailHelper{}
+	check.Run(&failHelper, &check.RunConf{Output: &output})
+	header := fmt.Sprintf(""+
+		"\n-----------------------------------"+
+		"-----------------------------------\n"+
+		"FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n",
+		failHelper.testLine)
+	if strings.Index(output.value, header) == -1 {
+		c.Errorf(""+
+			"Failure didn't print a proper header.\n"+
+			"... Got:\n%s... Expected something with:\n%s",
+			output.value, header)
+	}
+}
+
+func (s *FoundationS) TestFatal(c *check.C) {
+	var line int
+	defer (func() {
+		if !c.Failed() {
+			c.Error("Fatal() didn't fail the test")
+		} else {
+			c.Succeed()
+			expected := fmt.Sprintf("foundation_test.go:%d:\n"+
+				"    c.Fatal(\"Die \", \"now!\")\n"+
+				"... Error: Die now!\n\n",
+				line)
+			if c.GetTestLog() != expected {
+				c.Error("Incorrect log:", c.GetTestLog())
+			}
+		}
+	})()
+
+	line = getMyLine() + 1
+	c.Fatal("Die ", "now!")
+	c.Log("Fatal() didn't stop the test")
+}
+
+func (s *FoundationS) TestFatalf(c *check.C) {
+	var line int
+	defer (func() {
+		if !c.Failed() {
+			c.Error("Fatalf() didn't fail the test")
+		} else {
+			c.Succeed()
+			expected := fmt.Sprintf("foundation_test.go:%d:\n"+
+				"    c.Fatalf(\"Die %%s!\", \"now\")\n"+
+				"... Error: Die now!\n\n",
+				line)
+			if c.GetTestLog() != expected {
+				c.Error("Incorrect log:", c.GetTestLog())
+			}
+		}
+	})()
+
+	line = getMyLine() + 1
+	c.Fatalf("Die %s!", "now")
+	c.Log("Fatalf() didn't stop the test")
+}
+
+func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) {
+	log := fmt.Sprintf(""+
+		"foundation_test.go:%d:\n"+
+		"    result := c.Check\\(10, check.Equals, 20\\)\n"+
+		"\\.\\.\\. obtained int = 10\n"+
+		"\\.\\.\\. expected int = 20\n\n",
+		getMyLine()+1)
+	result := c.Check(10, check.Equals, 20)
+	checkState(c, result,
+		&expectedState{
+			name:   "Check(10, Equals, 20)",
+			result: false,
+			failed: true,
+			log:    log,
+		})
+}
+
+func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) {
+	result, line := checkEqualWrapper(c, 10, 20)
+	testLine := getMyLine() - 1
+	log := fmt.Sprintf(""+
+		"foundation_test.go:%d:\n"+
+		"    result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+
+		"check_test.go:%d:\n"+
+		"    return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+
+		"\\.\\.\\. obtained int = 10\n"+
+		"\\.\\.\\. expected int = 20\n\n",
+		testLine, line)
+	checkState(c, result,
+		&expectedState{
+			name:   "Check(10, Equals, 20)",
+			result: false,
+			failed: true,
+			log:    log,
+		})
+}
+
+// -----------------------------------------------------------------------
+// ExpectFailure() inverts the logic of failure.
+
+type ExpectFailureSucceedHelper struct{}
+
+func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) {
+	c.ExpectFailure("It booms!")
+	c.Error("Boom!")
+}
+
+type ExpectFailureFailHelper struct{}
+
+func (s *ExpectFailureFailHelper) TestFail(c *check.C) {
+	c.ExpectFailure("Bug #XYZ")
+}
+
+func (s *FoundationS) TestExpectFailureFail(c *check.C) {
+	helper := ExpectFailureFailHelper{}
+	output := String{}
+	result := check.Run(&helper, &check.RunConf{Output: &output})
+
+	expected := "" +
+		"^\n-+\n" +
+		"FAIL: foundation_test\\.go:[0-9]+:" +
+		" ExpectFailureFailHelper\\.TestFail\n\n" +
+		"\\.\\.\\. Error: Test succeeded, but was expected to fail\n" +
+		"\\.\\.\\. Reason: Bug #XYZ\n$"
+
+	matched, err := regexp.MatchString(expected, output.value)
+	if err != nil {
+		c.Error("Bad expression: ", expected)
+	} else if !matched {
+		c.Error("ExpectFailure() didn't log properly:\n", output.value)
+	}
+
+	c.Assert(result.ExpectedFailures, check.Equals, 0)
+}
+
+func (s *FoundationS) TestExpectFailureSucceed(c *check.C) {
+	helper := ExpectFailureSucceedHelper{}
+	output := String{}
+	result := check.Run(&helper, &check.RunConf{Output: &output})
+
+	c.Assert(output.value, check.Equals, "")
+	c.Assert(result.ExpectedFailures, check.Equals, 1)
+}
+
+func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) {
+	helper := ExpectFailureSucceedHelper{}
+	output := String{}
+	result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
+
+	expected := "" +
+		"FAIL EXPECTED: foundation_test\\.go:[0-9]+:" +
+		" ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n"
+
+	matched, err := regexp.MatchString(expected, output.value)
+	if err != nil {
+		c.Error("Bad expression: ", expected)
+	} else if !matched {
+		c.Error("ExpectFailure() didn't log properly:\n", output.value)
+	}
+
+	c.Assert(result.ExpectedFailures, check.Equals, 1)
+}
+
+// -----------------------------------------------------------------------
+// Skip() allows stopping a test without positive/negative results.
+
+type SkipTestHelper struct{}
+
+func (s *SkipTestHelper) TestFail(c *check.C) {
+	c.Skip("Wrong platform or whatever")
+	c.Error("Boom!")
+}
+
+func (s *FoundationS) TestSkip(c *check.C) {
+	helper := SkipTestHelper{}
+	output := String{}
+	check.Run(&helper, &check.RunConf{Output: &output})
+
+	if output.value != "" {
+		c.Error("Skip() logged something:\n", output.value)
+	}
+}
+
+func (s *FoundationS) TestSkipVerbose(c *check.C) {
+	helper := SkipTestHelper{}
+	output := String{}
+	check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
+
+	expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" +
+		" \\(Wrong platform or whatever\\)"
+	matched, err := regexp.MatchString(expected, output.value)
+	if err != nil {
+		c.Error("Bad expression: ", expected)
+	} else if !matched {
+		c.Error("Skip() didn't log properly:\n", output.value)
+	}
+}
+
+// -----------------------------------------------------------------------
+// Check minimum *log.Logger interface provided by *check.C.
+
+type minLogger interface {
+	Output(calldepth int, s string) error
+}
+
+func (s *BootstrapS) TestMinLogger(c *check.C) {
+	var logger minLogger
+	logger = log.New(os.Stderr, "", 0)
+	logger = c
+	logger.Output(0, "Hello there")
+	expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n`
+	output := c.GetTestLog()
+	c.Assert(output, check.Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Ensure that suites with embedded types are working fine, including the
+// the workaround for issue 906.
+
+type EmbeddedInternalS struct {
+	called bool
+}
+
+type EmbeddedS struct {
+	EmbeddedInternalS
+}
+
+var embeddedS = check.Suite(&EmbeddedS{})
+
+func (s *EmbeddedS) TestCountSuite(c *check.C) {
+	suitesRun += 1
+}
+
+func (s *EmbeddedInternalS) TestMethod(c *check.C) {
+	c.Error("TestMethod() of the embedded type was called!?")
+}
+
+func (s *EmbeddedS) TestMethod(c *check.C) {
+	// http://code.google.com/p/go/issues/detail?id=906
+	c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner?
+	s.called = true
+}
diff --git a/vendor/src/github.com/go-check/check/helpers.go b/vendor/src/github.com/go-check/check/helpers.go
new file mode 100644
index 0000000..4b6c26d
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/helpers.go
@@ -0,0 +1,231 @@
+package check
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// TestName returns the current test name in the form "SuiteName.TestName"
+func (c *C) TestName() string {
+	return c.testName
+}
+
+// -----------------------------------------------------------------------
+// Basic succeeding/failing logic.
+
+// Failed returns whether the currently running test has already failed.
+func (c *C) Failed() bool {
+	return c.status == failedSt
+}
+
+// Fail marks the currently running test as failed.
+//
+// Something ought to have been previously logged so the developer can tell
+// what went wrong. The higher level helper functions will fail the test
+// and do the logging properly.
+func (c *C) Fail() {
+	c.status = failedSt
+}
+
+// FailNow marks the currently running test as failed and stops running it.
+// Something ought to have been previously logged so the developer can tell
+// what went wrong. The higher level helper functions will fail the test
+// and do the logging properly.
+func (c *C) FailNow() {
+	c.Fail()
+	c.stopNow()
+}
+
+// Succeed marks the currently running test as succeeded, undoing any
+// previous failures.
+func (c *C) Succeed() {
+	c.status = succeededSt
+}
+
+// SucceedNow marks the currently running test as succeeded, undoing any
+// previous failures, and stops running the test.
+func (c *C) SucceedNow() {
+	c.Succeed()
+	c.stopNow()
+}
+
+// ExpectFailure informs that the running test is knowingly broken for
+// the provided reason. If the test does not fail, an error will be reported
+// to raise attention to this fact. This method is useful to temporarily
+// disable tests which cover well known problems until a better time to
+// fix the problem is found, without forgetting about the fact that a
+// failure still exists.
+func (c *C) ExpectFailure(reason string) {
+	if reason == "" {
+		panic("Missing reason why the test is expected to fail")
+	}
+	c.mustFail = true
+	c.reason = reason
+}
+
+// Skip skips the running test for the provided reason. If run from within
+// SetUpTest, the individual test being set up will be skipped, and if run
+// from within SetUpSuite, the whole suite is skipped.
+func (c *C) Skip(reason string) {
+	if reason == "" {
+		panic("Missing reason why the test is being skipped")
+	}
+	c.reason = reason
+	c.status = skippedSt
+	c.stopNow()
+}
+
+// -----------------------------------------------------------------------
+// Basic logging.
+
+// GetTestLog returns the current test error output.
+func (c *C) GetTestLog() string {
+	return c.logb.String()
+}
+
+// Log logs some information into the test error output.
+// The provided arguments are assembled together into a string with fmt.Sprint.
+func (c *C) Log(args ...interface{}) {
+	c.log(args...)
+}
+
+// Log logs some information into the test error output.
+// The provided arguments are assembled together into a string with fmt.Sprintf.
+func (c *C) Logf(format string, args ...interface{}) {
+	c.logf(format, args...)
+}
+
+// Output enables *C to be used as a logger in functions that require only
+// the minimum interface of *log.Logger.
+func (c *C) Output(calldepth int, s string) error {
+	d := time.Now().Sub(c.startTime)
+	msec := d / time.Millisecond
+	sec := d / time.Second
+	min := d / time.Minute
+
+	c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
+	return nil
+}
+
+// Error logs an error into the test error output and marks the test as failed.
+// The provided arguments are assembled together into a string with fmt.Sprint.
+func (c *C) Error(args ...interface{}) {
+	c.logCaller(1)
+	c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
+	c.logNewLine()
+	c.Fail()
+}
+
+// Errorf logs an error into the test error output and marks the test as failed.
+// The provided arguments are assembled together into a string with fmt.Sprintf.
+func (c *C) Errorf(format string, args ...interface{}) {
+	c.logCaller(1)
+	c.logString(fmt.Sprintf("Error: "+format, args...))
+	c.logNewLine()
+	c.Fail()
+}
+
+// Fatal logs an error into the test error output, marks the test as failed, and
+// stops the test execution. The provided arguments are assembled together into
+// a string with fmt.Sprint.
+func (c *C) Fatal(args ...interface{}) {
+	c.logCaller(1)
+	c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
+	c.logNewLine()
+	c.FailNow()
+}
+
+// Fatlaf logs an error into the test error output, marks the test as failed, and
+// stops the test execution. The provided arguments are assembled together into
+// a string with fmt.Sprintf.
+func (c *C) Fatalf(format string, args ...interface{}) {
+	c.logCaller(1)
+	c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
+	c.logNewLine()
+	c.FailNow()
+}
+
+// -----------------------------------------------------------------------
+// Generic checks and assertions based on checkers.
+
+// Check verifies if the first value matches the expected value according
+// to the provided checker. If they do not match, an error is logged, the
+// test is marked as failed, and the test execution continues.
+//
+// Some checkers may not need the expected argument (e.g. IsNil).
+//
+// Extra arguments provided to the function are logged next to the reported
+// problem when the matching fails.
+func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
+	return c.internalCheck("Check", obtained, checker, args...)
+}
+
+// Assert ensures that the first value matches the expected value according
+// to the provided checker. If they do not match, an error is logged, the
+// test is marked as failed, and the test execution stops.
+//
+// Some checkers may not need the expected argument (e.g. IsNil).
+//
+// Extra arguments provided to the function are logged next to the reported
+// problem when the matching fails.
+func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
+	if !c.internalCheck("Assert", obtained, checker, args...) {
+		c.stopNow()
+	}
+}
+
+func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
+	if checker == nil {
+		c.logCaller(2)
+		c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
+		c.logString("Oops.. you've provided a nil checker!")
+		c.logNewLine()
+		c.Fail()
+		return false
+	}
+
+	// If the last argument is a bug info, extract it out.
+	var comment CommentInterface
+	if len(args) > 0 {
+		if c, ok := args[len(args)-1].(CommentInterface); ok {
+			comment = c
+			args = args[:len(args)-1]
+		}
+	}
+
+	params := append([]interface{}{obtained}, args...)
+	info := checker.Info()
+
+	if len(params) != len(info.Params) {
+		names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
+		c.logCaller(2)
+		c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
+		c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
+		c.logNewLine()
+		c.Fail()
+		return false
+	}
+
+	// Copy since it may be mutated by Check.
+	names := append([]string{}, info.Params...)
+
+	// Do the actual check.
+	result, error := checker.Check(params, names)
+	if !result || error != "" {
+		c.logCaller(2)
+		for i := 0; i != len(params); i++ {
+			c.logValue(names[i], params[i])
+		}
+		if comment != nil {
+			c.logString(comment.CheckCommentString())
+		}
+		if error != "" {
+			c.logString(error)
+		}
+		c.logNewLine()
+		c.Fail()
+		return false
+	}
+	return true
+}
diff --git a/vendor/src/github.com/go-check/check/helpers_test.go b/vendor/src/github.com/go-check/check/helpers_test.go
new file mode 100644
index 0000000..4baa656
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/helpers_test.go
@@ -0,0 +1,519 @@
+// These tests verify the inner workings of the helper methods associated
+// with check.T.
+
+package check_test
+
+import (
+	"gopkg.in/check.v1"
+	"os"
+	"reflect"
+	"runtime"
+	"sync"
+)
+
+var helpersS = check.Suite(&HelpersS{})
+
+type HelpersS struct{}
+
+func (s *HelpersS) TestCountSuite(c *check.C) {
+	suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Fake checker and bug info to verify the behavior of Assert() and Check().
+
+type MyChecker struct {
+	info   *check.CheckerInfo
+	params []interface{}
+	names  []string
+	result bool
+	error  string
+}
+
+func (checker *MyChecker) Info() *check.CheckerInfo {
+	if checker.info == nil {
+		return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}}
+	}
+	return checker.info
+}
+
+func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) {
+	rparams := checker.params
+	rnames := checker.names
+	checker.params = append([]interface{}{}, params...)
+	checker.names = append([]string{}, names...)
+	if rparams != nil {
+		copy(params, rparams)
+	}
+	if rnames != nil {
+		copy(names, rnames)
+	}
+	return checker.result, checker.error
+}
+
+type myCommentType string
+
+func (c myCommentType) CheckCommentString() string {
+	return string(c)
+}
+
+func myComment(s string) myCommentType {
+	return myCommentType(s)
+}
+
+// -----------------------------------------------------------------------
+// Ensure a real checker actually works fine.
+
+func (s *HelpersS) TestCheckerInterface(c *check.C) {
+	testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} {
+		return c.Check(1, check.Equals, 1)
+	})
+}
+
+// -----------------------------------------------------------------------
+// Tests for Check(), mostly the same as for Assert() following these.
+
+func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) {
+	checker := &MyChecker{result: true}
+	testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} {
+		return c.Check(1, checker, 2)
+	})
+	if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
+		c.Fatalf("Bad params for check: %#v", checker.params)
+	}
+}
+
+func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) {
+	checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+	testHelperSuccess(c, "Check(1, checker)", true, func() interface{} {
+		return c.Check(1, checker)
+	})
+	if !reflect.DeepEqual(checker.params, []interface{}{1}) {
+		c.Fatalf("Bad params for check: %#v", checker.params)
+	}
+}
+
+func (s *HelpersS) TestCheckFailWithExpected(c *check.C) {
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker, 2\\)\n" +
+		"\\.+ myobtained int = 1\n" +
+		"\\.+ myexpected int = 2\n\n"
+	testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker, 2)
+		})
+}
+
+func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) {
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
+		"\\.+ myobtained int = 1\n" +
+		"\\.+ myexpected int = 2\n" +
+		"\\.+ Hello world!\n\n"
+	testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker, 2, myComment("Hello world!"))
+		})
+}
+
+func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) {
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    // Nice leading comment\\.\n" +
+		"    return c\\.Check\\(1, checker, 2\\) // Hello there\n" +
+		"\\.+ myobtained int = 1\n" +
+		"\\.+ myexpected int = 2\n\n"
+	testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
+		func() interface{} {
+			// Nice leading comment.
+			return c.Check(1, checker, 2) // Hello there
+		})
+}
+
+func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) {
+	checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker\\)\n" +
+		"\\.+ myvalue int = 1\n\n"
+	testHelperFailure(c, "Check(1, checker)", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker)
+		})
+}
+
+func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) {
+	checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
+		"\\.+ myvalue int = 1\n" +
+		"\\.+ Hello world!\n\n"
+	testHelperFailure(c, "Check(1, checker, msg)", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker, myComment("Hello world!"))
+		})
+}
+
+func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) {
+	checker := &MyChecker{result: true}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker\\)\n" +
+		"\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
+		"\\.+ Wrong number of parameters for MyChecker: " +
+		"want 3, got 2\n\n"
+	testHelperFailure(c, "Check(1, checker, !?)", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker)
+		})
+}
+
+func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) {
+	checker := &MyChecker{result: true}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker, 2, 3\\)\n" +
+		"\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
+		"\\.+ Wrong number of parameters for MyChecker: " +
+		"want 3, got 4\n\n"
+	testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker, 2, 3)
+		})
+}
+
+func (s *HelpersS) TestCheckWithError(c *check.C) {
+	checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker, 2\\)\n" +
+		"\\.+ myobtained int = 1\n" +
+		"\\.+ myexpected int = 2\n" +
+		"\\.+ Some not so cool data provided!\n\n"
+	testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker, 2)
+		})
+}
+
+func (s *HelpersS) TestCheckWithNilChecker(c *check.C) {
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, nil\\)\n" +
+		"\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
+		"\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
+	testHelperFailure(c, "Check(obtained, nil)", false, false, log,
+		func() interface{} {
+			return c.Check(1, nil)
+		})
+}
+
+func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) {
+	checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    return c\\.Check\\(1, checker, 2\\)\n" +
+		"\\.+ newobtained int = 3\n" +
+		"\\.+ newexpected int = 4\n\n"
+	testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log,
+		func() interface{} {
+			return c.Check(1, checker, 2)
+		})
+}
+
+// -----------------------------------------------------------------------
+// Tests for Assert(), mostly the same as for Check() above.
+
+func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) {
+	checker := &MyChecker{result: true}
+	testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} {
+		c.Assert(1, checker, 2)
+		return nil
+	})
+	if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
+		c.Fatalf("Bad params for check: %#v", checker.params)
+	}
+}
+
+func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) {
+	checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+	testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} {
+		c.Assert(1, checker)
+		return nil
+	})
+	if !reflect.DeepEqual(checker.params, []interface{}{1}) {
+		c.Fatalf("Bad params for check: %#v", checker.params)
+	}
+}
+
+func (s *HelpersS) TestAssertFailWithExpected(c *check.C) {
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    c\\.Assert\\(1, checker, 2\\)\n" +
+		"\\.+ myobtained int = 1\n" +
+		"\\.+ myexpected int = 2\n\n"
+	testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
+		func() interface{} {
+			c.Assert(1, checker, 2)
+			return nil
+		})
+}
+
+func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) {
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
+		"\\.+ myobtained int = 1\n" +
+		"\\.+ myexpected int = 2\n" +
+		"\\.+ Hello world!\n\n"
+	testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log,
+		func() interface{} {
+			c.Assert(1, checker, 2, myComment("Hello world!"))
+			return nil
+		})
+}
+
+func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) {
+	checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    c\\.Assert\\(1, checker\\)\n" +
+		"\\.+ myvalue int = 1\n\n"
+	testHelperFailure(c, "Assert(1, checker)", nil, true, log,
+		func() interface{} {
+			c.Assert(1, checker)
+			return nil
+		})
+}
+
+func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) {
+	checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
+		"\\.+ myvalue int = 1\n" +
+		"\\.+ Hello world!\n\n"
+	testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log,
+		func() interface{} {
+			c.Assert(1, checker, myComment("Hello world!"))
+			return nil
+		})
+}
+
+func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) {
+	checker := &MyChecker{result: true}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    c\\.Assert\\(1, checker\\)\n" +
+		"\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" +
+		"\\.+ Wrong number of parameters for MyChecker: " +
+		"want 3, got 2\n\n"
+	testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log,
+		func() interface{} {
+			c.Assert(1, checker)
+			return nil
+		})
+}
+
+func (s *HelpersS) TestAssertWithError(c *check.C) {
+	checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    c\\.Assert\\(1, checker, 2\\)\n" +
+		"\\.+ myobtained int = 1\n" +
+		"\\.+ myexpected int = 2\n" +
+		"\\.+ Some not so cool data provided!\n\n"
+	testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
+		func() interface{} {
+			c.Assert(1, checker, 2)
+			return nil
+		})
+}
+
+func (s *HelpersS) TestAssertWithNilChecker(c *check.C) {
+	log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+		"    c\\.Assert\\(1, nil\\)\n" +
+		"\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
+		"\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
+	testHelperFailure(c, "Assert(obtained, nil)", nil, true, log,
+		func() interface{} {
+			c.Assert(1, nil)
+			return nil
+		})
+}
+
+// -----------------------------------------------------------------------
+// Ensure that values logged work properly in some interesting cases.
+
+func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) {
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+		"    return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" +
+		"\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" +
+		"\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n"
+	testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log,
+		func() interface{} {
+			return c.Check([]byte{1, 2}, checker, []byte{1, 3})
+		})
+}
+
+func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) {
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+		"    return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" +
+		"\\.+ myobtained string = \"\" \\+\n" +
+		"\\.+     \"a\\\\n\" \\+\n" +
+		"\\.+     \"b\\\\n\"\n" +
+		"\\.+ myexpected string = \"\" \\+\n" +
+		"\\.+     \"a\\\\n\" \\+\n" +
+		"\\.+     \"b\\\\n\" \\+\n" +
+		"\\.+     \"c\"\n\n"
+	testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log,
+		func() interface{} {
+			return c.Check("a\nb\n", checker, "a\nb\nc")
+		})
+}
+
+func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) {
+	// If the newline is at the end of the string, don't log as multi-line.
+	checker := &MyChecker{result: false}
+	log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+		"    return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" +
+		"\\.+ myobtained string = \"a b\\\\n\"\n" +
+		"\\.+ myexpected string = \"\" \\+\n" +
+		"\\.+     \"a\\\\n\" \\+\n" +
+		"\\.+     \"b\"\n\n"
+	testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log,
+		func() interface{} {
+			return c.Check("a b\n", checker, "a\nb")
+		})
+}
+
+// -----------------------------------------------------------------------
+// MakeDir() tests.
+
+type MkDirHelper struct {
+	path1  string
+	path2  string
+	isDir1 bool
+	isDir2 bool
+	isDir3 bool
+	isDir4 bool
+}
+
+func (s *MkDirHelper) SetUpSuite(c *check.C) {
+	s.path1 = c.MkDir()
+	s.isDir1 = isDir(s.path1)
+}
+
+func (s *MkDirHelper) Test(c *check.C) {
+	s.path2 = c.MkDir()
+	s.isDir2 = isDir(s.path2)
+}
+
+func (s *MkDirHelper) TearDownSuite(c *check.C) {
+	s.isDir3 = isDir(s.path1)
+	s.isDir4 = isDir(s.path2)
+}
+
+func (s *HelpersS) TestMkDir(c *check.C) {
+	helper := MkDirHelper{}
+	output := String{}
+	check.Run(&helper, &check.RunConf{Output: &output})
+	c.Assert(output.value, check.Equals, "")
+	c.Check(helper.isDir1, check.Equals, true)
+	c.Check(helper.isDir2, check.Equals, true)
+	c.Check(helper.isDir3, check.Equals, true)
+	c.Check(helper.isDir4, check.Equals, true)
+	c.Check(helper.path1, check.Not(check.Equals),
+		helper.path2)
+	c.Check(isDir(helper.path1), check.Equals, false)
+	c.Check(isDir(helper.path2), check.Equals, false)
+}
+
+func isDir(path string) bool {
+	if stat, err := os.Stat(path); err == nil {
+		return stat.IsDir()
+	}
+	return false
+}
+
+// Concurrent logging should not corrupt the underling buffer.
+// Use go test -race to detect the race in this test.
+func (s *HelpersS) TestConcurrentLogging(c *check.C) {
+	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
+	var start, stop sync.WaitGroup
+	start.Add(1)
+	for i, n := 0, runtime.NumCPU()*2; i < n; i++ {
+		stop.Add(1)
+		go func(i int) {
+			start.Wait()
+			for j := 0; j < 30; j++ {
+				c.Logf("Worker %d: line %d", i, j)
+			}
+			stop.Done()
+		}(i)
+	}
+	start.Done()
+	stop.Wait()
+}
+
+// -----------------------------------------------------------------------
+// Test the TestName function
+
+type TestNameHelper struct {
+	name1 string
+	name2 string
+	name3 string
+	name4 string
+	name5 string
+}
+
+func (s *TestNameHelper) SetUpSuite(c *check.C)    { s.name1 = c.TestName() }
+func (s *TestNameHelper) SetUpTest(c *check.C)     { s.name2 = c.TestName() }
+func (s *TestNameHelper) Test(c *check.C)          { s.name3 = c.TestName() }
+func (s *TestNameHelper) TearDownTest(c *check.C)  { s.name4 = c.TestName() }
+func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() }
+
+func (s *HelpersS) TestTestName(c *check.C) {
+	helper := TestNameHelper{}
+	output := String{}
+	check.Run(&helper, &check.RunConf{Output: &output})
+	c.Check(helper.name1, check.Equals, "")
+	c.Check(helper.name2, check.Equals, "TestNameHelper.Test")
+	c.Check(helper.name3, check.Equals, "TestNameHelper.Test")
+	c.Check(helper.name4, check.Equals, "TestNameHelper.Test")
+	c.Check(helper.name5, check.Equals, "")
+}
+
+// -----------------------------------------------------------------------
+// A couple of helper functions to test helper functions. :-)
+
+func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) {
+	var result interface{}
+	defer (func() {
+		if err := recover(); err != nil {
+			panic(err)
+		}
+		checkState(c, result,
+			&expectedState{
+				name:   name,
+				result: expectedResult,
+				failed: false,
+				log:    "",
+			})
+	})()
+	result = closure()
+}
+
+func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) {
+	var result interface{}
+	defer (func() {
+		if err := recover(); err != nil {
+			panic(err)
+		}
+		checkState(c, result,
+			&expectedState{
+				name:   name,
+				result: expectedResult,
+				failed: true,
+				log:    log,
+			})
+	})()
+	result = closure()
+	if shouldStop {
+		c.Logf("%s didn't stop when it should", name)
+	}
+}
diff --git a/vendor/src/github.com/go-check/check/printer.go b/vendor/src/github.com/go-check/check/printer.go
new file mode 100644
index 0000000..e0f7557
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/printer.go
@@ -0,0 +1,168 @@
+package check
+
+import (
+	"bytes"
+	"go/ast"
+	"go/parser"
+	"go/printer"
+	"go/token"
+	"os"
+)
+
+func indent(s, with string) (r string) {
+	eol := true
+	for i := 0; i != len(s); i++ {
+		c := s[i]
+		switch {
+		case eol && c == '\n' || c == '\r':
+		case c == '\n' || c == '\r':
+			eol = true
+		case eol:
+			eol = false
+			s = s[:i] + with + s[i:]
+			i += len(with)
+		}
+	}
+	return s
+}
+
+func printLine(filename string, line int) (string, error) {
+	fset := token.NewFileSet()
+	file, err := os.Open(filename)
+	if err != nil {
+		return "", err
+	}
+	fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
+	if err != nil {
+		return "", err
+	}
+	config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
+	lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
+	ast.Walk(lp, fnode)
+	result := lp.output.Bytes()
+	// Comments leave \n at the end.
+	n := len(result)
+	for n > 0 && result[n-1] == '\n' {
+		n--
+	}
+	return string(result[:n]), nil
+}
+
+type linePrinter struct {
+	config *printer.Config
+	fset   *token.FileSet
+	fnode  *ast.File
+	line   int
+	output bytes.Buffer
+	stmt   ast.Stmt
+}
+
+func (lp *linePrinter) emit() bool {
+	if lp.stmt != nil {
+		lp.trim(lp.stmt)
+		lp.printWithComments(lp.stmt)
+		lp.stmt = nil
+		return true
+	}
+	return false
+}
+
+func (lp *linePrinter) printWithComments(n ast.Node) {
+	nfirst := lp.fset.Position(n.Pos()).Line
+	nlast := lp.fset.Position(n.End()).Line
+	for _, g := range lp.fnode.Comments {
+		cfirst := lp.fset.Position(g.Pos()).Line
+		clast := lp.fset.Position(g.End()).Line
+		if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
+			for _, c := range g.List {
+				lp.output.WriteString(c.Text)
+				lp.output.WriteByte('\n')
+			}
+		}
+		if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
+			// The printer will not include the comment if it starts past
+			// the node itself. Trick it into printing by overlapping the
+			// slash with the end of the statement.
+			g.List[0].Slash = n.End() - 1
+		}
+	}
+	node := &printer.CommentedNode{n, lp.fnode.Comments}
+	lp.config.Fprint(&lp.output, lp.fset, node)
+}
+
+func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
+	if n == nil {
+		if lp.output.Len() == 0 {
+			lp.emit()
+		}
+		return nil
+	}
+	first := lp.fset.Position(n.Pos()).Line
+	last := lp.fset.Position(n.End()).Line
+	if first <= lp.line && last >= lp.line {
+		// Print the innermost statement containing the line.
+		if stmt, ok := n.(ast.Stmt); ok {
+			if _, ok := n.(*ast.BlockStmt); !ok {
+				lp.stmt = stmt
+			}
+		}
+		if first == lp.line && lp.emit() {
+			return nil
+		}
+		return lp
+	}
+	return nil
+}
+
+func (lp *linePrinter) trim(n ast.Node) bool {
+	stmt, ok := n.(ast.Stmt)
+	if !ok {
+		return true
+	}
+	line := lp.fset.Position(n.Pos()).Line
+	if line != lp.line {
+		return false
+	}
+	switch stmt := stmt.(type) {
+	case *ast.IfStmt:
+		stmt.Body = lp.trimBlock(stmt.Body)
+	case *ast.SwitchStmt:
+		stmt.Body = lp.trimBlock(stmt.Body)
+	case *ast.TypeSwitchStmt:
+		stmt.Body = lp.trimBlock(stmt.Body)
+	case *ast.CaseClause:
+		stmt.Body = lp.trimList(stmt.Body)
+	case *ast.CommClause:
+		stmt.Body = lp.trimList(stmt.Body)
+	case *ast.BlockStmt:
+		stmt.List = lp.trimList(stmt.List)
+	}
+	return true
+}
+
+func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
+	if !lp.trim(stmt) {
+		return lp.emptyBlock(stmt)
+	}
+	stmt.Rbrace = stmt.Lbrace
+	return stmt
+}
+
+func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
+	for i := 0; i != len(stmts); i++ {
+		if !lp.trim(stmts[i]) {
+			stmts[i] = lp.emptyStmt(stmts[i])
+			break
+		}
+	}
+	return stmts
+}
+
+func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
+	return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
+}
+
+func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
+	p := n.Pos()
+	return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
+}
diff --git a/vendor/src/github.com/go-check/check/printer_test.go b/vendor/src/github.com/go-check/check/printer_test.go
new file mode 100644
index 0000000..538b2d5
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/printer_test.go
@@ -0,0 +1,104 @@
+package check_test
+
+import (
+    .   "gopkg.in/check.v1"
+)
+
+var _ = Suite(&PrinterS{})
+
+type PrinterS struct{}
+
+func (s *PrinterS) TestCountSuite(c *C) {
+    suitesRun += 1
+}
+
+var printTestFuncLine int
+
+func init() {
+    printTestFuncLine = getMyLine() + 3
+}
+
+func printTestFunc() {
+    println(1)           // Comment1
+    if 2 == 2 {          // Comment2
+        println(3)       // Comment3
+    }
+    switch 5 {
+    case 6: println(6)   // Comment6
+        println(7)
+    }
+    switch interface{}(9).(type) {// Comment9
+    case int: println(10)
+        println(11)
+    }
+    select {
+    case <-(chan bool)(nil): println(14)
+        println(15)
+    default: println(16)
+        println(17)
+    }
+    println(19,
+        20)
+    _ = func() { println(21)
+        println(22)
+    }
+    println(24, func() {
+        println(25)
+    })
+    // Leading comment
+    // with multiple lines.
+    println(29)  // Comment29
+}
+
+var printLineTests = []struct {
+    line   int
+    output string
+}{
+    {1, "println(1) // Comment1"},
+    {2, "if 2 == 2 { // Comment2\n    ...\n}"},
+    {3, "println(3) // Comment3"},
+    {5, "switch 5 {\n...\n}"},
+    {6, "case 6:\n    println(6) // Comment6\n    ..."},
+    {7, "println(7)"},
+    {9, "switch interface{}(9).(type) { // Comment9\n...\n}"},
+    {10, "case int:\n    println(10)\n    ..."},
+    {14, "case <-(chan bool)(nil):\n    println(14)\n    ..."},
+    {15, "println(15)"},
+    {16, "default:\n    println(16)\n    ..."},
+    {17, "println(17)"},
+    {19, "println(19,\n    20)"},
+    {20, "println(19,\n    20)"},
+    {21, "_ = func() {\n    println(21)\n    println(22)\n}"},
+    {22, "println(22)"},
+    {24, "println(24, func() {\n    println(25)\n})"},
+    {25, "println(25)"},
+    {26, "println(24, func() {\n    println(25)\n})"},
+    {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"},
+}
+
+func (s *PrinterS) TestPrintLine(c *C) {
+    for _, test := range printLineTests {
+        output, err := PrintLine("printer_test.go", printTestFuncLine+test.line)
+        c.Assert(err, IsNil)
+        c.Assert(output, Equals, test.output)
+    }
+}
+
+var indentTests = []struct {
+    in, out string
+}{
+    {"", ""},
+    {"\n", "\n"},
+    {"a", ">>>a"},
+    {"a\n", ">>>a\n"},
+    {"a\nb", ">>>a\n>>>b"},
+    {" ", ">>> "},
+}
+
+func (s *PrinterS) TestIndent(c *C) {
+    for _, test := range indentTests {
+        out := Indent(test.in, ">>>")
+        c.Assert(out, Equals, test.out)
+    }
+
+}
diff --git a/vendor/src/github.com/go-check/check/run.go b/vendor/src/github.com/go-check/check/run.go
new file mode 100644
index 0000000..da8fd79
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/run.go
@@ -0,0 +1,175 @@
+package check
+
+import (
+	"bufio"
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+)
+
+// -----------------------------------------------------------------------
+// Test suite registry.
+
+var allSuites []interface{}
+
+// Suite registers the given value as a test suite to be run. Any methods
+// starting with the Test prefix in the given value will be considered as
+// a test method.
+func Suite(suite interface{}) interface{} {
+	allSuites = append(allSuites, suite)
+	return suite
+}
+
+// -----------------------------------------------------------------------
+// Public running interface.
+
+var (
+	oldFilterFlag  = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
+	oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
+	oldStreamFlag  = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
+	oldBenchFlag   = flag.Bool("gocheck.b", false, "Run benchmarks")
+	oldBenchTime   = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark")
+	oldListFlag    = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
+	oldWorkFlag    = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")
+
+	newFilterFlag  = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
+	newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
+	newStreamFlag  = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
+	newBenchFlag   = flag.Bool("check.b", false, "Run benchmarks")
+	newBenchTime   = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark")
+	newBenchMem    = flag.Bool("check.bmem", false, "Report memory benchmarks")
+	newListFlag    = flag.Bool("check.list", false, "List the names of all tests that will be run")
+	newWorkFlag    = flag.Bool("check.work", false, "Display and do not remove the test working directory")
+)
+
+// TestingT runs all test suites registered with the Suite function,
+// printing results to stdout, and reporting any failures back to
+// the "testing" package.
+func TestingT(testingT *testing.T) {
+	benchTime := *newBenchTime
+	if benchTime == 1*time.Second {
+		benchTime = *oldBenchTime
+	}
+	conf := &RunConf{
+		Filter:        *oldFilterFlag + *newFilterFlag,
+		Verbose:       *oldVerboseFlag || *newVerboseFlag,
+		Stream:        *oldStreamFlag || *newStreamFlag,
+		Benchmark:     *oldBenchFlag || *newBenchFlag,
+		BenchmarkTime: benchTime,
+		BenchmarkMem:  *newBenchMem,
+		KeepWorkDir:   *oldWorkFlag || *newWorkFlag,
+	}
+	if *oldListFlag || *newListFlag {
+		w := bufio.NewWriter(os.Stdout)
+		for _, name := range ListAll(conf) {
+			fmt.Fprintln(w, name)
+		}
+		w.Flush()
+		return
+	}
+	result := RunAll(conf)
+	println(result.String())
+	if !result.Passed() {
+		testingT.Fail()
+	}
+}
+
+// RunAll runs all test suites registered with the Suite function, using the
+// provided run configuration.
+func RunAll(runConf *RunConf) *Result {
+	result := Result{}
+	for _, suite := range allSuites {
+		result.Add(Run(suite, runConf))
+	}
+	return &result
+}
+
+// Run runs the provided test suite using the provided run configuration.
+func Run(suite interface{}, runConf *RunConf) *Result {
+	runner := newSuiteRunner(suite, runConf)
+	return runner.run()
+}
+
+// ListAll returns the names of all the test functions registered with the
+// Suite function that will be run with the provided run configuration.
+func ListAll(runConf *RunConf) []string {
+	var names []string
+	for _, suite := range allSuites {
+		names = append(names, List(suite, runConf)...)
+	}
+	return names
+}
+
+// List returns the names of the test functions in the given
+// suite that will be run with the provided run configuration.
+func List(suite interface{}, runConf *RunConf) []string {
+	var names []string
+	runner := newSuiteRunner(suite, runConf)
+	for _, t := range runner.tests {
+		names = append(names, t.String())
+	}
+	return names
+}
+
+// -----------------------------------------------------------------------
+// Result methods.
+
+func (r *Result) Add(other *Result) {
+	r.Succeeded += other.Succeeded
+	r.Skipped += other.Skipped
+	r.Failed += other.Failed
+	r.Panicked += other.Panicked
+	r.FixturePanicked += other.FixturePanicked
+	r.ExpectedFailures += other.ExpectedFailures
+	r.Missed += other.Missed
+	if r.WorkDir != "" && other.WorkDir != "" {
+		r.WorkDir += ":" + other.WorkDir
+	} else if other.WorkDir != "" {
+		r.WorkDir = other.WorkDir
+	}
+}
+
+func (r *Result) Passed() bool {
+	return (r.Failed == 0 && r.Panicked == 0 &&
+		r.FixturePanicked == 0 && r.Missed == 0 &&
+		r.RunError == nil)
+}
+
+func (r *Result) String() string {
+	if r.RunError != nil {
+		return "ERROR: " + r.RunError.Error()
+	}
+
+	var value string
+	if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
+		r.Missed == 0 {
+		value = "OK: "
+	} else {
+		value = "OOPS: "
+	}
+	value += fmt.Sprintf("%d passed", r.Succeeded)
+	if r.Skipped != 0 {
+		value += fmt.Sprintf(", %d skipped", r.Skipped)
+	}
+	if r.ExpectedFailures != 0 {
+		value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
+	}
+	if r.Failed != 0 {
+		value += fmt.Sprintf(", %d FAILED", r.Failed)
+	}
+	if r.Panicked != 0 {
+		value += fmt.Sprintf(", %d PANICKED", r.Panicked)
+	}
+	if r.FixturePanicked != 0 {
+		value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
+	}
+	if r.Missed != 0 {
+		value += fmt.Sprintf(", %d MISSED", r.Missed)
+	}
+	if r.WorkDir != "" {
+		value += "\nWORK=" + r.WorkDir
+	}
+	return value
+}
diff --git a/vendor/src/github.com/go-check/check/run_test.go b/vendor/src/github.com/go-check/check/run_test.go
new file mode 100644
index 0000000..f41fffc
--- /dev/null
+++ b/vendor/src/github.com/go-check/check/run_test.go
@@ -0,0 +1,419 @@
+// These tests verify the test running logic.
+
+package check_test
+
+import (
+	"errors"
+	. "gopkg.in/check.v1"
+	"os"
+	"sync"
+)
+
+var runnerS = Suite(&RunS{})
+
+type RunS struct{}
+
+func (s *RunS) TestCountSuite(c *C) {
+	suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Tests ensuring result counting works properly.
+
+func (s *RunS) TestSuccess(c *C) {
+	output := String{}
+	result := Run(&SuccessHelper{}, &RunConf{Output: &output})
+	c.Check(result.Succeeded, Equals, 1)
+	c.Check(result.Failed, Equals, 0)
+	c.Check(result.Skipped, Equals, 0)
+	c.Check(result.Panicked, Equals, 0)
+	c.Check(result.FixturePanicked, Equals, 0)
+	c.Check(result.Missed, Equals, 0)
+	c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestFailure(c *C) {
+	output := String{}
+	result := Run(&FailHelper{}, &RunConf{Output: &output})
+	c.Check(result.Succeeded, Equals, 0)
+	c.Check(result.Failed, Equals, 1)
+	c.Check(result.Skipped, Equals, 0)
+	c.Check(result.Panicked, Equals, 0)
+	c.Check(result.FixturePanicked, Equals, 0)
+	c.Check(result.Missed, Equals, 0)
+	c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestFixture(c *C) {
+	output := String{}
+	result := Run(&FixtureHelper{}, &RunConf{Output: &output})
+	c.Check(result.Succeeded, Equals, 2)
+	c.Check(result.Failed, Equals, 0)
+	c.Check(result.Skipped, Equals, 0)
+	c.Check(result.Panicked, Equals, 0)
+	c.Check(result.FixturePanicked, Equals, 0)
+	c.Check(result.Missed, Equals, 0)
+	c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnTest(c *C) {
+	output := String{}
+	helper := &FixtureHelper{panicOn: "Test1"}
+	result := Run(helper, &RunConf{Output: &output})
+	c.Check(result.Succeeded, Equals, 1)
+	c.Check(result.Failed, Equals, 0)
+	c.Check(result.Skipped, Equals, 0)
+	c.Check(result.Panicked, Equals, 1)
+	c.Check(result.FixturePanicked, Equals, 0)
+	c.Check(result.Missed, Equals, 0)
+	c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnSetUpTest(c *C) {
+	output := String{}
+	helper := &FixtureHelper{panicOn: "SetUpTest"}
+	result := Run(helper, &RunConf{Output: &output})
+	c.Check(result.Succeeded, Equals, 0)
+	c.Check(result.Failed, Equals, 0)
+	c.Check(result.Skipped, Equals, 0)
+	c.Check(result.Panicked, Equals, 0)
+	c.Check(result.FixturePanicked, Equals, 1)
+	c.Check(result.Missed, Equals, 2)
+	c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnSetUpSuite(c *C) {
+	output := String{}
+	helper := &FixtureHelper{panicOn: "SetUpSuite"}
+	result := Run(helper, &RunConf{Output: &output})
+	c.Check(result.Succeeded, Equals, 0)
+	c.Check(result.Failed, Equals, 0)
+	c.Check(result.Skipped, Equals, 0)
+	c.Check(result.Panicked, Equals, 0)
+	c.Check(result.FixturePanicked, Equals, 1)
+	c.Check(result.Missed, Equals, 2)
+	c.Check(result.RunError, IsNil)
+}
+
+// -----------------------------------------------------------------------
+// Check result aggregation.
+
+func (s *RunS) TestAdd(c *C) {
+	result := &Result{
+		Succeeded:        1,
+		Skipped:          2,
+		Failed:           3,
+		Panicked:         4,
+		FixturePanicked:  5,
+		Missed:           6,
+		ExpectedFailures: 7,
+	}
+	result.Add(&Result{
+		Succeeded:        10,
+		Skipped:          20,
+		Failed:           30,
+		Panicked:         40,
+		FixturePanicked:  50,
+		Missed:           60,
+		ExpectedFailures: 70,
+	})
+	c.Check(result.Succeeded, Equals, 11)
+	c.Check(result.Skipped, Equals, 22)
+	c.Check(result.Failed, Equals, 33)
+	c.Check(result.Panicked, Equals, 44)
+	c.Check(result.FixturePanicked, Equals, 55)
+	c.Check(result.Missed, Equals, 66)
+	c.Check(result.ExpectedFailures, Equals, 77)
+	c.Check(result.RunError, IsNil)
+}
+
+// -----------------------------------------------------------------------
+// Check the Passed() method.
+
+func (s *RunS) TestPassed(c *C) {
+	c.Assert((&Result{}).Passed(), Equals, true)
+	c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true)
+	c.Assert((&Result{Skipped: 1}).Passed(), Equals, true)
+	c.Assert((&Result{Failed: 1}).Passed(), Equals, false)
+	c.Assert((&Result{Panicked: 1}).Passed(), Equals, false)
+	c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false)
+	c.Assert((&Result{Missed: 1}).Passed(), Equals, false)
+	c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Check that result printing is working correctly.
+
+func (s *RunS) TestPrintSuccess(c *C) {
+	result := &Result{Succeeded: 5}
+	c.Check(result.String(), Equals, "OK: 5 passed")
+}
+
+func (s *RunS) TestPrintFailure(c *C) {
+	result := &Result{Failed: 5}
+	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED")
+}
+
+func (s *RunS) TestPrintSkipped(c *C) {
+	result := &Result{Skipped: 5}
+	c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped")
+}
+
+func (s *RunS) TestPrintExpectedFailures(c *C) {
+	result := &Result{ExpectedFailures: 5}
+	c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures")
+}
+
+func (s *RunS) TestPrintPanicked(c *C) {
+	result := &Result{Panicked: 5}
+	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED")
+}
+
+func (s *RunS) TestPrintFixturePanicked(c *C) {
+	result := &Result{FixturePanicked: 5}
+	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED")
+}
+
+func (s *RunS) TestPrintMissed(c *C) {
+	result := &Result{Missed: 5}
+	c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED")
+}
+
+func (s *RunS) TestPrintAll(c *C) {
+	result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3,
+		Panicked: 4, FixturePanicked: 5, Missed: 6}
+	c.Check(result.String(), Equals,
+		"OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+
+			"5 FIXTURE-PANICKED, 6 MISSED")
+}
+
+func (s *RunS) TestPrintRunError(c *C) {
+	result := &Result{Succeeded: 1, Failed: 1,
+		RunError: errors.New("Kaboom!")}
+	c.Check(result.String(), Equals, "ERROR: Kaboom!")
+}
+
+// -----------------------------------------------------------------------
+// Verify that the method pattern flag works correctly.
+
+func (s *RunS) TestFilterTestName(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Filter: "Test[91]"}
+	Run(&helper, &runConf)
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 5)
+}
+
+func (s *RunS) TestFilterTestNameWithAll(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Filter: ".*"}
+	Run(&helper, &runConf)
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "SetUpTest")
+	c.Check(helper.calls[5], Equals, "Test2")
+	c.Check(helper.calls[6], Equals, "TearDownTest")
+	c.Check(helper.calls[7], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterSuiteName(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Filter: "FixtureHelper"}
+	Run(&helper, &runConf)
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test1")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "SetUpTest")
+	c.Check(helper.calls[5], Equals, "Test2")
+	c.Check(helper.calls[6], Equals, "TearDownTest")
+	c.Check(helper.calls[7], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterSuiteNameAndTestName(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"}
+	Run(&helper, &runConf)
+	c.Check(helper.calls[0], Equals, "SetUpSuite")
+	c.Check(helper.calls[1], Equals, "SetUpTest")
+	c.Check(helper.calls[2], Equals, "Test2")
+	c.Check(helper.calls[3], Equals, "TearDownTest")
+	c.Check(helper.calls[4], Equals, "TearDownSuite")
+	c.Check(len(helper.calls), Equals, 5)
+}
+
+func (s *RunS) TestFilterAllOut(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Filter: "NotFound"}
+	Run(&helper, &runConf)
+	c.Check(len(helper.calls), Equals, 0)
+}
+
+func (s *RunS) TestRequirePartialMatch(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Filter: "est"}
+	Run(&helper, &runConf)
+	c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterError(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Filter: "]["}
+	result := Run(&helper, &runConf)
+	c.Check(result.String(), Equals,
+		"ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`")
+	c.Check(len(helper.calls), Equals, 0)
+}
+
+// -----------------------------------------------------------------------
+// Verify that List works correctly.
+
+func (s *RunS) TestListFiltered(c *C) {
+	names := List(&FixtureHelper{}, &RunConf{Filter: "1"})
+	c.Assert(names, DeepEquals, []string{
+		"FixtureHelper.Test1",
+	})
+}
+
+func (s *RunS) TestList(c *C) {
+	names := List(&FixtureHelper{}, &RunConf{})
+	c.Assert(names, DeepEquals, []string{
+		"FixtureHelper.Test1",
+		"FixtureHelper.Test2",
+	})
+}
+
+// -----------------------------------------------------------------------
+// Verify that verbose mode prints tests which pass as well.
+
+func (s *RunS) TestVerboseMode(c *C) {
+	helper := FixtureHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true}
+	Run(&helper, &runConf)
+
+	expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" +
+		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) {
+	helper := FixtureHelper{panicOn: "Test1"}
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true}
+	Run(&helper, &runConf)
+
+	expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line.
+		"PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify the stream output mode.  In this mode there's no output caching.
+
+type StreamHelper struct {
+	l2 sync.Mutex
+	l3 sync.Mutex
+}
+
+func (s *StreamHelper) SetUpSuite(c *C) {
+	c.Log("0")
+}
+
+func (s *StreamHelper) Test1(c *C) {
+	c.Log("1")
+	s.l2.Lock()
+	s.l3.Lock()
+	go func() {
+		s.l2.Lock() // Wait for "2".
+		c.Log("3")
+		s.l3.Unlock()
+	}()
+}
+
+func (s *StreamHelper) Test2(c *C) {
+	c.Log("2")
+	s.l2.Unlock()
+	s.l3.Lock() // Wait for "3".
+	c.Fail()
+	c.Log("4")
+}
+
+func (s *RunS) TestStreamMode(c *C) {
+	helper := &StreamHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Stream: true}
+	Run(helper, &runConf)
+
+	expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" +
+		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" +
+		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" +
+		"FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+type StreamMissHelper struct{}
+
+func (s *StreamMissHelper) SetUpSuite(c *C) {
+	c.Log("0")
+	c.Fail()
+}
+
+func (s *StreamMissHelper) Test1(c *C) {
+	c.Log("1")
+}
+
+func (s *RunS) TestStreamModeWithMiss(c *C) {
+	helper := &StreamMissHelper{}
+	output := String{}
+	runConf := RunConf{Output: &output, Stream: true}
+	Run(helper, &runConf)
+
+	expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" +
+		"FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" +
+		"START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" +
+		"MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n"
+
+	c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify that that the keep work dir request indeed does so.
+
+type WorkDirSuite struct {}
+
+func (s *WorkDirSuite) Test(c *C) {
+	c.MkDir()
+}
+
+func (s *RunS) TestKeepWorkDir(c *C) {
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true}
+	result := Run(&WorkDirSuite{}, &runConf)
+
+	c.Assert(result.String(), Matches, ".*\nWORK=" + result.WorkDir)
+
+	stat, err := os.Stat(result.WorkDir)
+	c.Assert(err, IsNil)
+	c.Assert(stat.IsDir(), Equals, true)
+}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/.gitignore b/vendor/src/github.com/go-fsnotify/fsnotify/.gitignore
deleted file mode 100644
index 4cd0cba..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# Setup a Global .gitignore for OS and editor generated files:
-# https://help.github.com/articles/ignoring-files
-# git config --global core.excludesfile ~/.gitignore_global
-
-.vagrant
-*.sublime-project
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/.travis.yml b/vendor/src/github.com/go-fsnotify/fsnotify/.travis.yml
deleted file mode 100644
index f8e76fc..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
-  - 1.2
-  - tip
-
-# not yet https://github.com/travis-ci/travis-ci/issues/2318
-os:
-  - linux
-  - osx
-
-notifications:
-  email: false
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/AUTHORS b/vendor/src/github.com/go-fsnotify/fsnotify/AUTHORS
deleted file mode 100644
index 306091e..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/AUTHORS
+++ /dev/null
@@ -1,32 +0,0 @@
-# Names should be added to this file as
-#	Name or Organization <email address>
-# The email address is not required for organizations.
-
-# You can update this list using the following command:
-#
-#   $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
-
-# Please keep the list sorted.
-
-Adrien Bustany <adrien@bustany.org>
-Caleb Spare <cespare@gmail.com>
-Case Nelson <case@teammating.com>
-Chris Howey <howeyc@gmail.com> <chris@howey.me>
-Christoffer Buchholz <christoffer.buchholz@gmail.com>
-Dave Cheney <dave@cheney.net>
-Francisco Souza <f@souza.cc>
-Hari haran <hariharan.uno@gmail.com>
-John C Barstow
-Kelvin Fo <vmirage@gmail.com>
-Nathan Youngman <git@nathany.com>
-Paul Hammond <paul@paulhammond.org>
-Pursuit92 <JoshChase@techpursuit.net>
-Rob Figueiredo <robfig@gmail.com>
-Soge Zhang <zhssoge@gmail.com>
-Tilak Sharma <tilaks@google.com>
-Travis Cline <travis.cline@gmail.com>
-Tudor Golubenco <tudor.g@gmail.com>
-Yukang <moorekang@gmail.com>
-bronze1man <bronze1man@gmail.com>
-debrando <denis.brandolini@gmail.com>
-henrikedwards <henrik.edwards@gmail.com>
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/CHANGELOG.md b/vendor/src/github.com/go-fsnotify/fsnotify/CHANGELOG.md
deleted file mode 100644
index 79f4ddb..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/CHANGELOG.md
+++ /dev/null
@@ -1,237 +0,0 @@
-# Changelog
-
-## v1.0.4 / 2014-09-07
-
-* kqueue: add dragonfly to the build tags.
-* Rename source code files, rearrange code so exported APIs are at the top.
-* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang)
-
-## v1.0.3 / 2014-08-19
-
-* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36)
-
-## v1.0.2 / 2014-08-17
-
-* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
-* [Fix] Make ./path and path equivalent. (thanks @zhsso)
-
-## v1.0.0 / 2014-08-15
-
-* [API] Remove AddWatch on Windows, use Add.
-* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30)
-* Minor updates based on feedback from golint.
-
-## dev / 2014-07-09
-
-* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify).
-* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
- 
-## dev / 2014-07-04
-
-* kqueue: fix incorrect mutex used in Close()
-* Update example to demonstrate usage of Op.
-
-## dev / 2014-06-28
-
-* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4)
-* Fix for String() method on Event (thanks Alex Brainman)
-* Don't build on Plan 9 or Solaris (thanks @4ad)
-
-## dev / 2014-06-21
-
-* Events channel of type Event rather than *Event.
-* [internal] use syscall constants directly for inotify and kqueue.
-* [internal] kqueue: rename events to kevents and fileEvent to event.
-
-## dev / 2014-06-19
-
-* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
-* [internal] remove cookie from Event struct (unused).
-* [internal] Event struct has the same definition across every OS.
-* [internal] remove internal watch and removeWatch methods.
-
-## dev / 2014-06-12
-
-* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
-* [API] Pluralized channel names: Events and Errors.
-* [API] Renamed FileEvent struct to Event.
-* [API] Op constants replace methods like IsCreate().
-
-## dev / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## dev / 2014-05-23
-
-* [API] Remove current implementation of WatchFlags.
-    * current implementation doesn't take advantage of OS for efficiency
-    * provides little benefit over filtering events as they are received, but has  extra bookkeeping and mutexes
-    * no tests for the current implementation
-    * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
-
-## v0.9.2 / 2014-08-17
-
-* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
-
-## v0.9.1 / 2014-06-12
-
-* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
-
-## v0.9.0 / 2014-01-17
-
-* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
-* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
-* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
-
-## v0.8.12 / 2013-11-13
-
-* [API] Remove FD_SET and friends from Linux adapter
-
-## v0.8.11 / 2013-11-02
-
-* [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
-
-## v0.8.10 / 2013-10-19
-
-* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
-* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
-* [Doc] specify OS-specific limits in README (thanks @debrando)
-
-## v0.8.9 / 2013-09-08
-
-* [Doc] Contributing (thanks @nathany)
-* [Doc] update package path in example code [#63][] (thanks @paulhammond)
-* [Doc] GoCI badge in README (Linux only) [#60][]
-* [Doc] Cross-platform testing with Vagrant  [#59][] (thanks @nathany)
-
-## v0.8.8 / 2013-06-17
-
-* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
-
-## v0.8.7 / 2013-06-03
-
-* [API] Make syscall flags internal
-* [Fix] inotify: ignore event changes
-* [Fix] race in symlink test [#45][] (reported by @srid)
-* [Fix] tests on Windows
-* lower case error messages
-
-## v0.8.6 / 2013-05-23
-
-* kqueue: Use EVT_ONLY flag on Darwin
-* [Doc] Update README with full example
-
-## v0.8.5 / 2013-05-09
-
-* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
-
-## v0.8.4 / 2013-04-07
-
-* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
-
-## v0.8.3 / 2013-03-13
-
-* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
-* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
-
-## v0.8.2 / 2013-02-07
-
-* [Doc] add Authors
-* [Fix] fix data races for map access [#29][] (thanks @fsouza)
-
-## v0.8.1 / 2013-01-09
-
-* [Fix] Windows path separators
-* [Doc] BSD License
-
-## v0.8.0 / 2012-11-09
-
-* kqueue: directory watching improvements (thanks @vmirage)
-* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
-* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
-
-## v0.7.4 / 2012-10-09
-
-* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
-* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
-* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
-* [Fix] kqueue: modify after recreation of file
-
-## v0.7.3 / 2012-09-27
-
-* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
-* [Fix] kqueue: no longer get duplicate CREATE events
-
-## v0.7.2 / 2012-09-01
-
-* kqueue: events for created directories
-
-## v0.7.1 / 2012-07-14
-
-* [Fix] for renaming files
-
-## v0.7.0 / 2012-07-02
-
-* [Feature] FSNotify flags
-* [Fix] inotify: Added file name back to event path
-
-## v0.6.0 / 2012-06-06
-
-* kqueue: watch files after directory created (thanks @tmc)
-
-## v0.5.1 / 2012-05-22
-
-* [Fix] inotify: remove all watches before Close()
-
-## v0.5.0 / 2012-05-03
-
-* [API] kqueue: return errors during watch instead of sending over channel
-* kqueue: match symlink behavior on Linux
-* inotify: add `DELETE_SELF` (requested by @taralx)
-* [Fix] kqueue: handle EINTR (reported by @robfig)
-* [Doc] Godoc example [#1][] (thanks @davecheney)
-
-## v0.4.0 / 2012-03-30
-
-* Go 1 released: build with go tool
-* [Feature] Windows support using winfsnotify
-* Windows does not have attribute change notifications
-* Roll attribute notifications into IsModify
-
-## v0.3.0 / 2012-02-19
-
-* kqueue: add files when watch directory
-
-## v0.2.0 / 2011-12-30
-
-* update to latest Go weekly code
-
-## v0.1.0 / 2011-10-19
-
-* kqueue: add watch on file creation to match inotify
-* kqueue: create file event
-* inotify: ignore `IN_IGNORED` events
-* event String()
-* linux: common FileEvent functions
-* initial commit
-
-[#79]: https://github.com/howeyc/fsnotify/pull/79
-[#77]: https://github.com/howeyc/fsnotify/pull/77
-[#72]: https://github.com/howeyc/fsnotify/issues/72
-[#71]: https://github.com/howeyc/fsnotify/issues/71
-[#70]: https://github.com/howeyc/fsnotify/issues/70
-[#63]: https://github.com/howeyc/fsnotify/issues/63
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#60]: https://github.com/howeyc/fsnotify/issues/60
-[#59]: https://github.com/howeyc/fsnotify/issues/59
-[#49]: https://github.com/howeyc/fsnotify/issues/49
-[#45]: https://github.com/howeyc/fsnotify/issues/45
-[#40]: https://github.com/howeyc/fsnotify/issues/40
-[#36]: https://github.com/howeyc/fsnotify/issues/36
-[#33]: https://github.com/howeyc/fsnotify/issues/33
-[#29]: https://github.com/howeyc/fsnotify/issues/29
-[#25]: https://github.com/howeyc/fsnotify/issues/25
-[#24]: https://github.com/howeyc/fsnotify/issues/24
-[#21]: https://github.com/howeyc/fsnotify/issues/21
-
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/CONTRIBUTING.md b/vendor/src/github.com/go-fsnotify/fsnotify/CONTRIBUTING.md
deleted file mode 100644
index 2fd0423..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/CONTRIBUTING.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Contributing
-
-* Send questions to [golang-dev@googlegroups.com](mailto:golang-dev@googlegroups.com). 
-
-### Issues
-
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
-* Please indicate the platform you are running on.
-
-### Pull Requests
-
-A future version of Go will have [fsnotify in the standard library](https://code.google.com/p/go/issues/detail?id=4068), therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so we need you to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
-
-Please indicate that you have signed the CLA in your pull request.
-
-To hack on fsnotify:
-
-1. Install as usual (`go get -u github.com/go-fsnotify/fsnotify`)
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Ensure everything works and the tests pass (see below)
-4. Commit your changes (`git commit -am 'Add some feature'`)
-
-Contribute upstream:
-
-1. Fork fsnotify on GitHub
-2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
-3. Push to the branch (`git push fork my-new-feature`)
-4. Create a new Pull Request on GitHub
-
-If other team members need your patch before I merge it:
-
-1. Install as usual (`go get -u github.com/go-fsnotify/fsnotify`)
-2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
-3. Pull your revisions (`git fetch fork; git checkout -b my-new-feature fork/my-new-feature`)
-
-Notice: For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
-
-Note: The maintainers will update the CHANGELOG on your behalf. Please don't modify it in your pull request.
-
-### Testing
-
-fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
-
-Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
-
-To make cross-platform testing easier, I've created a Vagrantfile for Linux and BSD.
-
-* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
-* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
-* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
-* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`.
-* When you're done, you will want to halt or destroy the Vagrant boxes.
-
-Notice: fsnotify file system events don't work on shared folders. The tests get around this limitation by using a tmp directory, but it is something to be aware of.
-
-Right now I don't have an equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/LICENSE b/vendor/src/github.com/go-fsnotify/fsnotify/LICENSE
deleted file mode 100644
index f21e540..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012 fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/README.md b/vendor/src/github.com/go-fsnotify/fsnotify/README.md
deleted file mode 100644
index 0759284..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# File system notifications for Go
-
-[![Coverage](http://gocover.io/_badge/github.com/go-fsnotify/fsnotify)](http://gocover.io/github.com/go-fsnotify/fsnotify) [![GoDoc](https://godoc.org/gopkg.in/fsnotify.v1?status.svg)](https://godoc.org/gopkg.in/fsnotify.v1)
-
-Cross platform: Windows, Linux, BSD and OS X.
-
-|Adapter   |OS        |Status    |
-|----------|----------|----------|
-|inotify   |Linux, Android\*|Supported|
-|kqueue    |BSD, OS X, iOS\*|Supported|
-|ReadDirectoryChangesW|Windows|Supported|
-|FSEvents  |OS X          |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)|
-|FEN       |Solaris 11    |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)|
-|fanotify  |Linux 2.6.37+ | |
-|Polling   |*All*         |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)|
-|          |Plan 9        | |
-
-\* Android and iOS are untested.
-
-Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information.
-
-## API stability
-
-Two major versions of fsnotify exist. 
-
-**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with:
-
-```go
-import "gopkg.in/fsnotify.v1"
-```
-
-\* Refer to the package as fsnotify (without the .v1 suffix).
-
-**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1.
-
-```go
-import "gopkg.in/fsnotify.v0"
-```
-
-Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API.
-
-## Contributing
-
-* Send questions to [golang-dev@googlegroups.com](mailto:golang-dev@googlegroups.com). 
-* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
-
-A future version of Go will have [fsnotify in the standard library](https://code.google.com/p/go/issues/detail?id=4068), therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so we need you to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
-
-Please read [CONTRIBUTING](https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md) before opening a pull request.
-
-## Example
-
-See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go).
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/example_test.go b/vendor/src/github.com/go-fsnotify/fsnotify/example_test.go
deleted file mode 100644
index 9f2c63f..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/example_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-package fsnotify_test
-
-import (
-	"log"
-
-	"gopkg.in/fsnotify.v1"
-)
-
-func ExampleNewWatcher() {
-	watcher, err := fsnotify.NewWatcher()
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer watcher.Close()
-
-	done := make(chan bool)
-	go func() {
-		for {
-			select {
-			case event := <-watcher.Events:
-				log.Println("event:", event)
-				if event.Op&fsnotify.Write == fsnotify.Write {
-					log.Println("modified file:", event.Name)
-				}
-			case err := <-watcher.Errors:
-				log.Println("error:", err)
-			}
-		}
-	}()
-
-	err = watcher.Add("/tmp/foo")
-	if err != nil {
-		log.Fatal(err)
-	}
-	<-done
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/fsnotify.go b/vendor/src/github.com/go-fsnotify/fsnotify/fsnotify.go
deleted file mode 100644
index 7b5233f..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/fsnotify.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-// Package fsnotify provides a platform-independent interface for file system notifications.
-package fsnotify
-
-import "fmt"
-
-// Event represents a single file system notification.
-type Event struct {
-	Name string // Relative path to the file or directory.
-	Op   Op     // File operation that triggered the event.
-}
-
-// Op describes a set of file operations.
-type Op uint32
-
-// These are the generalized file operations that can trigger a notification.
-const (
-	Create Op = 1 << iota
-	Write
-	Remove
-	Rename
-	Chmod
-)
-
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
-func (e Event) String() string {
-	events := ""
-
-	if e.Op&Create == Create {
-		events += "|CREATE"
-	}
-	if e.Op&Remove == Remove {
-		events += "|REMOVE"
-	}
-	if e.Op&Write == Write {
-		events += "|WRITE"
-	}
-	if e.Op&Rename == Rename {
-		events += "|RENAME"
-	}
-	if e.Op&Chmod == Chmod {
-		events += "|CHMOD"
-	}
-
-	if len(events) > 0 {
-		events = events[1:]
-	}
-
-	return fmt.Sprintf("%q: %s", e.Name, events)
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/inotify.go b/vendor/src/github.com/go-fsnotify/fsnotify/inotify.go
deleted file mode 100644
index f5c0aae..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/inotify.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"syscall"
-	"unsafe"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events   chan Event
-	Errors   chan error
-	mu       sync.Mutex        // Map access
-	fd       int               // File descriptor (as returned by the inotify_init() syscall)
-	watches  map[string]*watch // Map of inotify watches (key: path)
-	paths    map[int]string    // Map of watched paths (key: watch descriptor)
-	done     chan bool         // Channel for sending a "quit message" to the reader goroutine
-	isClosed bool              // Set to true when Close() is first called
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
-	fd, errno := syscall.InotifyInit()
-	if fd == -1 {
-		return nil, os.NewSyscallError("inotify_init", errno)
-	}
-	w := &Watcher{
-		fd:      fd,
-		watches: make(map[string]*watch),
-		paths:   make(map[int]string),
-		Events:  make(chan Event),
-		Errors:  make(chan error),
-		done:    make(chan bool, 1),
-	}
-
-	go w.readEvents()
-	return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-	if w.isClosed {
-		return nil
-	}
-	w.isClosed = true
-
-	// Remove all watches
-	for name := range w.watches {
-		w.Remove(name)
-	}
-
-	// Send "quit" message to the reader goroutine
-	w.done <- true
-
-	return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
-	name = filepath.Clean(name)
-	if w.isClosed {
-		return errors.New("inotify instance already closed")
-	}
-
-	const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM |
-		syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY |
-		syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF
-
-	var flags uint32 = agnosticEvents
-
-	w.mu.Lock()
-	watchEntry, found := w.watches[name]
-	w.mu.Unlock()
-	if found {
-		watchEntry.flags |= flags
-		flags |= syscall.IN_MASK_ADD
-	}
-	wd, errno := syscall.InotifyAddWatch(w.fd, name, flags)
-	if wd == -1 {
-		return os.NewSyscallError("inotify_add_watch", errno)
-	}
-
-	w.mu.Lock()
-	w.watches[name] = &watch{wd: uint32(wd), flags: flags}
-	w.paths[wd] = name
-	w.mu.Unlock()
-
-	return nil
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	name = filepath.Clean(name)
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	watch, ok := w.watches[name]
-	if !ok {
-		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
-	}
-	success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
-	if success == -1 {
-		return os.NewSyscallError("inotify_rm_watch", errno)
-	}
-	delete(w.watches, name)
-	return nil
-}
-
-type watch struct {
-	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
-	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}
-
-// readEvents reads from the inotify file descriptor, converts the
-// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
-	var (
-		buf   [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
-		n     int                                     // Number of bytes read with read()
-		errno error                                   // Syscall errno
-	)
-
-	for {
-		// See if there is a message on the "done" channel
-		select {
-		case <-w.done:
-			syscall.Close(w.fd)
-			close(w.Events)
-			close(w.Errors)
-			return
-		default:
-		}
-
-		n, errno = syscall.Read(w.fd, buf[:])
-
-		// If EOF is received
-		if n == 0 {
-			syscall.Close(w.fd)
-			close(w.Events)
-			close(w.Errors)
-			return
-		}
-
-		if n < 0 {
-			w.Errors <- os.NewSyscallError("read", errno)
-			continue
-		}
-		if n < syscall.SizeofInotifyEvent {
-			w.Errors <- errors.New("inotify: short read in readEvents()")
-			continue
-		}
-
-		var offset uint32
-		// We don't know how many events we just read into the buffer
-		// While the offset points to at least one whole event...
-		for offset <= uint32(n-syscall.SizeofInotifyEvent) {
-			// Point "raw" to the event in the buffer
-			raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
-
-			mask := uint32(raw.Mask)
-			nameLen := uint32(raw.Len)
-			// If the event happened to the watched directory or the watched file, the kernel
-			// doesn't append the filename to the event, but we would like to always fill the
-			// the "Name" field with a valid filename. We retrieve the path of the watch from
-			// the "paths" map.
-			w.mu.Lock()
-			name := w.paths[int(raw.Wd)]
-			w.mu.Unlock()
-			if nameLen > 0 {
-				// Point "bytes" at the first byte of the filename
-				bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
-				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
-				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
-			}
-
-			event := newEvent(name, mask)
-
-			// Send the events that are not ignored on the events channel
-			if !event.ignoreLinux(mask) {
-				w.Events <- event
-			}
-
-			// Move to the next event in the buffer
-			offset += syscall.SizeofInotifyEvent + nameLen
-		}
-	}
-}
-
-// Certain types of events can be "ignored" and not sent over the Events
-// channel. Such as events marked ignore by the kernel, or MODIFY events
-// against files that do not exist.
-func (e *Event) ignoreLinux(mask uint32) bool {
-	// Ignore anything the inotify API says to ignore
-	if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
-		return true
-	}
-
-	// If the event is not a DELETE or RENAME, the file must exist.
-	// Otherwise the event is ignored.
-	// *Note*: this was put in place because it was seen that a MODIFY
-	// event was sent after the DELETE. This ignores that MODIFY and
-	// assumes a DELETE will come or has come if the file doesn't exist.
-	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
-		_, statErr := os.Lstat(e.Name)
-		return os.IsNotExist(statErr)
-	}
-	return false
-}
-
-// newEvent returns an platform-independent Event based on an inotify mask.
-func newEvent(name string, mask uint32) Event {
-	e := Event{Name: name}
-	if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
-		e.Op |= Create
-	}
-	if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
-		e.Op |= Remove
-	}
-	if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
-		e.Op |= Write
-	}
-	if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
-		e.Op |= Rename
-	}
-	if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
-		e.Op |= Chmod
-	}
-	return e
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/integration_test.go b/vendor/src/github.com/go-fsnotify/fsnotify/integration_test.go
deleted file mode 100644
index ad51ab6..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/integration_test.go
+++ /dev/null
@@ -1,1120 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9,!solaris
-
-package fsnotify
-
-import (
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"sync/atomic"
-	"testing"
-	"time"
-)
-
-// An atomic counter
-type counter struct {
-	val int32
-}
-
-func (c *counter) increment() {
-	atomic.AddInt32(&c.val, 1)
-}
-
-func (c *counter) value() int32 {
-	return atomic.LoadInt32(&c.val)
-}
-
-func (c *counter) reset() {
-	atomic.StoreInt32(&c.val, 0)
-}
-
-// tempMkdir makes a temporary directory
-func tempMkdir(t *testing.T) string {
-	dir, err := ioutil.TempDir("", "fsnotify")
-	if err != nil {
-		t.Fatalf("failed to create test directory: %s", err)
-	}
-	return dir
-}
-
-// newWatcher initializes an fsnotify Watcher instance.
-func newWatcher(t *testing.T) *Watcher {
-	watcher, err := NewWatcher()
-	if err != nil {
-		t.Fatalf("NewWatcher() failed: %s", err)
-	}
-	return watcher
-}
-
-// addWatch adds a watch for a directory
-func addWatch(t *testing.T, watcher *Watcher, dir string) {
-	if err := watcher.Add(dir); err != nil {
-		t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
-	}
-}
-
-func TestFsnotifyMultipleOperations(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create directory that's not watched
-	testDirToMoveFiles := tempMkdir(t)
-	defer os.RemoveAll(testDirToMoveFiles)
-
-	testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
-	testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
-
-	addWatch(t, watcher, testDir)
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var createReceived, modifyReceived, deleteReceived, renameReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
-				t.Logf("event received: %s", event)
-				if event.Op&Remove == Remove {
-					deleteReceived.increment()
-				}
-				if event.Op&Write == Write {
-					modifyReceived.increment()
-				}
-				if event.Op&Create == Create {
-					createReceived.increment()
-				}
-				if event.Op&Rename == Rename {
-					renameReceived.increment()
-				}
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	time.Sleep(time.Millisecond)
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	if err := testRename(testFile, testFileRenamed); err != nil {
-		t.Fatalf("rename failed: %s", err)
-	}
-
-	// Modify the file outside of the watched dir
-	f, err = os.Open(testFileRenamed)
-	if err != nil {
-		t.Fatalf("open test renamed file failed: %s", err)
-	}
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	// Recreate the file that was moved
-	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Close()
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	cReceived := createReceived.value()
-	if cReceived != 2 {
-		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
-	}
-	mReceived := modifyReceived.value()
-	if mReceived != 1 {
-		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
-	}
-	dReceived := deleteReceived.value()
-	rReceived := renameReceived.value()
-	if dReceived+rReceived != 1 {
-		t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-}
-
-func TestFsnotifyMultipleCreates(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
-
-	addWatch(t, watcher, testDir)
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var createReceived, modifyReceived, deleteReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
-				t.Logf("event received: %s", event)
-				if event.Op&Remove == Remove {
-					deleteReceived.increment()
-				}
-				if event.Op&Create == Create {
-					createReceived.increment()
-				}
-				if event.Op&Write == Write {
-					modifyReceived.increment()
-				}
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	time.Sleep(time.Millisecond)
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	os.Remove(testFile)
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	// Recreate the file
-	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Close()
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	// Modify
-	f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	time.Sleep(time.Millisecond)
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	// Modify
-	f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	time.Sleep(time.Millisecond)
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	cReceived := createReceived.value()
-	if cReceived != 2 {
-		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
-	}
-	mReceived := modifyReceived.value()
-	if mReceived < 3 {
-		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3)
-	}
-	dReceived := deleteReceived.value()
-	if dReceived != 1 {
-		t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-}
-
-func TestFsnotifyDirOnly(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create a file before watching directory
-	// This should NOT add any events to the fsnotify event queue
-	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
-	{
-		var f *os.File
-		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
-		if err != nil {
-			t.Fatalf("creating test file failed: %s", err)
-		}
-		f.Sync()
-		f.Close()
-	}
-
-	addWatch(t, watcher, testDir)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var createReceived, modifyReceived, deleteReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
-				t.Logf("event received: %s", event)
-				if event.Op&Remove == Remove {
-					deleteReceived.increment()
-				}
-				if event.Op&Write == Write {
-					modifyReceived.increment()
-				}
-				if event.Op&Create == Create {
-					createReceived.increment()
-				}
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	time.Sleep(time.Millisecond)
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
-
-	os.Remove(testFile)
-	os.Remove(testFileAlreadyExists)
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	cReceived := createReceived.value()
-	if cReceived != 1 {
-		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
-	}
-	mReceived := modifyReceived.value()
-	if mReceived != 1 {
-		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
-	}
-	dReceived := deleteReceived.value()
-	if dReceived != 2 {
-		t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-}
-
-func TestFsnotifyDeleteWatchedDir(t *testing.T) {
-	watcher := newWatcher(t)
-	defer watcher.Close()
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create a file before watching directory
-	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
-	{
-		var f *os.File
-		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
-		if err != nil {
-			t.Fatalf("creating test file failed: %s", err)
-		}
-		f.Sync()
-		f.Close()
-	}
-
-	addWatch(t, watcher, testDir)
-
-	// Add a watch for testFile
-	addWatch(t, watcher, testFileAlreadyExists)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var deleteReceived counter
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
-				t.Logf("event received: %s", event)
-				if event.Op&Remove == Remove {
-					deleteReceived.increment()
-				}
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-	}()
-
-	os.RemoveAll(testDir)
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	dReceived := deleteReceived.value()
-	if dReceived < 2 {
-		t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
-	}
-}
-
-func TestFsnotifySubDir(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
-	testSubDir := filepath.Join(testDir, "sub")
-	testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var createReceived, deleteReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
-				t.Logf("event received: %s", event)
-				if event.Op&Create == Create {
-					createReceived.increment()
-				}
-				if event.Op&Remove == Remove {
-					deleteReceived.increment()
-				}
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	addWatch(t, watcher, testDir)
-
-	// Create sub-directory
-	if err := os.Mkdir(testSubDir, 0777); err != nil {
-		t.Fatalf("failed to create test sub-directory: %s", err)
-	}
-
-	// Create a file
-	var f *os.File
-	f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-	f.Close()
-
-	// Create a file (Should not see this! we are not watching subdir)
-	var fs *os.File
-	fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	fs.Sync()
-	fs.Close()
-
-	time.Sleep(200 * time.Millisecond)
-
-	// Make sure receive deletes for both file and sub-directory
-	os.RemoveAll(testSubDir)
-	os.Remove(testFile1)
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	cReceived := createReceived.value()
-	if cReceived != 2 {
-		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
-	}
-	dReceived := deleteReceived.value()
-	if dReceived != 2 {
-		t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-}
-
-func TestFsnotifyRename(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	addWatch(t, watcher, testDir)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
-	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var renameReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
-				if event.Op&Rename == Rename {
-					renameReceived.increment()
-				}
-				t.Logf("event received: %s", event)
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	// Add a watch for testFile
-	addWatch(t, watcher, testFile)
-
-	if err := testRename(testFile, testFileRenamed); err != nil {
-		t.Fatalf("rename failed: %s", err)
-	}
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	if renameReceived.value() == 0 {
-		t.Fatal("fsnotify rename events have not been received after 500 ms")
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-
-	os.Remove(testFileRenamed)
-}
-
-func TestFsnotifyRenameToCreate(t *testing.T) {
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create directory to get file
-	testDirFrom := tempMkdir(t)
-	defer os.RemoveAll(testDirFrom)
-
-	addWatch(t, watcher, testDir)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
-	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var createReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
-				if event.Op&Create == Create {
-					createReceived.increment()
-				}
-				t.Logf("event received: %s", event)
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-	f.Close()
-
-	if err := testRename(testFile, testFileRenamed); err != nil {
-		t.Fatalf("rename failed: %s", err)
-	}
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	if createReceived.value() == 0 {
-		t.Fatal("fsnotify create events have not been received after 500 ms")
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-
-	os.Remove(testFileRenamed)
-}
-
-func TestFsnotifyRenameToOverwrite(t *testing.T) {
-	switch runtime.GOOS {
-	case "plan9", "windows":
-		t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
-	}
-
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create directory to get file
-	testDirFrom := tempMkdir(t)
-	defer os.RemoveAll(testDirFrom)
-
-	testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
-	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
-
-	// Create a file
-	var fr *os.File
-	fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	fr.Sync()
-	fr.Close()
-
-	addWatch(t, watcher, testDir)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	var eventReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testFileRenamed) {
-				eventReceived.increment()
-				t.Logf("event received: %s", event)
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-	f.Close()
-
-	if err := testRename(testFile, testFileRenamed); err != nil {
-		t.Fatalf("rename failed: %s", err)
-	}
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-	if eventReceived.value() == 0 {
-		t.Fatal("fsnotify events have not been received after 500 ms")
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(2 * time.Second):
-		t.Fatal("event stream was not closed after 2 seconds")
-	}
-
-	os.Remove(testFileRenamed)
-}
-
-func TestRemovalOfWatch(t *testing.T) {
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create a file before watching directory
-	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
-	{
-		var f *os.File
-		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
-		if err != nil {
-			t.Fatalf("creating test file failed: %s", err)
-		}
-		f.Sync()
-		f.Close()
-	}
-
-	watcher := newWatcher(t)
-	defer watcher.Close()
-
-	addWatch(t, watcher, testDir)
-	if err := watcher.Remove(testDir); err != nil {
-		t.Fatalf("Could not remove the watch: %v\n", err)
-	}
-
-	go func() {
-		select {
-		case ev := <-watcher.Events:
-			t.Fatalf("We received event: %v\n", ev)
-		case <-time.After(500 * time.Millisecond):
-			t.Log("No event received, as expected.")
-		}
-	}()
-
-	time.Sleep(200 * time.Millisecond)
-	// Modify the file outside of the watched dir
-	f, err := os.Open(testFileAlreadyExists)
-	if err != nil {
-		t.Fatalf("Open test file failed: %s", err)
-	}
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-	if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
-		t.Fatalf("chmod failed: %s", err)
-	}
-	time.Sleep(400 * time.Millisecond)
-}
-
-func TestFsnotifyAttrib(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("attributes don't work on Windows.")
-	}
-
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for err := range watcher.Errors {
-			t.Fatalf("error received: %s", err)
-		}
-	}()
-
-	testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
-
-	// Receive events on the event channel on a separate goroutine
-	eventstream := watcher.Events
-	// The modifyReceived counter counts IsModify events that are not IsAttrib,
-	// and the attribReceived counts IsAttrib events (which are also IsModify as
-	// a consequence).
-	var modifyReceived counter
-	var attribReceived counter
-	done := make(chan bool)
-	go func() {
-		for event := range eventstream {
-			// Only count relevant events
-			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
-				if event.Op&Write == Write {
-					modifyReceived.increment()
-				}
-				if event.Op&Chmod == Chmod {
-					attribReceived.increment()
-				}
-				t.Logf("event received: %s", event)
-			} else {
-				t.Logf("unexpected event received: %s", event)
-			}
-		}
-		done <- true
-	}()
-
-	// Create a file
-	// This should add at least one event to the fsnotify event queue
-	var f *os.File
-	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
-	if err != nil {
-		t.Fatalf("creating test file failed: %s", err)
-	}
-	f.Sync()
-
-	f.WriteString("data")
-	f.Sync()
-	f.Close()
-
-	// Add a watch for testFile
-	addWatch(t, watcher, testFile)
-
-	if err := os.Chmod(testFile, 0700); err != nil {
-		t.Fatalf("chmod failed: %s", err)
-	}
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	// Creating/writing a file changes also the mtime, so IsAttrib should be set to true here
-	time.Sleep(500 * time.Millisecond)
-	if modifyReceived.value() != 0 {
-		t.Fatal("received an unexpected modify event when creating a test file")
-	}
-	if attribReceived.value() == 0 {
-		t.Fatal("fsnotify attribute events have not received after 500 ms")
-	}
-
-	// Modifying the contents of the file does not set the attrib flag (although eg. the mtime
-	// might have been modified).
-	modifyReceived.reset()
-	attribReceived.reset()
-
-	f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
-	if err != nil {
-		t.Fatalf("reopening test file failed: %s", err)
-	}
-
-	f.WriteString("more data")
-	f.Sync()
-	f.Close()
-
-	time.Sleep(500 * time.Millisecond)
-
-	if modifyReceived.value() != 1 {
-		t.Fatal("didn't receive a modify event after changing test file contents")
-	}
-
-	if attribReceived.value() != 0 {
-		t.Fatal("did receive an unexpected attrib event after changing test file contents")
-	}
-
-	modifyReceived.reset()
-	attribReceived.reset()
-
-	// Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
-	// of the file are not changed though)
-	if err := os.Chmod(testFile, 0600); err != nil {
-		t.Fatalf("chmod failed: %s", err)
-	}
-
-	time.Sleep(500 * time.Millisecond)
-
-	if attribReceived.value() != 1 {
-		t.Fatal("didn't receive an attribute change after 500ms")
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-	t.Log("waiting for the event channel to become closed...")
-	select {
-	case <-done:
-		t.Log("event channel closed")
-	case <-time.After(1e9):
-		t.Fatal("event stream was not closed after 1 second")
-	}
-
-	os.Remove(testFile)
-}
-
-func TestFsnotifyClose(t *testing.T) {
-	watcher := newWatcher(t)
-	watcher.Close()
-
-	var done int32
-	go func() {
-		watcher.Close()
-		atomic.StoreInt32(&done, 1)
-	}()
-
-	time.Sleep(50e6) // 50 ms
-	if atomic.LoadInt32(&done) == 0 {
-		t.Fatal("double Close() test failed: second Close() call didn't return")
-	}
-
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	if err := watcher.Add(testDir); err == nil {
-		t.Fatal("expected error on Watch() after Close(), got nil")
-	}
-}
-
-func TestFsnotifyFakeSymlink(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("symlinks don't work on Windows.")
-	}
-
-	watcher := newWatcher(t)
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	var errorsReceived counter
-	// Receive errors on the error channel on a separate goroutine
-	go func() {
-		for errors := range watcher.Errors {
-			t.Logf("Received error: %s", errors)
-			errorsReceived.increment()
-		}
-	}()
-
-	// Count the CREATE events received
-	var createEventsReceived, otherEventsReceived counter
-	go func() {
-		for ev := range watcher.Events {
-			t.Logf("event received: %s", ev)
-			if ev.Op&Create == Create {
-				createEventsReceived.increment()
-			} else {
-				otherEventsReceived.increment()
-			}
-		}
-	}()
-
-	addWatch(t, watcher, testDir)
-
-	if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
-		t.Fatalf("Failed to create bogus symlink: %s", err)
-	}
-	t.Logf("Created bogus symlink")
-
-	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
-	time.Sleep(500 * time.Millisecond)
-
-	// Should not be error, just no events for broken links (watching nothing)
-	if errorsReceived.value() > 0 {
-		t.Fatal("fsnotify errors have been received.")
-	}
-	if otherEventsReceived.value() > 0 {
-		t.Fatal("fsnotify other events received on the broken link")
-	}
-
-	// Except for 1 create event (for the link itself)
-	if createEventsReceived.value() == 0 {
-		t.Fatal("fsnotify create events were not received after 500 ms")
-	}
-	if createEventsReceived.value() > 1 {
-		t.Fatal("fsnotify more create events received than expected")
-	}
-
-	// Try closing the fsnotify instance
-	t.Log("calling Close()")
-	watcher.Close()
-}
-
-// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
-// See https://codereview.appspot.com/103300045/
-// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
-func TestConcurrentRemovalOfWatch(t *testing.T) {
-	if runtime.GOOS != "darwin" {
-		t.Skip("regression test for race only present on darwin")
-	}
-
-	// Create directory to watch
-	testDir := tempMkdir(t)
-	defer os.RemoveAll(testDir)
-
-	// Create a file before watching directory
-	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
-	{
-		var f *os.File
-		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
-		if err != nil {
-			t.Fatalf("creating test file failed: %s", err)
-		}
-		f.Sync()
-		f.Close()
-	}
-
-	watcher := newWatcher(t)
-	defer watcher.Close()
-
-	addWatch(t, watcher, testDir)
-
-	// Test that RemoveWatch can be invoked concurrently, with no data races.
-	removed1 := make(chan struct{})
-	go func() {
-		defer close(removed1)
-		watcher.Remove(testDir)
-	}()
-	removed2 := make(chan struct{})
-	go func() {
-		close(removed2)
-		watcher.Remove(testDir)
-	}()
-	<-removed1
-	<-removed2
-}
-
-func testRename(file1, file2 string) error {
-	switch runtime.GOOS {
-	case "windows", "plan9":
-		return os.Rename(file1, file2)
-	default:
-		cmd := exec.Command("mv", file1, file2)
-		return cmd.Run()
-	}
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/kqueue.go b/vendor/src/github.com/go-fsnotify/fsnotify/kqueue.go
deleted file mode 100644
index 5ef1346..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/kqueue.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly darwin
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-	"syscall"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events          chan Event
-	Errors          chan error
-	mu              sync.Mutex          // Mutex for the Watcher itself.
-	kq              int                 // File descriptor (as returned by the kqueue() syscall).
-	watches         map[string]int      // Map of watched file descriptors (key: path).
-	wmut            sync.Mutex          // Protects access to watches.
-	enFlags         map[string]uint32   // Map of watched files to evfilt note flags used in kqueue.
-	enmut           sync.Mutex          // Protects access to enFlags.
-	paths           map[int]string      // Map of watched paths (key: watch descriptor).
-	finfo           map[int]os.FileInfo // Map of file information (isDir, isReg; key: watch descriptor).
-	pmut            sync.Mutex          // Protects access to paths and finfo.
-	fileExists      map[string]bool     // Keep track of if we know this file exists (to stop duplicate create events).
-	femut           sync.Mutex          // Protects access to fileExists.
-	externalWatches map[string]bool     // Map of watches added by user of the library.
-	ewmut           sync.Mutex          // Protects access to externalWatches.
-	done            chan bool           // Channel for sending a "quit message" to the reader goroutine
-	isClosed        bool                // Set to true when Close() is first called
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
-	fd, errno := syscall.Kqueue()
-	if fd == -1 {
-		return nil, os.NewSyscallError("kqueue", errno)
-	}
-	w := &Watcher{
-		kq:              fd,
-		watches:         make(map[string]int),
-		enFlags:         make(map[string]uint32),
-		paths:           make(map[int]string),
-		finfo:           make(map[int]os.FileInfo),
-		fileExists:      make(map[string]bool),
-		externalWatches: make(map[string]bool),
-		Events:          make(chan Event),
-		Errors:          make(chan error),
-		done:            make(chan bool, 1),
-	}
-
-	go w.readEvents()
-	return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-	w.mu.Lock()
-	if w.isClosed {
-		w.mu.Unlock()
-		return nil
-	}
-	w.isClosed = true
-	w.mu.Unlock()
-
-	// Send "quit" message to the reader goroutine:
-	w.done <- true
-	w.wmut.Lock()
-	ws := w.watches
-	w.wmut.Unlock()
-	for name := range ws {
-		w.Remove(name)
-	}
-
-	return nil
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
-	w.ewmut.Lock()
-	w.externalWatches[name] = true
-	w.ewmut.Unlock()
-	return w.addWatch(name, noteAllEvents)
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	name = filepath.Clean(name)
-	w.wmut.Lock()
-	watchfd, ok := w.watches[name]
-	w.wmut.Unlock()
-	if !ok {
-		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
-	}
-	var kbuf [1]syscall.Kevent_t
-	watchEntry := &kbuf[0]
-	syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
-	entryFlags := watchEntry.Flags
-	success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
-	if success == -1 {
-		return os.NewSyscallError("kevent_rm_watch", errno)
-	} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
-		return errors.New("kevent rm error")
-	}
-	syscall.Close(watchfd)
-	w.wmut.Lock()
-	delete(w.watches, name)
-	w.wmut.Unlock()
-	w.enmut.Lock()
-	delete(w.enFlags, name)
-	w.enmut.Unlock()
-	w.pmut.Lock()
-	delete(w.paths, watchfd)
-	fInfo := w.finfo[watchfd]
-	delete(w.finfo, watchfd)
-	w.pmut.Unlock()
-
-	// Find all watched paths that are in this directory that are not external.
-	if fInfo.IsDir() {
-		var pathsToRemove []string
-		w.pmut.Lock()
-		for _, wpath := range w.paths {
-			wdir, _ := filepath.Split(wpath)
-			if filepath.Clean(wdir) == filepath.Clean(name) {
-				w.ewmut.Lock()
-				if !w.externalWatches[wpath] {
-					pathsToRemove = append(pathsToRemove, wpath)
-				}
-				w.ewmut.Unlock()
-			}
-		}
-		w.pmut.Unlock()
-		for _, name := range pathsToRemove {
-			// Since these are internal, not much sense in propagating error
-			// to the user, as that will just confuse them with an error about
-			// a path they did not explicitly watch themselves.
-			w.Remove(name)
-		}
-	}
-
-	return nil
-}
-
-const (
-	// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
-	noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME
-
-	// Block for 100 ms on each call to kevent
-	keventWaitTime = 100e6
-)
-
-// addWatch adds path to the watched file set.
-// The flags are interpreted as described in kevent(2).
-func (w *Watcher) addWatch(path string, flags uint32) error {
-	path = filepath.Clean(path)
-	w.mu.Lock()
-	if w.isClosed {
-		w.mu.Unlock()
-		return errors.New("kevent instance already closed")
-	}
-	w.mu.Unlock()
-
-	watchDir := false
-
-	w.wmut.Lock()
-	watchfd, found := w.watches[path]
-	w.wmut.Unlock()
-	if !found {
-		fi, errstat := os.Lstat(path)
-		if errstat != nil {
-			return errstat
-		}
-
-		// don't watch socket
-		if fi.Mode()&os.ModeSocket == os.ModeSocket {
-			return nil
-		}
-
-		// Follow Symlinks
-		// Unfortunately, Linux can add bogus symlinks to watch list without
-		// issue, and Windows can't do symlinks period (AFAIK). To  maintain
-		// consistency, we will act like everything is fine. There will simply
-		// be no file events for broken symlinks.
-		// Hence the returns of nil on errors.
-		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
-			path, err := filepath.EvalSymlinks(path)
-			if err != nil {
-				return nil
-			}
-
-			fi, errstat = os.Lstat(path)
-			if errstat != nil {
-				return nil
-			}
-		}
-
-		fd, errno := syscall.Open(path, openMode, 0700)
-		if fd == -1 {
-			return os.NewSyscallError("Open", errno)
-		}
-		watchfd = fd
-
-		w.wmut.Lock()
-		w.watches[path] = watchfd
-		w.wmut.Unlock()
-
-		w.pmut.Lock()
-		w.paths[watchfd] = path
-		w.finfo[watchfd] = fi
-		w.pmut.Unlock()
-	}
-	// Watch the directory if it has not been watched before.
-	w.pmut.Lock()
-	w.enmut.Lock()
-	if w.finfo[watchfd].IsDir() &&
-		(flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
-		(!found || (w.enFlags[path]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE) {
-		watchDir = true
-	}
-	w.enmut.Unlock()
-	w.pmut.Unlock()
-
-	w.enmut.Lock()
-	w.enFlags[path] = flags
-	w.enmut.Unlock()
-
-	var kbuf [1]syscall.Kevent_t
-	watchEntry := &kbuf[0]
-	watchEntry.Fflags = flags
-	syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_ADD|syscall.EV_CLEAR)
-	entryFlags := watchEntry.Flags
-	success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
-	if success == -1 {
-		return errno
-	} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
-		return errors.New("kevent add error")
-	}
-
-	if watchDir {
-		errdir := w.watchDirectoryFiles(path)
-		if errdir != nil {
-			return errdir
-		}
-	}
-	return nil
-}
-
-// readEvents reads from the kqueue file descriptor, converts the
-// received events into Event objects and sends them via the Events channel
-func (w *Watcher) readEvents() {
-	var (
-		keventbuf [10]syscall.Kevent_t // Event buffer
-		kevents   []syscall.Kevent_t   // Received events
-		twait     *syscall.Timespec    // Time to block waiting for events
-		n         int                  // Number of events returned from kevent
-		errno     error                // Syscall errno
-	)
-	kevents = keventbuf[0:0]
-	twait = new(syscall.Timespec)
-	*twait = syscall.NsecToTimespec(keventWaitTime)
-
-	for {
-		// See if there is a message on the "done" channel
-		var done bool
-		select {
-		case done = <-w.done:
-		default:
-		}
-
-		// If "done" message is received
-		if done {
-			errno := syscall.Close(w.kq)
-			if errno != nil {
-				w.Errors <- os.NewSyscallError("close", errno)
-			}
-			close(w.Events)
-			close(w.Errors)
-			return
-		}
-
-		// Get new events
-		if len(kevents) == 0 {
-			n, errno = syscall.Kevent(w.kq, nil, keventbuf[:], twait)
-
-			// EINTR is okay, basically the syscall was interrupted before
-			// timeout expired.
-			if errno != nil && errno != syscall.EINTR {
-				w.Errors <- os.NewSyscallError("kevent", errno)
-				continue
-			}
-
-			// Received some events
-			if n > 0 {
-				kevents = keventbuf[0:n]
-			}
-		}
-
-		// Flush the events we received to the Events channel
-		for len(kevents) > 0 {
-			watchEvent := &kevents[0]
-			mask := uint32(watchEvent.Fflags)
-			w.pmut.Lock()
-			name := w.paths[int(watchEvent.Ident)]
-			fileInfo := w.finfo[int(watchEvent.Ident)]
-			w.pmut.Unlock()
-
-			event := newEvent(name, mask, false)
-
-			if fileInfo != nil && fileInfo.IsDir() && !(event.Op&Remove == Remove) {
-				// Double check to make sure the directory exist. This can happen when
-				// we do a rm -fr on a recursively watched folders and we receive a
-				// modification event first but the folder has been deleted and later
-				// receive the delete event
-				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
-					// mark is as delete event
-					event.Op |= Remove
-				}
-			}
-
-			if fileInfo != nil && fileInfo.IsDir() && event.Op&Write == Write && !(event.Op&Remove == Remove) {
-				w.sendDirectoryChangeEvents(event.Name)
-			} else {
-				// Send the event on the Events channel
-				w.Events <- event
-			}
-
-			// Move to next event
-			kevents = kevents[1:]
-
-			if event.Op&Rename == Rename {
-				w.Remove(event.Name)
-				w.femut.Lock()
-				delete(w.fileExists, event.Name)
-				w.femut.Unlock()
-			}
-			if event.Op&Remove == Remove {
-				w.Remove(event.Name)
-				w.femut.Lock()
-				delete(w.fileExists, event.Name)
-				w.femut.Unlock()
-
-				// Look for a file that may have overwritten this
-				// (ie mv f1 f2 will delete f2 then create f2)
-				fileDir, _ := filepath.Split(event.Name)
-				fileDir = filepath.Clean(fileDir)
-				w.wmut.Lock()
-				_, found := w.watches[fileDir]
-				w.wmut.Unlock()
-				if found {
-					// make sure the directory exist before we watch for changes. When we
-					// do a recursive watch and perform rm -fr, the parent directory might
-					// have gone missing, ignore the missing directory and let the
-					// upcoming delete event remove the watch form the parent folder
-					if _, err := os.Lstat(fileDir); !os.IsNotExist(err) {
-						w.sendDirectoryChangeEvents(fileDir)
-					}
-				}
-			}
-		}
-	}
-}
-
-// newEvent returns an platform-independent Event based on kqueue Fflags.
-func newEvent(name string, mask uint32, create bool) Event {
-	e := Event{Name: name}
-	if create {
-		e.Op |= Create
-	}
-	if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
-		e.Op |= Remove
-	}
-	if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
-		e.Op |= Write
-	}
-	if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
-		e.Op |= Rename
-	}
-	if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
-		e.Op |= Chmod
-	}
-	return e
-}
-
-func (w *Watcher) watchDirectoryFiles(dirPath string) error {
-	// Get all files
-	files, err := ioutil.ReadDir(dirPath)
-	if err != nil {
-		return err
-	}
-
-	// Search for new files
-	for _, fileInfo := range files {
-		filePath := filepath.Join(dirPath, fileInfo.Name())
-
-		if fileInfo.IsDir() == false {
-			// Watch file to mimic linux fsnotify
-			e := w.addWatch(filePath, noteAllEvents)
-			if e != nil {
-				return e
-			}
-		} else {
-			// If the user is currently watching directory
-			// we want to preserve the flags used
-			w.enmut.Lock()
-			currFlags, found := w.enFlags[filePath]
-			w.enmut.Unlock()
-			var newFlags uint32 = syscall.NOTE_DELETE
-			if found {
-				newFlags |= currFlags
-			}
-
-			// Linux gives deletes if not explicitly watching
-			e := w.addWatch(filePath, newFlags)
-			if e != nil {
-				return e
-			}
-		}
-		w.femut.Lock()
-		w.fileExists[filePath] = true
-		w.femut.Unlock()
-	}
-
-	return nil
-}
-
-// sendDirectoryEvents searches the directory for newly created files
-// and sends them over the event channel. This functionality is to have
-// the BSD version of fsnotify match linux fsnotify which provides a
-// create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
-	// Get all files
-	files, err := ioutil.ReadDir(dirPath)
-	if err != nil {
-		w.Errors <- err
-	}
-
-	// Search for new files
-	for _, fileInfo := range files {
-		filePath := filepath.Join(dirPath, fileInfo.Name())
-		w.femut.Lock()
-		_, doesExist := w.fileExists[filePath]
-		w.femut.Unlock()
-		if !doesExist {
-			// Send create event (mask=0)
-			event := newEvent(filePath, 0, true)
-			w.Events <- event
-		}
-
-		// watchDirectoryFiles (but without doing another ReadDir)
-		if fileInfo.IsDir() == false {
-			// Watch file to mimic linux fsnotify
-			w.addWatch(filePath, noteAllEvents)
-		} else {
-			// If the user is currently watching directory
-			// we want to preserve the flags used
-			w.enmut.Lock()
-			currFlags, found := w.enFlags[filePath]
-			w.enmut.Unlock()
-			var newFlags uint32 = syscall.NOTE_DELETE
-			if found {
-				newFlags |= currFlags
-			}
-
-			// Linux gives deletes if not explicitly watching
-			w.addWatch(filePath, newFlags)
-		}
-
-		w.femut.Lock()
-		w.fileExists[filePath] = true
-		w.femut.Unlock()
-	}
-}
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_bsd.go b/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_bsd.go
deleted file mode 100644
index c57ccb4..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_bsd.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd openbsd netbsd dragonfly
-
-package fsnotify
-
-import "syscall"
-
-const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_darwin.go b/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_darwin.go
deleted file mode 100644
index 174b2c3..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/open_mode_darwin.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin
-
-package fsnotify
-
-import "syscall"
-
-// note: this constant is not defined on BSD
-const openMode = syscall.O_EVTONLY
diff --git a/vendor/src/github.com/go-fsnotify/fsnotify/windows.go b/vendor/src/github.com/go-fsnotify/fsnotify/windows.go
deleted file mode 100644
index 8115852..0000000
--- a/vendor/src/github.com/go-fsnotify/fsnotify/windows.go
+++ /dev/null
@@ -1,561 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-package fsnotify
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-	"sync"
-	"syscall"
-	"unsafe"
-)
-
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct {
-	Events   chan Event
-	Errors   chan error
-	isClosed bool           // Set to true when Close() is first called
-	mu       sync.Mutex     // Map access
-	port     syscall.Handle // Handle to completion port
-	watches  watchMap       // Map of watches (key: i-number)
-	input    chan *input    // Inputs to the reader are sent on this channel
-	quit     chan chan<- error
-}
-
-// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
-func NewWatcher() (*Watcher, error) {
-	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
-	if e != nil {
-		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
-	}
-	w := &Watcher{
-		port:    port,
-		watches: make(watchMap),
-		input:   make(chan *input, 1),
-		Events:  make(chan Event, 50),
-		Errors:  make(chan error),
-		quit:    make(chan chan<- error, 1),
-	}
-	go w.readEvents()
-	return w, nil
-}
-
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-	if w.isClosed {
-		return nil
-	}
-	w.isClosed = true
-
-	// Send "quit" message to the reader goroutine
-	ch := make(chan error)
-	w.quit <- ch
-	if err := w.wakeupReader(); err != nil {
-		return err
-	}
-	return <-ch
-}
-
-// Add starts watching the named file or directory (non-recursively).
-func (w *Watcher) Add(name string) error {
-	if w.isClosed {
-		return errors.New("watcher already closed")
-	}
-	in := &input{
-		op:    opAddWatch,
-		path:  filepath.Clean(name),
-		flags: sys_FS_ALL_EVENTS,
-		reply: make(chan error),
-	}
-	w.input <- in
-	if err := w.wakeupReader(); err != nil {
-		return err
-	}
-	return <-in.reply
-}
-
-// Remove stops watching the the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
-	in := &input{
-		op:    opRemoveWatch,
-		path:  filepath.Clean(name),
-		reply: make(chan error),
-	}
-	w.input <- in
-	if err := w.wakeupReader(); err != nil {
-		return err
-	}
-	return <-in.reply
-}
-
-const (
-	// Options for AddWatch
-	sys_FS_ONESHOT = 0x80000000
-	sys_FS_ONLYDIR = 0x1000000
-
-	// Events
-	sys_FS_ACCESS      = 0x1
-	sys_FS_ALL_EVENTS  = 0xfff
-	sys_FS_ATTRIB      = 0x4
-	sys_FS_CLOSE       = 0x18
-	sys_FS_CREATE      = 0x100
-	sys_FS_DELETE      = 0x200
-	sys_FS_DELETE_SELF = 0x400
-	sys_FS_MODIFY      = 0x2
-	sys_FS_MOVE        = 0xc0
-	sys_FS_MOVED_FROM  = 0x40
-	sys_FS_MOVED_TO    = 0x80
-	sys_FS_MOVE_SELF   = 0x800
-
-	// Special events
-	sys_FS_IGNORED    = 0x8000
-	sys_FS_Q_OVERFLOW = 0x4000
-)
-
-func newEvent(name string, mask uint32) Event {
-	e := Event{Name: name}
-	if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
-		e.Op |= Create
-	}
-	if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
-		e.Op |= Remove
-	}
-	if mask&sys_FS_MODIFY == sys_FS_MODIFY {
-		e.Op |= Write
-	}
-	if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
-		e.Op |= Rename
-	}
-	if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
-		e.Op |= Chmod
-	}
-	return e
-}
-
-const (
-	opAddWatch = iota
-	opRemoveWatch
-)
-
-const (
-	provisional uint64 = 1 << (32 + iota)
-)
-
-type input struct {
-	op    int
-	path  string
-	flags uint32
-	reply chan error
-}
-
-type inode struct {
-	handle syscall.Handle
-	volume uint32
-	index  uint64
-}
-
-type watch struct {
-	ov     syscall.Overlapped
-	ino    *inode            // i-number
-	path   string            // Directory path
-	mask   uint64            // Directory itself is being watched with these notify flags
-	names  map[string]uint64 // Map of names being watched and their notify flags
-	rename string            // Remembers the old name while renaming a file
-	buf    [4096]byte
-}
-
-type indexMap map[uint64]*watch
-type watchMap map[uint32]indexMap
-
-func (w *Watcher) wakeupReader() error {
-	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
-	if e != nil {
-		return os.NewSyscallError("PostQueuedCompletionStatus", e)
-	}
-	return nil
-}
-
-func getDir(pathname string) (dir string, err error) {
-	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
-	if e != nil {
-		return "", os.NewSyscallError("GetFileAttributes", e)
-	}
-	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
-		dir = pathname
-	} else {
-		dir, _ = filepath.Split(pathname)
-		dir = filepath.Clean(dir)
-	}
-	return
-}
-
-func getIno(path string) (ino *inode, err error) {
-	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
-		syscall.FILE_LIST_DIRECTORY,
-		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
-		nil, syscall.OPEN_EXISTING,
-		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
-	if e != nil {
-		return nil, os.NewSyscallError("CreateFile", e)
-	}
-	var fi syscall.ByHandleFileInformation
-	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
-		syscall.CloseHandle(h)
-		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
-	}
-	ino = &inode{
-		handle: h,
-		volume: fi.VolumeSerialNumber,
-		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
-	}
-	return ino, nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) get(ino *inode) *watch {
-	if i := m[ino.volume]; i != nil {
-		return i[ino.index]
-	}
-	return nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) set(ino *inode, watch *watch) {
-	i := m[ino.volume]
-	if i == nil {
-		i = make(indexMap)
-		m[ino.volume] = i
-	}
-	i[ino.index] = watch
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
-	dir, err := getDir(pathname)
-	if err != nil {
-		return err
-	}
-	if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
-		return nil
-	}
-	ino, err := getIno(dir)
-	if err != nil {
-		return err
-	}
-	w.mu.Lock()
-	watchEntry := w.watches.get(ino)
-	w.mu.Unlock()
-	if watchEntry == nil {
-		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
-			syscall.CloseHandle(ino.handle)
-			return os.NewSyscallError("CreateIoCompletionPort", e)
-		}
-		watchEntry = &watch{
-			ino:   ino,
-			path:  dir,
-			names: make(map[string]uint64),
-		}
-		w.mu.Lock()
-		w.watches.set(ino, watchEntry)
-		w.mu.Unlock()
-		flags |= provisional
-	} else {
-		syscall.CloseHandle(ino.handle)
-	}
-	if pathname == dir {
-		watchEntry.mask |= flags
-	} else {
-		watchEntry.names[filepath.Base(pathname)] |= flags
-	}
-	if err = w.startRead(watchEntry); err != nil {
-		return err
-	}
-	if pathname == dir {
-		watchEntry.mask &= ^provisional
-	} else {
-		watchEntry.names[filepath.Base(pathname)] &= ^provisional
-	}
-	return nil
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
-	dir, err := getDir(pathname)
-	if err != nil {
-		return err
-	}
-	ino, err := getIno(dir)
-	if err != nil {
-		return err
-	}
-	w.mu.Lock()
-	watch := w.watches.get(ino)
-	w.mu.Unlock()
-	if watch == nil {
-		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
-	}
-	if pathname == dir {
-		w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
-		watch.mask = 0
-	} else {
-		name := filepath.Base(pathname)
-		w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
-		delete(watch.names, name)
-	}
-	return w.startRead(watch)
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
-	for name, mask := range watch.names {
-		if mask&provisional == 0 {
-			w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
-		}
-		delete(watch.names, name)
-	}
-	if watch.mask != 0 {
-		if watch.mask&provisional == 0 {
-			w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
-		}
-		watch.mask = 0
-	}
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
-	if e := syscall.CancelIo(watch.ino.handle); e != nil {
-		w.Errors <- os.NewSyscallError("CancelIo", e)
-		w.deleteWatch(watch)
-	}
-	mask := toWindowsFlags(watch.mask)
-	for _, m := range watch.names {
-		mask |= toWindowsFlags(m)
-	}
-	if mask == 0 {
-		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
-			w.Errors <- os.NewSyscallError("CloseHandle", e)
-		}
-		w.mu.Lock()
-		delete(w.watches[watch.ino.volume], watch.ino.index)
-		w.mu.Unlock()
-		return nil
-	}
-	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
-		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
-	if e != nil {
-		err := os.NewSyscallError("ReadDirectoryChanges", e)
-		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
-			// Watched directory was probably removed
-			if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
-				if watch.mask&sys_FS_ONESHOT != 0 {
-					watch.mask = 0
-				}
-			}
-			err = nil
-		}
-		w.deleteWatch(watch)
-		w.startRead(watch)
-		return err
-	}
-	return nil
-}
-
-// readEvents reads from the I/O completion port, converts the
-// received events into Event objects and sends them via the Events channel.
-// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
-	var (
-		n, key uint32
-		ov     *syscall.Overlapped
-	)
-	runtime.LockOSThread()
-
-	for {
-		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
-		watch := (*watch)(unsafe.Pointer(ov))
-
-		if watch == nil {
-			select {
-			case ch := <-w.quit:
-				w.mu.Lock()
-				var indexes []indexMap
-				for _, index := range w.watches {
-					indexes = append(indexes, index)
-				}
-				w.mu.Unlock()
-				for _, index := range indexes {
-					for _, watch := range index {
-						w.deleteWatch(watch)
-						w.startRead(watch)
-					}
-				}
-				var err error
-				if e := syscall.CloseHandle(w.port); e != nil {
-					err = os.NewSyscallError("CloseHandle", e)
-				}
-				close(w.Events)
-				close(w.Errors)
-				ch <- err
-				return
-			case in := <-w.input:
-				switch in.op {
-				case opAddWatch:
-					in.reply <- w.addWatch(in.path, uint64(in.flags))
-				case opRemoveWatch:
-					in.reply <- w.remWatch(in.path)
-				}
-			default:
-			}
-			continue
-		}
-
-		switch e {
-		case syscall.ERROR_MORE_DATA:
-			if watch == nil {
-				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
-			} else {
-				// The i/o succeeded but the buffer is full.
-				// In theory we should be building up a full packet.
-				// In practice we can get away with just carrying on.
-				n = uint32(unsafe.Sizeof(watch.buf))
-			}
-		case syscall.ERROR_ACCESS_DENIED:
-			// Watched directory was probably removed
-			w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
-			w.deleteWatch(watch)
-			w.startRead(watch)
-			continue
-		case syscall.ERROR_OPERATION_ABORTED:
-			// CancelIo was called on this handle
-			continue
-		default:
-			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
-			continue
-		case nil:
-		}
-
-		var offset uint32
-		for {
-			if n == 0 {
-				w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
-				w.Errors <- errors.New("short read in readEvents()")
-				break
-			}
-
-			// Point "raw" to the event in the buffer
-			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
-			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
-			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
-			fullname := watch.path + "\\" + name
-
-			var mask uint64
-			switch raw.Action {
-			case syscall.FILE_ACTION_REMOVED:
-				mask = sys_FS_DELETE_SELF
-			case syscall.FILE_ACTION_MODIFIED:
-				mask = sys_FS_MODIFY
-			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
-				watch.rename = name
-			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
-				if watch.names[watch.rename] != 0 {
-					watch.names[name] |= watch.names[watch.rename]
-					delete(watch.names, watch.rename)
-					mask = sys_FS_MOVE_SELF
-				}
-			}
-
-			sendNameEvent := func() {
-				if w.sendEvent(fullname, watch.names[name]&mask) {
-					if watch.names[name]&sys_FS_ONESHOT != 0 {
-						delete(watch.names, name)
-					}
-				}
-			}
-			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
-				sendNameEvent()
-			}
-			if raw.Action == syscall.FILE_ACTION_REMOVED {
-				w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
-				delete(watch.names, name)
-			}
-			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
-				if watch.mask&sys_FS_ONESHOT != 0 {
-					watch.mask = 0
-				}
-			}
-			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
-				fullname = watch.path + "\\" + watch.rename
-				sendNameEvent()
-			}
-
-			// Move to the next event in the buffer
-			if raw.NextEntryOffset == 0 {
-				break
-			}
-			offset += raw.NextEntryOffset
-
-			// Error!
-			if offset >= n {
-				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
-				break
-			}
-		}
-
-		if err := w.startRead(watch); err != nil {
-			w.Errors <- err
-		}
-	}
-}
-
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
-	if mask == 0 {
-		return false
-	}
-	event := newEvent(name, uint32(mask))
-	select {
-	case ch := <-w.quit:
-		w.quit <- ch
-	case w.Events <- event:
-	}
-	return true
-}
-
-func toWindowsFlags(mask uint64) uint32 {
-	var m uint32
-	if mask&sys_FS_ACCESS != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
-	}
-	if mask&sys_FS_MODIFY != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
-	}
-	if mask&sys_FS_ATTRIB != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
-	}
-	if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
-		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
-	}
-	return m
-}
-
-func toFSnotifyFlags(action uint32) uint64 {
-	switch action {
-	case syscall.FILE_ACTION_ADDED:
-		return sys_FS_CREATE
-	case syscall.FILE_ACTION_REMOVED:
-		return sys_FS_DELETE
-	case syscall.FILE_ACTION_MODIFIED:
-		return sys_FS_MODIFY
-	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
-		return sys_FS_MOVED_FROM
-	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
-		return sys_FS_MOVED_TO
-	}
-	return 0
-}
diff --git a/vendor/src/github.com/gorilla/mux/mux.go b/vendor/src/github.com/gorilla/mux/mux.go
index 8b23c39..5b5f8e7 100644
--- a/vendor/src/github.com/gorilla/mux/mux.go
+++ b/vendor/src/github.com/gorilla/mux/mux.go
@@ -87,10 +87,10 @@
 		setCurrentRoute(req, match.Route)
 	}
 	if handler == nil {
-		if r.NotFoundHandler == nil {
-			r.NotFoundHandler = http.NotFoundHandler()
-		}
 		handler = r.NotFoundHandler
+		if handler == nil {
+			handler = http.NotFoundHandler()
+		}
 	}
 	if !r.KeepContext {
 		defer context.Clear(req)
diff --git a/vendor/src/github.com/gorilla/mux/mux_test.go b/vendor/src/github.com/gorilla/mux/mux_test.go
index 0e2e480..e455bce 100644
--- a/vendor/src/github.com/gorilla/mux/mux_test.go
+++ b/vendor/src/github.com/gorilla/mux/mux_test.go
@@ -463,6 +463,15 @@
 			shouldMatch: true,
 		},
 		{
+			title:       "Queries route, match with a query string out of order",
+			route:       new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"),
+			request:     newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
 			title:       "Queries route, bad query",
 			route:       new(Route).Queries("foo", "bar", "baz", "ding"),
 			request:     newRequest("GET", "http://localhost?foo=bar&baz=dong"),
@@ -471,6 +480,42 @@
 			path:        "",
 			shouldMatch: false,
 		},
+		{
+			title:       "Queries route with pattern, match",
+			route:       new(Route).Queries("foo", "{v1}"),
+			request:     newRequest("GET", "http://localhost?foo=bar"),
+			vars:        map[string]string{"v1": "bar"},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with multiple patterns, match",
+			route:       new(Route).Queries("foo", "{v1}", "baz", "{v2}"),
+			request:     newRequest("GET", "http://localhost?foo=bar&baz=ding"),
+			vars:        map[string]string{"v1": "bar", "v2": "ding"},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with regexp pattern, match",
+			route:       new(Route).Queries("foo", "{v1:[0-9]+}"),
+			request:     newRequest("GET", "http://localhost?foo=10"),
+			vars:        map[string]string{"v1": "10"},
+			host:        "",
+			path:        "",
+			shouldMatch: true,
+		},
+		{
+			title:       "Queries route with regexp pattern, regexp does not match",
+			route:       new(Route).Queries("foo", "{v1:[0-9]+}"),
+			request:     newRequest("GET", "http://localhost?foo=a"),
+			vars:        map[string]string{},
+			host:        "",
+			path:        "",
+			shouldMatch: false,
+		},
 	}
 
 	for _, test := range tests {
diff --git a/vendor/src/github.com/gorilla/mux/old_test.go b/vendor/src/github.com/gorilla/mux/old_test.go
index 4253059..1f7c190 100644
--- a/vendor/src/github.com/gorilla/mux/old_test.go
+++ b/vendor/src/github.com/gorilla/mux/old_test.go
@@ -329,35 +329,6 @@
 	},
 }
 
-type queryMatcherTest struct {
-	matcher queryMatcher
-	url     string
-	result  bool
-}
-
-var queryMatcherTests = []queryMatcherTest{
-	{
-		matcher: queryMatcher(map[string]string{"foo": "bar", "baz": "ding"}),
-		url:     "http://localhost:8080/?foo=bar&baz=ding",
-		result:  true,
-	},
-	{
-		matcher: queryMatcher(map[string]string{"foo": "", "baz": ""}),
-		url:     "http://localhost:8080/?foo=anything&baz=anything",
-		result:  true,
-	},
-	{
-		matcher: queryMatcher(map[string]string{"foo": "ding", "baz": "bar"}),
-		url:     "http://localhost:8080/?foo=bar&baz=ding",
-		result:  false,
-	},
-	{
-		matcher: queryMatcher(map[string]string{"bar": "foo", "ding": "baz"}),
-		url:     "http://localhost:8080/?foo=bar&baz=ding",
-		result:  false,
-	},
-}
-
 type schemeMatcherTest struct {
 	matcher schemeMatcher
 	url     string
@@ -519,23 +490,8 @@
 	}
 }
 
-func TestQueryMatcher(t *testing.T) {
-	for _, v := range queryMatcherTests {
-		request, _ := http.NewRequest("GET", v.url, nil)
-		var routeMatch RouteMatch
-		result := v.matcher.Match(request, &routeMatch)
-		if result != v.result {
-			if v.result {
-				t.Errorf("%#v: should match %v.", v.matcher, v.url)
-			} else {
-				t.Errorf("%#v: should not match %v.", v.matcher, v.url)
-			}
-		}
-	}
-}
-
 func TestSchemeMatcher(t *testing.T) {
-	for _, v := range queryMatcherTests {
+	for _, v := range schemeMatcherTests {
 		request, _ := http.NewRequest("GET", v.url, nil)
 		var routeMatch RouteMatch
 		result := v.matcher.Match(request, &routeMatch)
@@ -735,7 +691,7 @@
 	}
 
 	for pattern, paths := range tests {
-		p, _ = newRouteRegexp(pattern, false, false, false)
+		p, _ = newRouteRegexp(pattern, false, false, false, false)
 		for path, result := range paths {
 			matches = p.regexp.FindStringSubmatch(path)
 			if result == nil {
diff --git a/vendor/src/github.com/gorilla/mux/regexp.go b/vendor/src/github.com/gorilla/mux/regexp.go
index 925f268..a630548 100644
--- a/vendor/src/github.com/gorilla/mux/regexp.go
+++ b/vendor/src/github.com/gorilla/mux/regexp.go
@@ -14,7 +14,7 @@
 )
 
 // newRouteRegexp parses a route template and returns a routeRegexp,
-// used to match a host or path.
+// used to match a host, a path or a query string.
 //
 // It will extract named variables, assemble a regexp to be matched, create
 // a "reverse" template to build URLs and compile regexps to validate variable
@@ -23,7 +23,7 @@
 // Previously we accepted only Python-like identifiers for variable
 // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
 // name and pattern can't be empty, and names can't contain a colon.
-func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*routeRegexp, error) {
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
 	// Check if it is well-formed.
 	idxs, errBraces := braceIndices(tpl)
 	if errBraces != nil {
@@ -33,11 +33,15 @@
 	template := tpl
 	// Now let's parse it.
 	defaultPattern := "[^/]+"
-	if matchHost {
+	if matchQuery {
+		defaultPattern = "[^?&]+"
+		matchPrefix = true
+	} else if matchHost {
 		defaultPattern = "[^.]+"
-		matchPrefix, strictSlash = false, false
+		matchPrefix = false
 	}
-	if matchPrefix {
+	// Only match strict slash if not matching
+	if matchPrefix || matchHost || matchQuery {
 		strictSlash = false
 	}
 	// Set a flag for strictSlash.
@@ -48,7 +52,10 @@
 	}
 	varsN := make([]string, len(idxs)/2)
 	varsR := make([]*regexp.Regexp, len(idxs)/2)
-	pattern := bytes.NewBufferString("^")
+	pattern := bytes.NewBufferString("")
+	if !matchQuery {
+		pattern.WriteByte('^')
+	}
 	reverse := bytes.NewBufferString("")
 	var end int
 	var err error
@@ -100,6 +107,7 @@
 	return &routeRegexp{
 		template:    template,
 		matchHost:   matchHost,
+		matchQuery:  matchQuery,
 		strictSlash: strictSlash,
 		regexp:      reg,
 		reverse:     reverse.String(),
@@ -113,8 +121,10 @@
 type routeRegexp struct {
 	// The unmodified template.
 	template string
-	// True for host match, false for path match.
+	// True for host match, false for path or query string match.
 	matchHost bool
+	// True for query string match, false for path and host match.
+	matchQuery bool
 	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
 	strictSlash bool
 	// Expanded regexp.
@@ -130,7 +140,11 @@
 // Match matches the regexp against the URL host or path.
 func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
 	if !r.matchHost {
-		return r.regexp.MatchString(req.URL.Path)
+		if r.matchQuery {
+			return r.regexp.MatchString(req.URL.RawQuery)
+		} else {
+			return r.regexp.MatchString(req.URL.Path)
+		}
 	}
 	return r.regexp.MatchString(getHost(req))
 }
@@ -196,8 +210,9 @@
 
 // routeRegexpGroup groups the route matchers that carry variables.
 type routeRegexpGroup struct {
-	host *routeRegexp
-	path *routeRegexp
+	host    *routeRegexp
+	path    *routeRegexp
+	queries []*routeRegexp
 }
 
 // setMatch extracts the variables from the URL once a route matches.
@@ -234,17 +249,28 @@
 			}
 		}
 	}
+	// Store query string variables.
+	rawQuery := req.URL.RawQuery
+	for _, q := range v.queries {
+		queryVars := q.regexp.FindStringSubmatch(rawQuery)
+		if queryVars != nil {
+			for k, v := range q.varsN {
+				m.Vars[v] = queryVars[k+1]
+			}
+		}
+	}
 }
 
 // getHost tries its best to return the request host.
 func getHost(r *http.Request) string {
-	if !r.URL.IsAbs() {
-		host := r.Host
-		// Slice off any port information.
-		if i := strings.Index(host, ":"); i != -1 {
-			host = host[:i]
-		}
-		return host
+	if r.URL.IsAbs() {
+		return r.URL.Host
 	}
-	return r.URL.Host
+	host := r.Host
+	// Slice off any port information.
+	if i := strings.Index(host, ":"); i != -1 {
+		host = host[:i]
+	}
+	return host
+
 }
diff --git a/vendor/src/github.com/gorilla/mux/route.go b/vendor/src/github.com/gorilla/mux/route.go
index 5cb2526..c310e66 100644
--- a/vendor/src/github.com/gorilla/mux/route.go
+++ b/vendor/src/github.com/gorilla/mux/route.go
@@ -135,12 +135,12 @@
 }
 
 // addRegexpMatcher adds a host or path matcher and builder to a route.
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix bool) error {
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
 	if r.err != nil {
 		return r.err
 	}
 	r.regexp = r.getRegexpGroup()
-	if !matchHost {
+	if !matchHost && !matchQuery {
 		if len(tpl) == 0 || tpl[0] != '/' {
 			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
 		}
@@ -148,10 +148,15 @@
 			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
 		}
 	}
-	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, r.strictSlash)
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
 	if err != nil {
 		return err
 	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
 	if matchHost {
 		if r.regexp.path != nil {
 			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
@@ -165,7 +170,11 @@
 				return err
 			}
 		}
-		r.regexp.path = rr
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
 	}
 	r.addMatcher(rr)
 	return nil
@@ -219,7 +228,7 @@
 // Variable names must be unique in a given route. They can be retrieved
 // calling mux.Vars(request).
 func (r *Route) Host(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, true, false)
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
 	return r
 }
 
@@ -278,7 +287,7 @@
 // Variable names must be unique in a given route. They can be retrieved
 // calling mux.Vars(request).
 func (r *Route) Path(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, false, false)
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
 	return r
 }
 
@@ -294,35 +303,42 @@
 // Also note that the setting of Router.StrictSlash() has no effect on routes
 // with a PathPrefix matcher.
 func (r *Route) PathPrefix(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, false, true)
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
 	return r
 }
 
 // Query ----------------------------------------------------------------------
 
-// queryMatcher matches the request against URL queries.
-type queryMatcher map[string]string
-
-func (m queryMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	return matchMap(m, r.URL.Query(), false)
-}
-
 // Queries adds a matcher for URL query values.
-// It accepts a sequence of key/value pairs. For example:
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
 //
 //     r := mux.NewRouter()
-//     r.Queries("foo", "bar", "baz", "ding")
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
 //
 // The above route will only match if the URL contains the defined queries
-// values, e.g.: ?foo=bar&baz=ding.
+// values, e.g.: ?foo=bar&id=42.
 //
 // It the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to me matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
 func (r *Route) Queries(pairs ...string) *Route {
-	if r.err == nil {
-		var queries map[string]string
-		queries, r.err = mapFromPairs(pairs...)
-		return r.addMatcher(queryMatcher(queries))
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
 	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil {
+			return r
+		}
+	}
+
 	return r
 }
 
@@ -498,8 +514,9 @@
 		} else {
 			// Copy.
 			r.regexp = &routeRegexpGroup{
-				host: regexp.host,
-				path: regexp.path,
+				host:    regexp.host,
+				path:    regexp.path,
+				queries: regexp.queries,
 			}
 		}
 	}
diff --git a/vendor/src/github.com/kr/pty/ztypes_arm64.go b/vendor/src/github.com/kr/pty/ztypes_arm64.go
new file mode 100644
index 0000000..6c29a4b
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/ztypes_arm64.go
@@ -0,0 +1,11 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+// +build arm64
+
+package pty
+
+type (
+	_C_int  int32
+	_C_uint uint32
+)
diff --git a/vendor/src/github.com/mistifyio/go-zfs/.gitignore b/vendor/src/github.com/mistifyio/go-zfs/.gitignore
new file mode 100644
index 0000000..8000dd9
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/.gitignore
@@ -0,0 +1 @@
+.vagrant
diff --git a/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
new file mode 100644
index 0000000..66aab8e
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
@@ -0,0 +1,51 @@
+## How to Contribute ##
+
+We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute.
+
+### Reporting issues ###
+
+We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests.
+
+If you find a bug:
+
+* Use the GitHub issue search to check whether the bug has already been reported.
+* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
+* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
+
+### Pull requests ###
+
+We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
+
+[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
+
+    $ git clone git@github.com:<your-username>/go-zfs.git
+    $ cd go-zfs
+    $ git remote add upstream https://github.com/mistifyio/go-zfs.git
+
+If you need to pull new changes committed upstream:
+
+    $ git checkout master
+    $ git fetch upstream
+    $ git merge upstream/master
+
+Don' work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+
+    $ git checkout -b <feature-branch-name>
+
+Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+
+    $ git commit -m "Issue #<issue-number> - <commit-message>"
+
+Push your feature branch to your fork.
+
+    $ git push origin <feature-branch-name>
+
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+
+* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/). 
+* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+
+**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
+
+----
+Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/vendor/src/github.com/mistifyio/go-zfs/LICENSE b/vendor/src/github.com/mistifyio/go-zfs/LICENSE
new file mode 100644
index 0000000..f4c265c
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright (c) 2014, OmniTI Computer Consulting, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/src/github.com/mistifyio/go-zfs/README.md b/vendor/src/github.com/mistifyio/go-zfs/README.md
new file mode 100644
index 0000000..2515e58
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/README.md
@@ -0,0 +1,54 @@
+# Go Wrapper for ZFS #
+
+Simple wrappers for ZFS command line tools.
+
+[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs)
+
+## Requirements ##
+
+You need a working ZFS setup.  To use on Ubuntu 14.04, setup ZFS:
+
+    sudo apt-get install python-software-properties
+    sudo apt-add-repository ppa:zfs-native/stable
+    sudo apt-get update
+    sudo apt-get install ubuntu-zfs libzfs-dev
+
+Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go, use http://golang.org/doc/install
+
+Generally you need root privileges to use anything zfs related.
+
+## Status ##
+
+This has been only been tested on Ubuntu 14.04
+
+In the future, we hope to work directly with libzfs.
+
+# Hacking #
+
+The tests have decent examples for most functions.
+
+```go
+//assuming a zpool named test
+//error handling ommitted
+
+
+f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ok(t, err)
+
+s, err := f.Snapshot("test", nil)
+ok(t, err)
+
+// snapshot is named "test/snapshot-test@test"
+
+c, err := s.Clone("test/clone-test", nil)
+
+err := c.Destroy()
+err := s.Destroy()
+err := f.Destroy()
+
+```
+
+# Contributing #
+
+See the [contributing guidelines](./CONTRIBUTING.md)
+
diff --git a/vendor/src/github.com/mistifyio/go-zfs/error.go b/vendor/src/github.com/mistifyio/go-zfs/error.go
new file mode 100644
index 0000000..5408ccd
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/error.go
@@ -0,0 +1,18 @@
+package zfs
+
+import (
+	"fmt"
+)
+
+// Error is an error which is returned when the `zfs` or `zpool` shell
+// commands return with a non-zero exit code.
+type Error struct {
+	Err    error
+	Debug  string
+	Stderr string
+}
+
+// Error returns the string representation of an Error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr)
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/error_test.go b/vendor/src/github.com/mistifyio/go-zfs/error_test.go
new file mode 100644
index 0000000..323980e
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/error_test.go
@@ -0,0 +1,37 @@
+package zfs
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+)
+
+func TestError(t *testing.T) {
+	var tests = []struct {
+		err    error
+		debug  string
+		stderr string
+	}{
+		// Empty error
+		{nil, "", ""},
+		// Typical error
+		{errors.New("exit status foo"), "/sbin/foo bar qux", "command not found"},
+		// Quoted error
+		{errors.New("exit status quoted"), "\"/sbin/foo\" bar qux", "\"some\" 'random' `quotes`"},
+	}
+
+	for _, test := range tests {
+		// Generate error from tests
+		zErr := Error{
+			Err:    test.err,
+			Debug:  test.debug,
+			Stderr: test.stderr,
+		}
+
+		// Verify output format is consistent, so that any changes to the
+		// Error method must be reflected by the test
+		if str := zErr.Error(); str != fmt.Sprintf("%s: %q => %s", test.err, test.debug, test.stderr) {
+			t.Fatalf("unexpected Error string: %v", str)
+		}
+	}
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/utils.go b/vendor/src/github.com/mistifyio/go-zfs/utils.go
new file mode 100644
index 0000000..250bd5b
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/utils.go
@@ -0,0 +1,320 @@
+package zfs
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type command struct {
+	Command string
+	Stdin   io.Reader
+	Stdout  io.Writer
+}
+
+func (c *command) Run(arg ...string) ([][]string, error) {
+
+	cmd := exec.Command(c.Command, arg...)
+
+	var stdout, stderr bytes.Buffer
+
+	if c.Stdout == nil {
+		cmd.Stdout = &stdout
+	} else {
+		cmd.Stdout = c.Stdout
+	}
+
+	if c.Stdin != nil {
+		cmd.Stdin = c.Stdin
+
+	}
+	cmd.Stderr = &stderr
+
+	debug := strings.Join([]string{cmd.Path, strings.Join(cmd.Args, " ")}, " ")
+	if logger != nil {
+		logger.Log(cmd.Args)
+	}
+	err := cmd.Run()
+
+	if err != nil {
+		return nil, &Error{
+			Err:    err,
+			Debug:  debug,
+			Stderr: stderr.String(),
+		}
+	}
+
+	// assume if you passed in something for stdout, that you know what to do with it
+	if c.Stdout != nil {
+		return nil, nil
+	}
+
+	lines := strings.Split(stdout.String(), "\n")
+
+	//last line is always blank
+	lines = lines[0 : len(lines)-1]
+	output := make([][]string, len(lines))
+
+	for i, l := range lines {
+		output[i] = strings.Fields(l)
+	}
+
+	return output, nil
+}
+
+func setString(field *string, value string) {
+	v := ""
+	if value != "-" {
+		v = value
+	}
+	*field = v
+}
+
+func setUint(field *uint64, value string) error {
+	var v uint64
+	if value != "-" {
+		var err error
+		v, err = strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+	*field = v
+	return nil
+}
+
+func (ds *Dataset) parseLine(line []string) error {
+	prop := line[1]
+	val := line[2]
+
+	var err error
+
+	switch prop {
+	case "available":
+		err = setUint(&ds.Avail, val)
+	case "compression":
+		setString(&ds.Compression, val)
+	case "mountpoint":
+		setString(&ds.Mountpoint, val)
+	case "quota":
+		err = setUint(&ds.Quota, val)
+	case "type":
+		setString(&ds.Type, val)
+	case "origin":
+		setString(&ds.Origin, val)
+	case "used":
+		err = setUint(&ds.Used, val)
+	case "volsize":
+		err = setUint(&ds.Volsize, val)
+	case "written":
+		err = setUint(&ds.Written, val)
+	case "logicalused":
+		err = setUint(&ds.Logicalused, val)
+	}
+	return err
+}
+
+/*
+ * from zfs diff`s escape function:
+ *
+ * Prints a file name out a character at a time.  If the character is
+ * not in the range of what we consider "printable" ASCII, display it
+ * as an escaped 3-digit octal value.  ASCII values less than a space
+ * are all control characters and we declare the upper end as the
+ * DELete character.  This also is the last 7-bit ASCII character.
+ * We choose to treat all 8-bit ASCII as not printable for this
+ * application.
+ */
+func unescapeFilepath(path string) (string, error) {
+	buf := make([]byte, 0, len(path))
+	llen := len(path)
+	for i := 0; i < llen; {
+		if path[i] == '\\' {
+			if llen < i+4 {
+				return "", fmt.Errorf("Invalid octal code: too short")
+			}
+			octalCode := path[(i + 1):(i + 4)]
+			val, err := strconv.ParseUint(octalCode, 8, 8)
+			if err != nil {
+				return "", fmt.Errorf("Invalid octal code: %v", err)
+			}
+			buf = append(buf, byte(val))
+			i += 4
+		} else {
+			buf = append(buf, path[i])
+			i++
+		}
+	}
+	return string(buf), nil
+}
+
+var changeTypeMap = map[string]ChangeType{
+	"-": Removed,
+	"+": Created,
+	"M": Modified,
+	"R": Renamed,
+}
+var inodeTypeMap = map[string]InodeType{
+	"B": BlockDevice,
+	"C": CharacterDevice,
+	"/": Directory,
+	">": Door,
+	"|": NamedPipe,
+	"@": SymbolicLink,
+	"P": EventPort,
+	"=": Socket,
+	"F": File,
+}
+
+// matches (+1) or (-1)
+var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)")
+
+func parseReferenceCount(field string) (int, error) {
+	matches := referenceCountRegex.FindStringSubmatch(field)
+	if matches == nil {
+		return 0, fmt.Errorf("Regexp does not match")
+	}
+	return strconv.Atoi(matches[1])
+}
+
+func parseInodeChange(line []string) (*InodeChange, error) {
+	llen := len(line)
+	if llen < 1 {
+		return nil, fmt.Errorf("Empty line passed")
+	}
+
+	changeType := changeTypeMap[line[0]]
+	if changeType == 0 {
+		return nil, fmt.Errorf("Unknown change type '%s'", line[0])
+	}
+
+	switch changeType {
+	case Renamed:
+		if llen != 4 {
+			return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen)
+		}
+	case Modified:
+		if llen != 4 && llen != 3 {
+			return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen)
+		}
+	default:
+		if llen != 3 {
+			return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen)
+		}
+	}
+
+	inodeType := inodeTypeMap[line[1]]
+	if inodeType == 0 {
+		return nil, fmt.Errorf("Unknown inode type '%s'", line[1])
+	}
+
+	path, err := unescapeFilepath(line[2])
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse filename: %v", err)
+	}
+
+	var newPath string
+	var referenceCount int
+	switch changeType {
+	case Renamed:
+		newPath, err = unescapeFilepath(line[3])
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse filename: %v", err)
+		}
+	case Modified:
+		if llen == 4 {
+			referenceCount, err = parseReferenceCount(line[3])
+			if err != nil {
+				return nil, fmt.Errorf("Failed to parse reference count: %v", err)
+			}
+		}
+	default:
+		newPath = ""
+	}
+
+	return &InodeChange{
+		Change:               changeType,
+		Type:                 inodeType,
+		Path:                 path,
+		NewPath:              newPath,
+		ReferenceCountChange: referenceCount,
+	}, nil
+}
+
+// example input
+//M       /       /testpool/bar/
+//+       F       /testpool/bar/hello.txt
+//M       /       /testpool/bar/hello.txt (+1)
+//M       /       /testpool/bar/hello-hardlink
+func parseInodeChanges(lines [][]string) ([]*InodeChange, error) {
+	changes := make([]*InodeChange, len(lines))
+
+	for i, line := range lines {
+		c, err := parseInodeChange(line)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line)
+		}
+		changes[i] = c
+	}
+	return changes, nil
+}
+
+func listByType(t, filter string) ([]*Dataset, error) {
+	args := []string{"get", "all", "-t", t, "-rHp"}
+	if filter != "" {
+		args = append(args, filter)
+	}
+	out, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+
+	var datasets []*Dataset
+
+	name := ""
+	var ds *Dataset
+	for _, line := range out {
+		if name != line[0] {
+			name = line[0]
+			ds = &Dataset{Name: name}
+			datasets = append(datasets, ds)
+		}
+		if err := ds.parseLine(line); err != nil {
+			return nil, err
+		}
+	}
+
+	return datasets, nil
+}
+
+func propsSlice(properties map[string]string) []string {
+	args := make([]string, 0, len(properties)*3)
+	for k, v := range properties {
+		args = append(args, "-o")
+		args = append(args, fmt.Sprintf("%s=%s", k, v))
+	}
+	return args
+}
+
+func (z *Zpool) parseLine(line []string) error {
+	prop := line[1]
+	val := line[2]
+
+	var err error
+
+	switch prop {
+	case "health":
+		setString(&z.Health, val)
+	case "allocated":
+		err = setUint(&z.Allocated, val)
+	case "size":
+		err = setUint(&z.Size, val)
+	case "free":
+		err = setUint(&z.Free, val)
+	}
+	return err
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/zfs.go b/vendor/src/github.com/mistifyio/go-zfs/zfs.go
new file mode 100644
index 0000000..f43bea2
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/zfs.go
@@ -0,0 +1,382 @@
+// Package zfs provides wrappers around the ZFS command line tools.
+package zfs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// ZFS dataset types, which can indicate if a dataset is a filesystem,
+// snapshot, or volume.
+const (
+	DatasetFilesystem = "filesystem"
+	DatasetSnapshot   = "snapshot"
+	DatasetVolume     = "volume"
+)
+
+// Dataset is a ZFS dataset.  A dataset could be a clone, filesystem, snapshot,
+// or volume.  The Type struct member can be used to determine a dataset's type.
+//
+// The field definitions can be found in the ZFS manual:
+// http://www.freebsd.org/cgi/man.cgi?zfs(8).
+type Dataset struct {
+	Name          string
+	Origin        string
+	Used          uint64
+	Avail         uint64
+	Mountpoint    string
+	Compression   string
+	Type          string
+	Written       uint64
+	Volsize       uint64
+	Usedbydataset uint64
+	Logicalused   uint64
+	Quota         uint64
+}
+
+// InodeType is the type of inode as reported by Diff
+type InodeType int
+
+// Types of Inodes
+const (
+	_                     = iota // 0 == unknown type
+	BlockDevice InodeType = iota
+	CharacterDevice
+	Directory
+	Door
+	NamedPipe
+	SymbolicLink
+	EventPort
+	Socket
+	File
+)
+
+// ChangeType is the type of inode change as reported by Diff
+type ChangeType int
+
+// Types of Changes
+const (
+	_                  = iota // 0 == unknown type
+	Removed ChangeType = iota
+	Created
+	Modified
+	Renamed
+)
+
+// DestroyFlag is the options flag passed to Destroy
+type DestroyFlag int
+
+// Valid destroy options
+const (
+	DestroyDefault         DestroyFlag = 1 << iota
+	DestroyRecursive                   = 1 << iota
+	DestroyRecursiveClones             = 1 << iota
+	DestroyDeferDeletion               = 1 << iota
+	DestroyForceUmount                 = 1 << iota
+)
+
+// InodeChange represents a change as reported by Diff
+type InodeChange struct {
+	Change               ChangeType
+	Type                 InodeType
+	Path                 string
+	NewPath              string
+	ReferenceCountChange int
+}
+
+// Logger can be used to log commands/actions
+type Logger interface {
+	Log(cmd []string)
+}
+
+var logger Logger
+
+// SetLogger set a log handler to log all commands including arguments before
+// they are executed
+func SetLogger(l Logger) {
+	logger = l
+}
+
+// zfs is a helper function to wrap typical calls to zfs.
+func zfs(arg ...string) ([][]string, error) {
+	c := command{Command: "zfs"}
+	return c.Run(arg...)
+}
+
+// Datasets returns a slice of ZFS datasets, regardless of type.
+// A filter argument may be passed to select a dataset with the matching name,
+// or empty string ("") may be used to select all datasets.
+func Datasets(filter string) ([]*Dataset, error) {
+	return listByType("all", filter)
+}
+
+// Snapshots returns a slice of ZFS snapshots.
+// A filter argument may be passed to select a snapshot with the matching name,
+// or empty string ("") may be used to select all snapshots.
+func Snapshots(filter string) ([]*Dataset, error) {
+	return listByType(DatasetSnapshot, filter)
+}
+
+// Filesystems returns a slice of ZFS filesystems.
+// A filter argument may be passed to select a filesystem with the matching name,
+// or empty string ("") may be used to select all filesystems.
+func Filesystems(filter string) ([]*Dataset, error) {
+	return listByType(DatasetFilesystem, filter)
+}
+
+// Volumes returns a slice of ZFS volumes.
+// A filter argument may be passed to select a volume with the matching name,
+// or empty string ("") may be used to select all volumes.
+func Volumes(filter string) ([]*Dataset, error) {
+	return listByType(DatasetVolume, filter)
+}
+
+// GetDataset retrieves a single ZFS dataset by name.  This dataset could be
+// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
+func GetDataset(name string) (*Dataset, error) {
+	out, err := zfs("get", "all", "-Hp", name)
+	if err != nil {
+		return nil, err
+	}
+
+	ds := &Dataset{Name: name}
+	for _, line := range out {
+		if err := ds.parseLine(line); err != nil {
+			return nil, err
+		}
+	}
+
+	return ds, nil
+}
+
+// Clone clones a ZFS snapshot and returns a clone dataset.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) {
+	if d.Type != DatasetSnapshot {
+		return nil, errors.New("can only clone snapshots")
+	}
+	args := make([]string, 2, 4)
+	args[0] = "clone"
+	args[1] = "-p"
+	if properties != nil {
+		args = append(args, propsSlice(properties)...)
+	}
+	args = append(args, []string{d.Name, dest}...)
+	_, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+	return GetDataset(dest)
+}
+
+// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a
+// new snapshot with the specified name, and streams the input data into the
+// newly-created snapshot.
+func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) {
+	c := command{Command: "zfs", Stdin: input}
+	_, err := c.Run("receive", name)
+	if err != nil {
+		return nil, err
+	}
+	return GetDataset(name)
+}
+
+// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) SendSnapshot(output io.Writer) error {
+	if d.Type != DatasetSnapshot {
+		return errors.New("can only send snapshots")
+	}
+
+	c := command{Command: "zfs", Stdout: output}
+	_, err := c.Run("send", d.Name)
+	return err
+}
+
+// CreateVolume creates a new ZFS volume with the specified name, size, and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) {
+	args := make([]string, 4, 5)
+	args[0] = "create"
+	args[1] = "-p"
+	args[2] = "-V"
+	args[3] = strconv.FormatUint(size, 10)
+	if properties != nil {
+		args = append(args, propsSlice(properties)...)
+	}
+	args = append(args, name)
+	_, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+	return GetDataset(name)
+}
+
+// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any
+// descendents of the dataset will be recursively destroyed, including snapshots.
+// If the deferred bit flag is set, the snapshot is marked for deferred
+// deletion.
+func (d *Dataset) Destroy(flags DestroyFlag) error {
+	args := make([]string, 1, 3)
+	args[0] = "destroy"
+	if flags&DestroyRecursive != 0 {
+		args = append(args, "-r")
+	}
+
+	if flags&DestroyRecursiveClones != 0 {
+		args = append(args, "-R")
+	}
+
+	if flags&DestroyDeferDeletion != 0 {
+		args = append(args, "-d")
+	}
+
+	if flags&DestroyForceUmount != 0 {
+		args = append(args, "-f")
+	}
+
+	args = append(args, d.Name)
+	_, err := zfs(args...)
+	return err
+}
+
+// SetProperty sets a ZFS property on the receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) SetProperty(key, val string) error {
+	prop := strings.Join([]string{key, val}, "=")
+	_, err := zfs("set", prop, d.Name)
+	return err
+}
+
+// GetProperty returns the current value of a ZFS property from the
+// receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) GetProperty(key string) (string, error) {
+	out, err := zfs("get", key, d.Name)
+	if err != nil {
+		return "", err
+	}
+
+	return out[0][2], nil
+}
+
+// Snapshots returns a slice of all ZFS snapshots of a given dataset.
+func (d *Dataset) Snapshots() ([]*Dataset, error) {
+	return Snapshots(d.Name)
+}
+
+// CreateFilesystem creates a new ZFS filesystem with the specified name and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
+	args := make([]string, 1, 4)
+	args[0] = "create"
+
+	if properties != nil {
+		args = append(args, propsSlice(properties)...)
+	}
+
+	args = append(args, name)
+	_, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+	return GetDataset(name)
+}
+
+// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
+// specified name.  Optionally, the snapshot can be taken recursively, creating
+// snapshots of all descendent filesystems in a single, atomic operation.
+func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
+	args := make([]string, 1, 4)
+	args[0] = "snapshot"
+	if recursive {
+		args = append(args, "-r")
+	}
+	snapName := fmt.Sprintf("%s@%s", d.Name, name)
+	args = append(args, snapName)
+	_, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+	return GetDataset(snapName)
+}
+
+// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
+// Optionally, intermediate snapshots can be destroyed.  A ZFS snapshot
+// rollback cannot be completed without this option, if more recent
+// snapshots exist.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Rollback(destroyMoreRecent bool) error {
+	if d.Type != DatasetSnapshot {
+		return errors.New("can only rollback snapshots")
+	}
+
+	args := make([]string, 1, 3)
+	args[0] = "rollback"
+	if destroyMoreRecent {
+		args = append(args, "-r")
+	}
+	args = append(args, d.Name)
+
+	_, err := zfs(args...)
+	return err
+}
+
+// Children returns a slice of children of the receiving ZFS dataset.
+// A recursion depth may be specified, or a depth of 0 allows unlimited
+// recursion.
+func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
+	args := []string{"get", "all", "-t", "all", "-Hp"}
+	if depth > 0 {
+		args = append(args, "-d")
+		args = append(args, strconv.FormatUint(depth, 10))
+	} else {
+		args = append(args, "-r")
+	}
+	args = append(args, d.Name)
+
+	out, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+
+	var datasets []*Dataset
+	name := ""
+	var ds *Dataset
+	for _, line := range out {
+		if name != line[0] {
+			name = line[0]
+			ds = &Dataset{Name: name}
+			datasets = append(datasets, ds)
+		}
+		if err := ds.parseLine(line); err != nil {
+			return nil, err
+		}
+	}
+	return datasets[1:], nil
+}
+
+// Diff returns changes between a snapshot and the given ZFS dataset.
+// The snapshot name must include the filesystem part as it is possible to
+// compare clones with their origin snapshots.
+func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) {
+	args := []string{"diff", "-FH", snapshot, d.Name}[:]
+	out, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+	inodeChanges, err := parseInodeChanges(out)
+	if err != nil {
+		return nil, err
+	}
+	return inodeChanges, nil
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go b/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go
new file mode 100644
index 0000000..e991a5c
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/zfs_test.go
@@ -0,0 +1,357 @@
+package zfs_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/mistifyio/go-zfs"
+)
+
+func sleep(delay int) {
+	time.Sleep(time.Duration(delay) * time.Second)
+}
+
+func pow2(x int) int64 {
+	return int64(math.Pow(2, float64(x)))
+}
+
+//https://github.com/benbjohnson/testing
+// assert fails the test if the condition is false.
+func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
+	if !condition {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
+		tb.FailNow()
+	}
+}
+
+// ok fails the test if an err is not nil.
+func ok(tb testing.TB, err error) {
+	if err != nil {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
+		tb.FailNow()
+	}
+}
+
+// equals fails the test if exp is not equal to act.
+func equals(tb testing.TB, exp, act interface{}) {
+	if !reflect.DeepEqual(exp, act) {
+		_, file, line, _ := runtime.Caller(1)
+		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
+		tb.FailNow()
+	}
+}
+
+func zpoolTest(t *testing.T, fn func()) {
+	tempfiles := make([]string, 3)
+	for i := range tempfiles {
+		f, _ := ioutil.TempFile("/tmp/", "zfs-")
+		defer f.Close()
+		err := f.Truncate(pow2(30))
+		ok(t, err)
+		tempfiles[i] = f.Name()
+		defer os.Remove(f.Name())
+	}
+
+	pool, err := zfs.CreateZpool("test", nil, tempfiles...)
+	ok(t, err)
+	defer pool.Destroy()
+	ok(t, err)
+	fn()
+
+}
+
+func TestDatasets(t *testing.T) {
+	zpoolTest(t, func() {
+		_, err := zfs.Datasets("")
+		ok(t, err)
+
+		ds, err := zfs.GetDataset("test")
+		ok(t, err)
+		equals(t, zfs.DatasetFilesystem, ds.Type)
+		equals(t, "", ds.Origin)
+		assert(t, ds.Logicalused > 0, "Logicalused is not greater than 0")
+	})
+}
+
+func TestSnapshots(t *testing.T) {
+
+	zpoolTest(t, func() {
+		snapshots, err := zfs.Snapshots("")
+		ok(t, err)
+
+		for _, snapshot := range snapshots {
+			equals(t, zfs.DatasetSnapshot, snapshot.Type)
+		}
+	})
+}
+
+func TestFilesystems(t *testing.T) {
+	zpoolTest(t, func() {
+		f, err := zfs.CreateFilesystem("test/filesystem-test", nil)
+		ok(t, err)
+
+		filesystems, err := zfs.Filesystems("")
+		ok(t, err)
+
+		for _, filesystem := range filesystems {
+			equals(t, zfs.DatasetFilesystem, filesystem.Type)
+		}
+
+		ok(t, f.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestCreateFilesystemWithProperties(t *testing.T) {
+	zpoolTest(t, func() {
+		props := map[string]string{
+			"compression": "lz4",
+		}
+
+		f, err := zfs.CreateFilesystem("test/filesystem-test", props)
+		ok(t, err)
+
+		equals(t, "lz4", f.Compression)
+
+		filesystems, err := zfs.Filesystems("")
+		ok(t, err)
+
+		for _, filesystem := range filesystems {
+			equals(t, zfs.DatasetFilesystem, filesystem.Type)
+		}
+
+		ok(t, f.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestVolumes(t *testing.T) {
+	zpoolTest(t, func() {
+		v, err := zfs.CreateVolume("test/volume-test", uint64(pow2(23)), nil)
+		ok(t, err)
+
+		// volumes are sometimes "busy" if you try to manipulate them right away
+		sleep(1)
+
+		equals(t, zfs.DatasetVolume, v.Type)
+		volumes, err := zfs.Volumes("")
+		ok(t, err)
+
+		for _, volume := range volumes {
+			equals(t, zfs.DatasetVolume, volume.Type)
+		}
+
+		ok(t, v.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestSnapshot(t *testing.T) {
+	zpoolTest(t, func() {
+		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+		ok(t, err)
+
+		filesystems, err := zfs.Filesystems("")
+		ok(t, err)
+
+		for _, filesystem := range filesystems {
+			equals(t, zfs.DatasetFilesystem, filesystem.Type)
+		}
+
+		s, err := f.Snapshot("test", false)
+		ok(t, err)
+
+		equals(t, zfs.DatasetSnapshot, s.Type)
+
+		equals(t, "test/snapshot-test@test", s.Name)
+
+		ok(t, s.Destroy(zfs.DestroyDefault))
+
+		ok(t, f.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestClone(t *testing.T) {
+	zpoolTest(t, func() {
+		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+		ok(t, err)
+
+		filesystems, err := zfs.Filesystems("")
+		ok(t, err)
+
+		for _, filesystem := range filesystems {
+			equals(t, zfs.DatasetFilesystem, filesystem.Type)
+		}
+
+		s, err := f.Snapshot("test", false)
+		ok(t, err)
+
+		equals(t, zfs.DatasetSnapshot, s.Type)
+		equals(t, "test/snapshot-test@test", s.Name)
+
+		c, err := s.Clone("test/clone-test", nil)
+		ok(t, err)
+
+		equals(t, zfs.DatasetFilesystem, c.Type)
+
+		ok(t, c.Destroy(zfs.DestroyDefault))
+
+		ok(t, s.Destroy(zfs.DestroyDefault))
+
+		ok(t, f.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestSendSnapshot(t *testing.T) {
+	zpoolTest(t, func() {
+		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+		ok(t, err)
+
+		filesystems, err := zfs.Filesystems("")
+		ok(t, err)
+
+		for _, filesystem := range filesystems {
+			equals(t, zfs.DatasetFilesystem, filesystem.Type)
+		}
+
+		s, err := f.Snapshot("test", false)
+		ok(t, err)
+
+		file, _ := ioutil.TempFile("/tmp/", "zfs-")
+		defer file.Close()
+		err = file.Truncate(pow2(30))
+		ok(t, err)
+		defer os.Remove(file.Name())
+
+		err = s.SendSnapshot(file)
+		ok(t, err)
+
+		ok(t, s.Destroy(zfs.DestroyDefault))
+
+		ok(t, f.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestChildren(t *testing.T) {
+	zpoolTest(t, func() {
+		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+		ok(t, err)
+
+		s, err := f.Snapshot("test", false)
+		ok(t, err)
+
+		equals(t, zfs.DatasetSnapshot, s.Type)
+		equals(t, "test/snapshot-test@test", s.Name)
+
+		children, err := f.Children(0)
+		ok(t, err)
+
+		equals(t, 1, len(children))
+		equals(t, "test/snapshot-test@test", children[0].Name)
+
+		ok(t, s.Destroy(zfs.DestroyDefault))
+		ok(t, f.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestListZpool(t *testing.T) {
+	zpoolTest(t, func() {
+		_, err := zfs.ListZpools()
+		ok(t, err)
+	})
+}
+
+func TestRollback(t *testing.T) {
+	zpoolTest(t, func() {
+		f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+		ok(t, err)
+
+		filesystems, err := zfs.Filesystems("")
+		ok(t, err)
+
+		for _, filesystem := range filesystems {
+			equals(t, zfs.DatasetFilesystem, filesystem.Type)
+		}
+
+		s1, err := f.Snapshot("test", false)
+		ok(t, err)
+
+		_, err = f.Snapshot("test2", false)
+		ok(t, err)
+
+		s3, err := f.Snapshot("test3", false)
+		ok(t, err)
+
+		err = s3.Rollback(false)
+		ok(t, err)
+
+		err = s1.Rollback(false)
+		assert(t, ok != nil, "should error when rolling back beyond most recent without destroyMoreRecent = true")
+
+		err = s1.Rollback(true)
+		ok(t, err)
+
+		ok(t, s1.Destroy(zfs.DestroyDefault))
+
+		ok(t, f.Destroy(zfs.DestroyDefault))
+	})
+}
+
+func TestDiff(t *testing.T) {
+	zpoolTest(t, func() {
+		fs, err := zfs.CreateFilesystem("test/origin", nil)
+		ok(t, err)
+
+		linkedFile, err := os.Create(filepath.Join(fs.Mountpoint, "linked"))
+		ok(t, err)
+
+		movedFile, err := os.Create(filepath.Join(fs.Mountpoint, "file"))
+		ok(t, err)
+
+		snapshot, err := fs.Snapshot("snapshot", false)
+		ok(t, err)
+
+		unicodeFile, err := os.Create(filepath.Join(fs.Mountpoint, "i ❤ unicode"))
+		ok(t, err)
+
+		err = os.Rename(movedFile.Name(), movedFile.Name()+"-new")
+		ok(t, err)
+
+		err = os.Link(linkedFile.Name(), linkedFile.Name()+"_hard")
+		ok(t, err)
+
+		inodeChanges, err := fs.Diff(snapshot.Name)
+		ok(t, err)
+		equals(t, 4, len(inodeChanges))
+
+		equals(t, "/test/origin/", inodeChanges[0].Path)
+		equals(t, zfs.Directory, inodeChanges[0].Type)
+		equals(t, zfs.Modified, inodeChanges[0].Change)
+
+		equals(t, "/test/origin/linked", inodeChanges[1].Path)
+		equals(t, zfs.File, inodeChanges[1].Type)
+		equals(t, zfs.Modified, inodeChanges[1].Change)
+		equals(t, 1, inodeChanges[1].ReferenceCountChange)
+
+		equals(t, "/test/origin/file", inodeChanges[2].Path)
+		equals(t, "/test/origin/file-new", inodeChanges[2].NewPath)
+		equals(t, zfs.File, inodeChanges[2].Type)
+		equals(t, zfs.Renamed, inodeChanges[2].Change)
+
+		equals(t, "/test/origin/i ❤ unicode", inodeChanges[3].Path)
+		equals(t, zfs.File, inodeChanges[3].Type)
+		equals(t, zfs.Created, inodeChanges[3].Change)
+
+		ok(t, movedFile.Close())
+		ok(t, unicodeFile.Close())
+		ok(t, linkedFile.Close())
+		ok(t, snapshot.Destroy(zfs.DestroyForceUmount))
+		ok(t, fs.Destroy(zfs.DestroyForceUmount))
+	})
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/zpool.go b/vendor/src/github.com/mistifyio/go-zfs/zpool.go
new file mode 100644
index 0000000..59be0a8
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/zpool.go
@@ -0,0 +1,108 @@
+package zfs
+
+// ZFS zpool states, which can indicate if a pool is online, offline,
+// degraded, etc.  More information regarding zpool states can be found here:
+// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html.
+const (
+	ZpoolOnline   = "ONLINE"
+	ZpoolDegraded = "DEGRADED"
+	ZpoolFaulted  = "FAULTED"
+	ZpoolOffline  = "OFFLINE"
+	ZpoolUnavail  = "UNAVAIL"
+	ZpoolRemoved  = "REMOVED"
+)
+
+// Zpool is a ZFS zpool.  A pool is a top-level structure in ZFS, and can
+// contain many descendent datasets.
+type Zpool struct {
+	Name      string
+	Health    string
+	Allocated uint64
+	Size      uint64
+	Free      uint64
+}
+
+// zpool is a helper function to wrap typical calls to zpool.
+func zpool(arg ...string) ([][]string, error) {
+	c := command{Command: "zpool"}
+	return c.Run(arg...)
+}
+
+// GetZpool retrieves a single ZFS zpool by name.
+func GetZpool(name string) (*Zpool, error) {
+	out, err := zpool("get", "all", "-p", name)
+	if err != nil {
+		return nil, err
+	}
+
+	// there is no -H
+	out = out[1:]
+
+	z := &Zpool{Name: name}
+	for _, line := range out {
+		if err := z.parseLine(line); err != nil {
+			return nil, err
+		}
+	}
+
+	return z, nil
+}
+
+// Datasets returns a slice of all ZFS datasets in a zpool.
+func (z *Zpool) Datasets() ([]*Dataset, error) {
+	return Datasets(z.Name)
+}
+
+// Snapshots returns a slice of all ZFS snapshots in a zpool.
+func (z *Zpool) Snapshots() ([]*Dataset, error) {
+	return Snapshots(z.Name)
+}
+
+// CreateZpool creates a new ZFS zpool with the specified name, properties,
+// and optional arguments.
+// A full list of available ZFS properties and command-line arguments may be
+// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
+	cli := make([]string, 1, 4)
+	cli[0] = "create"
+	if properties != nil {
+		cli = append(cli, propsSlice(properties)...)
+	}
+	cli = append(cli, name)
+	cli = append(cli, args...)
+	_, err := zpool(cli...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Zpool{Name: name}, nil
+}
+
+// Destroy destroys a ZFS zpool by name.
+func (z *Zpool) Destroy() error {
+	_, err := zpool("destroy", z.Name)
+	return err
+}
+
+// ListZpools list all ZFS zpools accessible on the current system.
+func ListZpools() ([]*Zpool, error) {
+	args := []string{"list", "-Ho", "name"}
+	out, err := zpool(args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// there is no -H
+	out = out[1:]
+
+	var pools []*Zpool
+
+	for _, line := range out {
+		z, err := GetZpool(line[0])
+		if err != nil {
+			return nil, err
+		}
+		pools = append(pools, z)
+	}
+	return pools, nil
+}
diff --git a/vendor/src/github.com/syndtr/gocapability/capability/enum.go b/vendor/src/github.com/syndtr/gocapability/capability/enum.go
index bff756a..fd0ce7f 100644
--- a/vendor/src/github.com/syndtr/gocapability/capability/enum.go
+++ b/vendor/src/github.com/syndtr/gocapability/capability/enum.go
@@ -34,90 +34,9 @@
 	BOUNDS = BOUNDING
 )
 
+//go:generate go run enumgen/gen.go
 type Cap int
 
-func (c Cap) String() string {
-	switch c {
-	case CAP_CHOWN:
-		return "chown"
-	case CAP_DAC_OVERRIDE:
-		return "dac_override"
-	case CAP_DAC_READ_SEARCH:
-		return "dac_read_search"
-	case CAP_FOWNER:
-		return "fowner"
-	case CAP_FSETID:
-		return "fsetid"
-	case CAP_KILL:
-		return "kill"
-	case CAP_SETGID:
-		return "setgid"
-	case CAP_SETUID:
-		return "setuid"
-	case CAP_SETPCAP:
-		return "setpcap"
-	case CAP_LINUX_IMMUTABLE:
-		return "linux_immutable"
-	case CAP_NET_BIND_SERVICE:
-		return "net_bind_service"
-	case CAP_NET_BROADCAST:
-		return "net_broadcast"
-	case CAP_NET_ADMIN:
-		return "net_admin"
-	case CAP_NET_RAW:
-		return "net_raw"
-	case CAP_IPC_LOCK:
-		return "ipc_lock"
-	case CAP_IPC_OWNER:
-		return "ipc_owner"
-	case CAP_SYS_MODULE:
-		return "sys_module"
-	case CAP_SYS_RAWIO:
-		return "sys_rawio"
-	case CAP_SYS_CHROOT:
-		return "sys_chroot"
-	case CAP_SYS_PTRACE:
-		return "sys_ptrace"
-	case CAP_SYS_PACCT:
-		return "sys_psacct"
-	case CAP_SYS_ADMIN:
-		return "sys_admin"
-	case CAP_SYS_BOOT:
-		return "sys_boot"
-	case CAP_SYS_NICE:
-		return "sys_nice"
-	case CAP_SYS_RESOURCE:
-		return "sys_resource"
-	case CAP_SYS_TIME:
-		return "sys_time"
-	case CAP_SYS_TTY_CONFIG:
-		return "sys_tty_config"
-	case CAP_MKNOD:
-		return "mknod"
-	case CAP_LEASE:
-		return "lease"
-	case CAP_AUDIT_WRITE:
-		return "audit_write"
-	case CAP_AUDIT_CONTROL:
-		return "audit_control"
-	case CAP_SETFCAP:
-		return "setfcap"
-	case CAP_MAC_OVERRIDE:
-		return "mac_override"
-	case CAP_MAC_ADMIN:
-		return "mac_admin"
-	case CAP_SYSLOG:
-		return "syslog"
-	case CAP_WAKE_ALARM:
-		return "wake_alarm"
-	case CAP_BLOCK_SUSPEND:
-		return "block_suspend"
-	case CAP_AUDIT_READ:
-		return "audit_read"
-	}
-	return "unknown"
-}
-
 // POSIX-draft defined capabilities.
 const (
 	// In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
diff --git a/vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go b/vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go
new file mode 100644
index 0000000..b9e6d2d
--- /dev/null
+++ b/vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go
@@ -0,0 +1,129 @@
+// generated file; DO NOT EDIT - use go generate in directory with source
+
+package capability
+
+func (c Cap) String() string {
+	switch c {
+	case CAP_CHOWN:
+		return "chown"
+	case CAP_DAC_OVERRIDE:
+		return "dac_override"
+	case CAP_DAC_READ_SEARCH:
+		return "dac_read_search"
+	case CAP_FOWNER:
+		return "fowner"
+	case CAP_FSETID:
+		return "fsetid"
+	case CAP_KILL:
+		return "kill"
+	case CAP_SETGID:
+		return "setgid"
+	case CAP_SETUID:
+		return "setuid"
+	case CAP_SETPCAP:
+		return "setpcap"
+	case CAP_LINUX_IMMUTABLE:
+		return "linux_immutable"
+	case CAP_NET_BIND_SERVICE:
+		return "net_bind_service"
+	case CAP_NET_BROADCAST:
+		return "net_broadcast"
+	case CAP_NET_ADMIN:
+		return "net_admin"
+	case CAP_NET_RAW:
+		return "net_raw"
+	case CAP_IPC_LOCK:
+		return "ipc_lock"
+	case CAP_IPC_OWNER:
+		return "ipc_owner"
+	case CAP_SYS_MODULE:
+		return "sys_module"
+	case CAP_SYS_RAWIO:
+		return "sys_rawio"
+	case CAP_SYS_CHROOT:
+		return "sys_chroot"
+	case CAP_SYS_PTRACE:
+		return "sys_ptrace"
+	case CAP_SYS_PACCT:
+		return "sys_pacct"
+	case CAP_SYS_ADMIN:
+		return "sys_admin"
+	case CAP_SYS_BOOT:
+		return "sys_boot"
+	case CAP_SYS_NICE:
+		return "sys_nice"
+	case CAP_SYS_RESOURCE:
+		return "sys_resource"
+	case CAP_SYS_TIME:
+		return "sys_time"
+	case CAP_SYS_TTY_CONFIG:
+		return "sys_tty_config"
+	case CAP_MKNOD:
+		return "mknod"
+	case CAP_LEASE:
+		return "lease"
+	case CAP_AUDIT_WRITE:
+		return "audit_write"
+	case CAP_AUDIT_CONTROL:
+		return "audit_control"
+	case CAP_SETFCAP:
+		return "setfcap"
+	case CAP_MAC_OVERRIDE:
+		return "mac_override"
+	case CAP_MAC_ADMIN:
+		return "mac_admin"
+	case CAP_SYSLOG:
+		return "syslog"
+	case CAP_WAKE_ALARM:
+		return "wake_alarm"
+	case CAP_BLOCK_SUSPEND:
+		return "block_suspend"
+	case CAP_AUDIT_READ:
+		return "audit_read"
+	}
+	return "unknown"
+}
+
+// List returns list of all supported capabilities
+func List() []Cap {
+	return []Cap{
+		CAP_CHOWN,
+		CAP_DAC_OVERRIDE,
+		CAP_DAC_READ_SEARCH,
+		CAP_FOWNER,
+		CAP_FSETID,
+		CAP_KILL,
+		CAP_SETGID,
+		CAP_SETUID,
+		CAP_SETPCAP,
+		CAP_LINUX_IMMUTABLE,
+		CAP_NET_BIND_SERVICE,
+		CAP_NET_BROADCAST,
+		CAP_NET_ADMIN,
+		CAP_NET_RAW,
+		CAP_IPC_LOCK,
+		CAP_IPC_OWNER,
+		CAP_SYS_MODULE,
+		CAP_SYS_RAWIO,
+		CAP_SYS_CHROOT,
+		CAP_SYS_PTRACE,
+		CAP_SYS_PACCT,
+		CAP_SYS_ADMIN,
+		CAP_SYS_BOOT,
+		CAP_SYS_NICE,
+		CAP_SYS_RESOURCE,
+		CAP_SYS_TIME,
+		CAP_SYS_TTY_CONFIG,
+		CAP_MKNOD,
+		CAP_LEASE,
+		CAP_AUDIT_WRITE,
+		CAP_AUDIT_CONTROL,
+		CAP_SETFCAP,
+		CAP_MAC_OVERRIDE,
+		CAP_MAC_ADMIN,
+		CAP_SYSLOG,
+		CAP_WAKE_ALARM,
+		CAP_BLOCK_SUSPEND,
+		CAP_AUDIT_READ,
+	}
+}
diff --git a/vendor/src/github.com/syndtr/gocapability/capability/enumgen/gen.go b/vendor/src/github.com/syndtr/gocapability/capability/enumgen/gen.go
new file mode 100644
index 0000000..4c73380
--- /dev/null
+++ b/vendor/src/github.com/syndtr/gocapability/capability/enumgen/gen.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/format"
+	"go/parser"
+	"go/token"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+)
+
+const fileName = "enum.go"
+const genName = "enum_gen.go"
+
+type generator struct {
+	buf  bytes.Buffer
+	caps []string
+}
+
+func (g *generator) writeHeader() {
+	g.buf.WriteString("// generated file; DO NOT EDIT - use go generate in directory with source\n")
+	g.buf.WriteString("\n")
+	g.buf.WriteString("package capability")
+}
+
+func (g *generator) writeStringFunc() {
+	g.buf.WriteString("\n")
+	g.buf.WriteString("func (c Cap) String() string {\n")
+	g.buf.WriteString("switch c {\n")
+	for _, cap := range g.caps {
+		fmt.Fprintf(&g.buf, "case %s:\n", cap)
+		fmt.Fprintf(&g.buf, "return \"%s\"\n", strings.ToLower(cap[4:]))
+	}
+	g.buf.WriteString("}\n")
+	g.buf.WriteString("return \"unknown\"\n")
+	g.buf.WriteString("}\n")
+}
+
+func (g *generator) writeListFunc() {
+	g.buf.WriteString("\n")
+	g.buf.WriteString("// List returns list of all supported capabilities\n")
+	g.buf.WriteString("func List() []Cap {\n")
+	g.buf.WriteString("return []Cap{\n")
+	for _, cap := range g.caps {
+		fmt.Fprintf(&g.buf, "%s,\n", cap)
+	}
+	g.buf.WriteString("}\n")
+	g.buf.WriteString("}\n")
+}
+
+func main() {
+	fs := token.NewFileSet()
+	parsedFile, err := parser.ParseFile(fs, fileName, nil, 0)
+	if err != nil {
+		log.Fatal(err)
+	}
+	var caps []string
+	for _, decl := range parsedFile.Decls {
+		decl, ok := decl.(*ast.GenDecl)
+		if !ok || decl.Tok != token.CONST {
+			continue
+		}
+		for _, spec := range decl.Specs {
+			vspec := spec.(*ast.ValueSpec)
+			name := vspec.Names[0].Name
+			if strings.HasPrefix(name, "CAP_") {
+				caps = append(caps, name)
+			}
+		}
+	}
+	g := &generator{caps: caps}
+	g.writeHeader()
+	g.writeStringFunc()
+	g.writeListFunc()
+	src, err := format.Source(g.buf.Bytes())
+	if err != nil {
+		fmt.Println("generated invalid Go code")
+		fmt.Println(g.buf.String())
+		log.Fatal(err)
+	}
+	fi, err := os.Stat(fileName)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := ioutil.WriteFile(genName, src, fi.Mode().Perm()); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/vendor/src/github.com/tchap/go-patricia/README.md b/vendor/src/github.com/tchap/go-patricia/README.md
index 11ee461..9d6ebc4 100644
--- a/vendor/src/github.com/tchap/go-patricia/README.md
+++ b/vendor/src/github.com/tchap/go-patricia/README.md
@@ -50,9 +50,12 @@
 	return nil
 }
 
-// Create a new tree.
+// Create a new default trie (using the default parameter values).
 trie := NewTrie()
 
+// Create a new custom trie.
+trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
+
 // Insert some items.
 trie.Insert(Prefix("Pepa Novak"), 1)
 trie.Insert(Prefix("Pepa Sindelar"), 2)
@@ -67,12 +70,12 @@
 fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
 // Anybody called "Karel" here? true
 
-// Walk the tree.
+// Walk the tree in alphabetical order.
 trie.Visit(printItem)
+// "Karel Hynek Macha": 4
+// "Karel Macha": 3
 // "Pepa Novak": 1
 // "Pepa Sindelar": 2
-// "Karel Macha": 3
-// "Karel Hynek Macha": 4
 
 // Walk a subtree.
 trie.VisitSubtree(Prefix("Pepa"), printItem)
@@ -96,8 +99,8 @@
 
 // Walk again.
 trie.Visit(printItem)
-// "Pepa Sindelar": 2
 // "Karel Hynek Macha": 10
+// "Pepa Sindelar": 2
 
 // Delete a subtree.
 trie.DeleteSubtree(Prefix("Pepa"))
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/children.go b/vendor/src/github.com/tchap/go-patricia/patricia/children.go
index 07d3326..a204b0c 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/children.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/children.go
@@ -5,11 +5,7 @@
 
 package patricia
 
-// Max prefix length that is kept in a single trie node.
-var MaxPrefixPerNode = 10
-
-// Max children to keep in a node in the sparse mode.
-const MaxChildrenPerSparseNode = 8
+import "sort"
 
 type childList interface {
 	length() int
@@ -21,13 +17,28 @@
 	walk(prefix *Prefix, visitor VisitorFunc) error
 }
 
-type sparseChildList struct {
-	children []*Trie
+type tries []*Trie
+
+func (t tries) Len() int {
+	return len(t)
 }
 
-func newSparseChildList() childList {
+func (t tries) Less(i, j int) bool {
+	strings := sort.StringSlice{string(t[i].prefix), string(t[j].prefix)}
+	return strings.Less(0, 1)
+}
+
+func (t tries) Swap(i, j int) {
+	t[i], t[j] = t[j], t[i]
+}
+
+type sparseChildList struct {
+	children tries
+}
+
+func newSparseChildList(maxChildrenPerSparseNode int) childList {
 	return &sparseChildList{
-		children: make([]*Trie, 0, MaxChildrenPerSparseNode),
+		children: make(tries, 0, DefaultMaxChildrenPerSparseNode),
 	}
 }
 
@@ -82,6 +93,9 @@
 }
 
 func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {
+
+	sort.Sort(list.children)
+
 	for _, child := range list.children {
 		*prefix = append(*prefix, child.prefix...)
 		if child.item != nil {
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
index 8fcbcdf..a8c3786 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
@@ -13,6 +13,11 @@
 // Trie
 //------------------------------------------------------------------------------
 
+const (
+	DefaultMaxPrefixPerNode         = 10
+	DefaultMaxChildrenPerSparseNode = 8
+)
+
 type (
 	Prefix      []byte
 	Item        interface{}
@@ -27,15 +32,44 @@
 	prefix Prefix
 	item   Item
 
+	maxPrefixPerNode         int
+	maxChildrenPerSparseNode int
+
 	children childList
 }
 
 // Public API ------------------------------------------------------------------
 
+type Option func(*Trie)
+
 // Trie constructor.
-func NewTrie() *Trie {
-	return &Trie{
-		children: newSparseChildList(),
+func NewTrie(options ...Option) *Trie {
+	trie := &Trie{}
+
+	for _, opt := range options {
+		opt(trie)
+	}
+
+	if trie.maxPrefixPerNode <= 0 {
+		trie.maxPrefixPerNode = DefaultMaxPrefixPerNode
+	}
+	if trie.maxChildrenPerSparseNode <= 0 {
+		trie.maxChildrenPerSparseNode = DefaultMaxChildrenPerSparseNode
+	}
+
+	trie.children = newSparseChildList(trie.maxChildrenPerSparseNode)
+	return trie
+}
+
+func MaxPrefixPerNode(value int) Option {
+	return func(trie *Trie) {
+		trie.maxPrefixPerNode = value
+	}
+}
+
+func MaxChildrenPerSparseNode(value int) Option {
+	return func(trie *Trie) {
+		trie.maxChildrenPerSparseNode = value
 	}
 }
 
@@ -85,7 +119,8 @@
 	return
 }
 
-// Visit calls visitor on every node containing a non-nil item.
+// Visit calls visitor on every node containing a non-nil item
+// in alphabetical order.
 //
 // If an error is returned from visitor, the function stops visiting the tree
 // and returns that error, unless it is a special error - SkipSubtree. In that
@@ -233,7 +268,7 @@
 	// If we are in the root of the trie, reset the trie.
 	if parent == nil {
 		root.prefix = nil
-		root.children = newSparseChildList()
+		root.children = newSparseChildList(trie.maxPrefixPerNode)
 		return true
 	}
 
@@ -257,12 +292,12 @@
 	)
 
 	if node.prefix == nil {
-		if len(key) <= MaxPrefixPerNode {
+		if len(key) <= trie.maxPrefixPerNode {
 			node.prefix = key
 			goto InsertItem
 		}
-		node.prefix = key[:MaxPrefixPerNode]
-		key = key[MaxPrefixPerNode:]
+		node.prefix = key[:trie.maxPrefixPerNode]
+		key = key[trie.maxPrefixPerNode:]
 		goto AppendChild
 	}
 
@@ -306,14 +341,14 @@
 	// This loop starts with empty node.prefix that needs to be filled.
 	for len(key) != 0 {
 		child := NewTrie()
-		if len(key) <= MaxPrefixPerNode {
+		if len(key) <= trie.maxPrefixPerNode {
 			child.prefix = key
 			node.children = node.children.add(child)
 			node = child
 			goto InsertItem
 		} else {
-			child.prefix = key[:MaxPrefixPerNode]
-			key = key[MaxPrefixPerNode:]
+			child.prefix = key[:trie.maxPrefixPerNode]
+			key = key[trie.maxPrefixPerNode:]
 			node.children = node.children.add(child)
 			node = child
 		}
@@ -344,7 +379,7 @@
 	}
 
 	// Make sure the combined prefixes fit into a single node.
-	if len(trie.prefix)+len(child.prefix) > MaxPrefixPerNode {
+	if len(trie.prefix)+len(child.prefix) > trie.maxPrefixPerNode {
 		return trie
 	}
 
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
index 346e9a6..96089fc 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
@@ -55,7 +55,7 @@
 	trie := NewTrie()
 	start := byte(70)
 	// create a dense node
-	for i := byte(0); i <= MaxChildrenPerSparseNode; i++ {
+	for i := byte(0); i <= DefaultMaxChildrenPerSparseNode; i++ {
 		if !trie.Insert(Prefix([]byte{start + i}), true) {
 			t.Errorf("insert failed, prefix=%v", start+i)
 		}
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
index 27f3c87..b35c9e2 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
@@ -300,10 +300,10 @@
 	someErr := errors.New("Something exploded")
 	if err := trie.Visit(func(prefix Prefix, item Item) error {
 		t.Logf("VISITING prefix=%q, item=%v", prefix, item)
-		if item.(int) == 0 {
+		if item.(int) == 3 {
 			return someErr
 		}
-		if item.(int) != 0 {
+		if item.(int) != 3 {
 			t.Errorf("Unexpected prefix encountered, %q", prefix)
 		}
 		return nil
@@ -598,10 +598,10 @@
 
 	// Walk the tree.
 	trie.Visit(printItem)
+	// "Karel Hynek Macha": 4
+	// "Karel Macha": 3
 	// "Pepa Novak": 1
 	// "Pepa Sindelar": 2
-	// "Karel Macha": 3
-	// "Karel Hynek Macha": 4
 
 	// Walk a subtree.
 	trie.VisitSubtree(Prefix("Pepa"), printItem)
@@ -625,8 +625,8 @@
 
 	// Walk again.
 	trie.Visit(printItem)
-	// "Pepa Sindelar": 2
 	// "Karel Hynek Macha": 10
+	// "Pepa Sindelar": 2
 
 	// Delete a subtree.
 	trie.DeleteSubtree(Prefix("Pepa"))
@@ -638,16 +638,16 @@
 	// Output:
 	// "Pepa Novak" present? true
 	// Anybody called "Karel" here? true
-	// "Pepa Novak": 1
-	// "Pepa Sindelar": 2
-	// "Karel Macha": 3
 	// "Karel Hynek Macha": 4
+	// "Karel Macha": 3
+	// "Pepa Novak": 1
+	// "Pepa Sindelar": 2
 	// "Pepa Novak": 1
 	// "Pepa Sindelar": 2
 	// "Karel Hynek Macha": 10
 	// "Karel Hynek Macha": 10
-	// "Pepa Sindelar": 2
 	// "Karel Hynek Macha": 10
+	// "Pepa Sindelar": 2
 	// "Karel Hynek Macha": 10
 }
 
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
index ce5ae37..12c441b 100644
--- a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
@@ -13,6 +13,20 @@
 
 // Tests -----------------------------------------------------------------------
 
+func TestTrie_ConstructorOptions(t *testing.T) {
+	trie := NewTrie(MaxPrefixPerNode(16), MaxChildrenPerSparseNode(10))
+
+	if trie.maxPrefixPerNode != 16 {
+		t.Errorf("Unexpected trie.maxPrefixPerNode value, expected=%v, got=%v",
+			16, trie.maxPrefixPerNode)
+	}
+
+	if trie.maxChildrenPerSparseNode != 10 {
+		t.Errorf("Unexpected trie.maxChildrenPerSparseNode value, expected=%v, got=%v",
+			10, trie.maxChildrenPerSparseNode)
+	}
+}
+
 func TestTrie_GetNonexistentPrefix(t *testing.T) {
 	trie := NewTrie()
 
diff --git a/vendor/src/github.com/vishvananda/netlink/.travis.yml b/vendor/src/github.com/vishvananda/netlink/.travis.yml
new file mode 100644
index 0000000..1970069
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+install:
+      - go get github.com/vishvananda/netns
diff --git a/vendor/src/github.com/vishvananda/netlink/LICENSE b/vendor/src/github.com/vishvananda/netlink/LICENSE
new file mode 100644
index 0000000..9f64db8
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/LICENSE
@@ -0,0 +1,192 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014 Vishvananda Ishaya.
+   Copyright 2014 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/src/github.com/vishvananda/netlink/Makefile b/vendor/src/github.com/vishvananda/netlink/Makefile
new file mode 100644
index 0000000..b325018
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/Makefile
@@ -0,0 +1,29 @@
+DIRS := \
+	. \
+	nl
+
+DEPS = \
+	github.com/vishvananda/netns
+
+uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1)))
+testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go))))
+goroot = $(addprefix ../../../,$(1))
+unroot = $(subst ../../../,,$(1))
+fmt = $(addprefix fmt-,$(1))
+
+all: fmt
+
+$(call goroot,$(DEPS)):
+	go get $(call unroot,$@)
+
+.PHONY: $(call testdirs,$(DIRS))
+$(call testdirs,$(DIRS)):
+	sudo -E go test -v github.com/vishvananda/netlink/$@
+
+$(call fmt,$(call testdirs,$(DIRS))):
+	! gofmt -l $(subst fmt-,,$@)/*.go | grep ''
+
+.PHONY: fmt
+fmt: $(call fmt,$(call testdirs,$(DIRS)))
+
+test: fmt $(call goroot,$(DEPS)) $(call testdirs,$(DIRS))
diff --git a/vendor/src/github.com/vishvananda/netlink/README.md b/vendor/src/github.com/vishvananda/netlink/README.md
new file mode 100644
index 0000000..555f886
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/README.md
@@ -0,0 +1,83 @@
+# netlink - netlink library for go #
+
+[![Build Status](https://travis-ci.org/vishvananda/netlink.png?branch=master)](https://travis-ci.org/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink)
+
+The netlink package provides a simple netlink library for go. Netlink
+is the interface a user-space program in linux uses to communicate with
+the kernel. It can be used to add and remove interfaces, set ip addresses
+and routes, and configure ipsec. Netlink communication requires elevated
+privileges, so in most cases this code needs to be run as root. Since
+low-level netlink messages are inscrutable at best, the library attempts
+to provide an api that is loosely modeled on the CLI provied by iproute2.
+Actions like `ip link add` will be accomplished via a similarly named
+function like AddLink(). This library began its life as a fork of the
+netlink functionality in
+[docker/libcontainer](https://github.com/docker/libcontainer) but was
+heavily rewritten to improve testability, performance, and to add new
+functionality like ipsec xfrm handling.
+
+## Local Build and Test ##
+
+You can use go get command:
+
+    go get github.com/vishvananda/netlink
+
+Testing dependencies:
+
+    go get github.com/vishvananda/netns
+
+Testing (requires root):
+
+    sudo -E go test github.com/vishvananda/netlink
+
+## Examples ##
+
+Add a new bridge and add eth1 into it:
+
+```go
+package main
+
+import (
+    "net"
+    "github.com/vishvananda/netlink"
+)
+
+func main() {
+    mybridge := &netlink.Bridge{netlink.LinkAttrs{Name: "foo"}}
+    _ := netlink.LinkAdd(mybridge)
+    eth1, _ := netlink.LinkByName("eth1")
+    netlink.LinkSetMaster(eth1, mybridge)
+}
+
+```
+
+Add a new ip address to loopback:
+
+```go
+package main
+
+import (
+    "net"
+    "github.com/vishvananda/netlink"
+)
+
+func main() {
+    lo, _ := netlink.LinkByName("lo")
+    addr, _ := netlink.ParseAddr("169.254.169.254/32")
+    netlink.AddrAdd(lo, addr)
+}
+
+```
+
+## Future Work ##
+
+Many pieces of netlink are not yet fully supported in the high-level
+interface. Aspects of virtually all of the high-level objects don't exist.
+Many of the underlying primitives are there, so its a matter of putting
+the right fields into the high-level objects and making sure that they
+are serialized and deserialized correctly in the Add and List methods.
+
+There are also a few pieces of low level netlink functionality that still
+need to be implemented. Routing rules are not in place and some of the
+more advanced link types. Hopefully there is decent structure and testing
+in place to make these fairly straightforward to add.
diff --git a/vendor/src/github.com/vishvananda/netlink/addr.go b/vendor/src/github.com/vishvananda/netlink/addr.go
new file mode 100644
index 0000000..5c12f4e
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/addr.go
@@ -0,0 +1,43 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// Addr represents an IP address from netlink. Netlink ip addresses
+// include a mask, so it stores the address as a net.IPNet.
+type Addr struct {
+	*net.IPNet
+	Label string
+}
+
+// String returns $ip/$netmask $label
+func (addr Addr) String() string {
+	return fmt.Sprintf("%s %s", addr.IPNet, addr.Label)
+}
+
+// ParseAddr parses the string representation of an address in the
+// form $ip/$netmask $label. The label portion is optional
+func ParseAddr(s string) (*Addr, error) {
+	label := ""
+	parts := strings.Split(s, " ")
+	if len(parts) > 1 {
+		s = parts[0]
+		label = parts[1]
+	}
+	m, err := ParseIPNet(s)
+	if err != nil {
+		return nil, err
+	}
+	return &Addr{IPNet: m, Label: label}, nil
+}
+
+// Equal returns true if both Addrs have the same net.IPNet value.
+func (a Addr) Equal(x Addr) bool {
+	sizea, _ := a.Mask.Size()
+	sizeb, _ := x.Mask.Size()
+	// ignore label for comparison
+	return a.IP.Equal(x.IP) && sizea == sizeb
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/addr_linux.go b/vendor/src/github.com/vishvananda/netlink/addr_linux.go
new file mode 100644
index 0000000..dd26f4a
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/addr_linux.go
@@ -0,0 +1,114 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+	"strings"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+// AddrAdd will add an IP address to a link device.
+// Equivalent to: `ip addr add $addr dev $link`
+func AddrAdd(link Link, addr *Addr) error {
+
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+	return addrHandle(link, addr, req)
+}
+
+// AddrDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func AddrDel(link Link, addr *Addr) error {
+	req := nl.NewNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK)
+	return addrHandle(link, addr, req)
+}
+
+func addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error {
+	base := link.Attrs()
+	if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) {
+		return fmt.Errorf("label must begin with interface name")
+	}
+	ensureIndex(base)
+
+	family := nl.GetIPFamily(addr.IP)
+
+	msg := nl.NewIfAddrmsg(family)
+	msg.Index = uint32(base.Index)
+	prefixlen, _ := addr.Mask.Size()
+	msg.Prefixlen = uint8(prefixlen)
+	req.AddData(msg)
+
+	var addrData []byte
+	if family == FAMILY_V4 {
+		addrData = addr.IP.To4()
+	} else {
+		addrData = addr.IP.To16()
+	}
+
+	localData := nl.NewRtAttr(syscall.IFA_LOCAL, addrData)
+	req.AddData(localData)
+
+	addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, addrData)
+	req.AddData(addressData)
+
+	if addr.Label != "" {
+		labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label))
+		req.AddData(labelData)
+	}
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// AddrList gets a list of IP addresses in the system.
+// Equivalent to: `ip addr show`.
+// The list can be filtered by link and ip family.
+func AddrList(link Link, family int) ([]Addr, error) {
+	req := nl.NewNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP)
+	msg := nl.NewIfInfomsg(family)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR)
+	if err != nil {
+		return nil, err
+	}
+
+	index := 0
+	if link != nil {
+		base := link.Attrs()
+		ensureIndex(base)
+		index = base.Index
+	}
+
+	res := make([]Addr, 0)
+	for _, m := range msgs {
+		msg := nl.DeserializeIfAddrmsg(m)
+
+		if link != nil && msg.Index != uint32(index) {
+			// Ignore messages from other interfaces
+			continue
+		}
+
+		attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+
+		var addr Addr
+		for _, attr := range attrs {
+			switch attr.Attr.Type {
+			case syscall.IFA_ADDRESS:
+				addr.IPNet = &net.IPNet{
+					IP:   attr.Value,
+					Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+				}
+			case syscall.IFA_LABEL:
+				addr.Label = string(attr.Value[:len(attr.Value)-1])
+			}
+		}
+		res = append(res, addr)
+	}
+
+	return res, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/addr_test.go b/vendor/src/github.com/vishvananda/netlink/addr_test.go
new file mode 100644
index 0000000..45e22c0
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/addr_test.go
@@ -0,0 +1,45 @@
+package netlink
+
+import (
+	"testing"
+)
+
+func TestAddrAddDel(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	link, err := LinkByName("lo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	addr, err := ParseAddr("127.1.1.1/24 local")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = AddrAdd(link, addr); err != nil {
+		t.Fatal(err)
+	}
+
+	addrs, err := AddrList(link, FAMILY_ALL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(addrs) != 1 || !addr.Equal(addrs[0]) || addrs[0].Label != addr.Label {
+		t.Fatal("Address not added properly")
+	}
+
+	if err = AddrDel(link, addr); err != nil {
+		t.Fatal(err)
+	}
+	addrs, err = AddrList(link, FAMILY_ALL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(addrs) != 0 {
+		t.Fatal("Address not removed properly")
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/link.go b/vendor/src/github.com/vishvananda/netlink/link.go
new file mode 100644
index 0000000..276c2f8
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/link.go
@@ -0,0 +1,175 @@
+package netlink
+
+import "net"
+
+// Link represents a link device from netlink. Shared link attributes
+// like name may be retrieved using the Attrs() method. Unique data
+// can be retrieved by casting the object to the proper type.
+type Link interface {
+	Attrs() *LinkAttrs
+	Type() string
+}
+
+// LinkAttrs represents data shared by most link types
+type LinkAttrs struct {
+	Index        int
+	MTU          int
+	TxQLen       uint32 // Transmit Queue Length
+	Name         string
+	HardwareAddr net.HardwareAddr
+	Flags        net.Flags
+	ParentIndex  int // index of the parent link device
+	MasterIndex  int // must be the index of a bridge
+}
+
+// Device links cannot be created via netlink. These links
+// are links created by udev like 'lo' and 'etho0'
+type Device struct {
+	LinkAttrs
+}
+
+func (device *Device) Attrs() *LinkAttrs {
+	return &device.LinkAttrs
+}
+
+func (device *Device) Type() string {
+	return "device"
+}
+
+// Dummy links are dummy ethernet devices
+type Dummy struct {
+	LinkAttrs
+}
+
+func (dummy *Dummy) Attrs() *LinkAttrs {
+	return &dummy.LinkAttrs
+}
+
+func (dummy *Dummy) Type() string {
+	return "dummy"
+}
+
+// Bridge links are simple linux bridges
+type Bridge struct {
+	LinkAttrs
+}
+
+func (bridge *Bridge) Attrs() *LinkAttrs {
+	return &bridge.LinkAttrs
+}
+
+func (bridge *Bridge) Type() string {
+	return "bridge"
+}
+
+// Vlan links have ParentIndex set in their Attrs()
+type Vlan struct {
+	LinkAttrs
+	VlanId int
+}
+
+func (vlan *Vlan) Attrs() *LinkAttrs {
+	return &vlan.LinkAttrs
+}
+
+func (vlan *Vlan) Type() string {
+	return "vlan"
+}
+
+// Macvlan links have ParentIndex set in their Attrs()
+type Macvlan struct {
+	LinkAttrs
+}
+
+func (macvlan *Macvlan) Attrs() *LinkAttrs {
+	return &macvlan.LinkAttrs
+}
+
+func (macvlan *Macvlan) Type() string {
+	return "macvlan"
+}
+
+// Veth devices must specify PeerName on create
+type Veth struct {
+	LinkAttrs
+	PeerName string // veth on create only
+}
+
+func (veth *Veth) Attrs() *LinkAttrs {
+	return &veth.LinkAttrs
+}
+
+func (veth *Veth) Type() string {
+	return "veth"
+}
+
+// Generic links represent types that are not currently understood
+// by this netlink library.
+type Generic struct {
+	LinkAttrs
+	LinkType string
+}
+
+func (generic *Generic) Attrs() *LinkAttrs {
+	return &generic.LinkAttrs
+}
+
+func (generic *Generic) Type() string {
+	return generic.LinkType
+}
+
+type Vxlan struct {
+	LinkAttrs
+	VxlanId      int
+	VtepDevIndex int
+	SrcAddr      net.IP
+	Group        net.IP
+	TTL          int
+	TOS          int
+	Learning     bool
+	Proxy        bool
+	RSC          bool
+	L2miss       bool
+	L3miss       bool
+	NoAge        bool
+	Age          int
+	Limit        int
+	Port         int
+	PortLow      int
+	PortHigh     int
+}
+
+func (vxlan *Vxlan) Attrs() *LinkAttrs {
+	return &vxlan.LinkAttrs
+}
+
+func (vxlan *Vxlan) Type() string {
+	return "vxlan"
+}
+
+type IPVlanMode uint16
+
+const (
+	IPVLAN_MODE_L2 IPVlanMode = iota
+	IPVLAN_MODE_L3
+	IPVLAN_MODE_MAX
+)
+
+type IPVlan struct {
+	LinkAttrs
+	Mode IPVlanMode
+}
+
+func (ipvlan *IPVlan) Attrs() *LinkAttrs {
+	return &ipvlan.LinkAttrs
+}
+
+func (ipvlan *IPVlan) Type() string {
+	return "ipvlan"
+}
+
+// iproute2 supported devices;
+// vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
+// bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan |
+// gre | gretap | ip6gre | ip6gretap | vti | nlmon |
+// bond_slave | ipvlan
diff --git a/vendor/src/github.com/vishvananda/netlink/link_linux.go b/vendor/src/github.com/vishvananda/netlink/link_linux.go
new file mode 100644
index 0000000..aedea16
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/link_linux.go
@@ -0,0 +1,696 @@
+package netlink
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+var native = nl.NativeEndian()
+var lookupByDump = false
+
+func ensureIndex(link *LinkAttrs) {
+	if link != nil && link.Index == 0 {
+		newlink, _ := LinkByName(link.Name)
+		if newlink != nil {
+			link.Index = newlink.Attrs().Index
+		}
+	}
+}
+
+// LinkSetUp enables the link device.
+// Equivalent to: `ip link set $link up`
+func LinkSetUp(link Link) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Change = syscall.IFF_UP
+	msg.Flags = syscall.IFF_UP
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetUp disables link device.
+// Equivalent to: `ip link set $link down`
+func LinkSetDown(link Link) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Change = syscall.IFF_UP
+	msg.Flags = 0 & ^syscall.IFF_UP
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetMTU sets the mtu of the link device.
+// Equivalent to: `ip link set $link mtu $mtu`
+func LinkSetMTU(link Link, mtu int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(mtu))
+
+	data := nl.NewRtAttr(syscall.IFLA_MTU, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetName sets the name of the link device.
+// Equivalent to: `ip link set $link name $name`
+func LinkSetName(link Link, name string) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name))
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetHardwareAddr sets the hardware address of the link device.
+// Equivalent to: `ip link set $link address $hwaddr`
+func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr))
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetMaster sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMaster(link Link, master *Bridge) error {
+	index := 0
+	if master != nil {
+		masterBase := master.Attrs()
+		ensureIndex(masterBase)
+		index = masterBase.Index
+	}
+	return LinkSetMasterByIndex(link, index)
+}
+
+// LinkSetMasterByIndex sets the master of the link device.
+// Equivalent to: `ip link set $link master $master`
+func LinkSetMasterByIndex(link Link, masterIndex int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(masterIndex))
+
+	data := nl.NewRtAttr(syscall.IFLA_MASTER, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetNsPid puts the device into a new network namespace. The
+// pid must be a pid of a running process.
+// Equivalent to: `ip link set $link netns $pid`
+func LinkSetNsPid(link Link, nspid int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(nspid))
+
+	data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetNsPid puts the device into a new network namespace. The
+// fd must be an open file descriptor to a network namespace.
+// Similar to: `ip link set $link netns $ns`
+func LinkSetNsFd(link Link, fd int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(fd))
+
+	data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+func boolAttr(val bool) []byte {
+	var v uint8
+	if val {
+		v = 1
+	}
+	return nl.Uint8Attr(v)
+}
+
+type vxlanPortRange struct {
+	Lo, Hi uint16
+}
+
+func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
+	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
+	if vxlan.VtepDevIndex != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
+	}
+	if vxlan.SrcAddr != nil {
+		ip := vxlan.SrcAddr.To4()
+		if ip != nil {
+			nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL, []byte(ip))
+		} else {
+			ip = vxlan.SrcAddr.To16()
+			if ip != nil {
+				nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LOCAL6, []byte(ip))
+			}
+		}
+	}
+	if vxlan.Group != nil {
+		group := vxlan.Group.To4()
+		if group != nil {
+			nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP, []byte(group))
+		} else {
+			group = vxlan.Group.To16()
+			if group != nil {
+				nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GROUP6, []byte(group))
+			}
+		}
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL)))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS)))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
+	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
+
+	if vxlan.NoAge {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
+	} else if vxlan.Age > 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age)))
+	}
+	if vxlan.Limit > 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit)))
+	}
+	if vxlan.Port > 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, nl.Uint16Attr(uint16(vxlan.Port)))
+	}
+	if vxlan.PortLow > 0 || vxlan.PortHigh > 0 {
+		pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)}
+
+		buf := new(bytes.Buffer)
+		binary.Write(buf, binary.BigEndian, &pr)
+
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes())
+	}
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken fromt the parameters in the link object.
+// Equivalent to: `ip link add $link`
+func LinkAdd(link Link) error {
+	// TODO: set mtu and hardware address
+	// TODO: support extra data for macvlan
+	base := link.Attrs()
+
+	if base.Name == "" {
+		return fmt.Errorf("LinkAttrs.Name cannot be empty!")
+	}
+
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	req.AddData(msg)
+
+	if base.ParentIndex != 0 {
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(base.ParentIndex))
+		data := nl.NewRtAttr(syscall.IFLA_LINK, b)
+		req.AddData(data)
+	} else if link.Type() == "ipvlan" {
+		return fmt.Errorf("Can't create ipvlan link without ParentIndex")
+	}
+
+	nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name))
+	req.AddData(nameData)
+
+	if base.MTU > 0 {
+		mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+		req.AddData(mtu)
+	}
+
+	linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
+	nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
+
+	nl.NewRtAttrChild(linkInfo, syscall.IFLA_TXQLEN, nl.Uint32Attr(base.TxQLen))
+
+	if vlan, ok := link.(*Vlan); ok {
+		b := make([]byte, 2)
+		native.PutUint16(b, uint16(vlan.VlanId))
+		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+		nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
+	} else if veth, ok := link.(*Veth); ok {
+		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+		peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
+		nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC)
+		nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName))
+		nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(base.TxQLen))
+		if base.MTU > 0 {
+			nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
+		}
+	} else if vxlan, ok := link.(*Vxlan); ok {
+		addVxlanAttrs(vxlan, linkInfo)
+	} else if ipv, ok := link.(*IPVlan); ok {
+		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+		nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode)))
+	}
+
+	req.AddData(linkInfo)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		return err
+	}
+
+	ensureIndex(base)
+
+	// can't set master during create, so set it afterwards
+	if base.MasterIndex != 0 {
+		// TODO: verify MasterIndex is actually a bridge?
+		return LinkSetMasterByIndex(link, base.MasterIndex)
+	}
+	return nil
+}
+
+// LinkDel deletes link device. Either Index or Name must be set in
+// the link object for it to be deleted. The other values are ignored.
+// Equivalent to: `ip link del $link`
+func LinkDel(link Link) error {
+	base := link.Attrs()
+
+	ensureIndex(base)
+
+	req := nl.NewNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+func linkByNameDump(name string) (Link, error) {
+	links, err := LinkList()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, link := range links {
+		if link.Attrs().Name == name {
+			return link, nil
+		}
+	}
+	return nil, fmt.Errorf("Link %s not found", name)
+}
+
+// LinkByName finds a link by name and returns a pointer to the object.
+func LinkByName(name string) (Link, error) {
+	if lookupByDump {
+		return linkByNameDump(name)
+	}
+
+	req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	req.AddData(msg)
+
+	nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name))
+	req.AddData(nameData)
+
+	link, err := execGetLink(req)
+	if err == syscall.EINVAL {
+		// older kernels don't support looking up via IFLA_IFNAME
+		// so fall back to dumping all links
+		lookupByDump = true
+		return linkByNameDump(name)
+	}
+
+	return link, err
+}
+
+// LinkByIndex finds a link by index and returns a pointer to the object.
+func LinkByIndex(index int) (Link, error) {
+	req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(index)
+	req.AddData(msg)
+
+	return execGetLink(req)
+}
+
+func execGetLink(req *nl.NetlinkRequest) (Link, error) {
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		if errno, ok := err.(syscall.Errno); ok {
+			if errno == syscall.ENODEV {
+				return nil, fmt.Errorf("Link not found")
+			}
+		}
+		return nil, err
+	}
+
+	switch {
+	case len(msgs) == 0:
+		return nil, fmt.Errorf("Link not found")
+
+	case len(msgs) == 1:
+		return linkDeserialize(msgs[0])
+
+	default:
+		return nil, fmt.Errorf("More than one link found")
+	}
+}
+
+// linkDeserialize deserializes a raw message received from netlink into
+// a link object.
+func linkDeserialize(m []byte) (Link, error) {
+	msg := nl.DeserializeIfInfomsg(m)
+
+	attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+	if err != nil {
+		return nil, err
+	}
+
+	base := LinkAttrs{Index: int(msg.Index), Flags: linkFlags(msg.Flags)}
+	var link Link
+	linkType := ""
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case syscall.IFLA_LINKINFO:
+			infos, err := nl.ParseRouteAttr(attr.Value)
+			if err != nil {
+				return nil, err
+			}
+			for _, info := range infos {
+				switch info.Attr.Type {
+				case nl.IFLA_INFO_KIND:
+					linkType = string(info.Value[:len(info.Value)-1])
+					switch linkType {
+					case "dummy":
+						link = &Dummy{}
+					case "bridge":
+						link = &Bridge{}
+					case "vlan":
+						link = &Vlan{}
+					case "veth":
+						link = &Veth{}
+					case "vxlan":
+						link = &Vxlan{}
+					case "ipvlan":
+						link = &IPVlan{}
+					default:
+						link = &Generic{LinkType: linkType}
+					}
+				case nl.IFLA_INFO_DATA:
+					data, err := nl.ParseRouteAttr(info.Value)
+					if err != nil {
+						return nil, err
+					}
+					switch linkType {
+					case "vlan":
+						parseVlanData(link, data)
+					case "vxlan":
+						parseVxlanData(link, data)
+					case "ipvlan":
+						parseIPVlanData(link, data)
+					}
+				}
+			}
+		case syscall.IFLA_ADDRESS:
+			var nonzero bool
+			for _, b := range attr.Value {
+				if b != 0 {
+					nonzero = true
+				}
+			}
+			if nonzero {
+				base.HardwareAddr = attr.Value[:]
+			}
+		case syscall.IFLA_IFNAME:
+			base.Name = string(attr.Value[:len(attr.Value)-1])
+		case syscall.IFLA_MTU:
+			base.MTU = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_LINK:
+			base.ParentIndex = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_MASTER:
+			base.MasterIndex = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_TXQLEN:
+			base.TxQLen = native.Uint32(attr.Value[0:4])
+		}
+	}
+	// Links that don't have IFLA_INFO_KIND are hardware devices
+	if link == nil {
+		link = &Device{}
+	}
+	*link.Attrs() = base
+
+	return link, nil
+}
+
+// LinkList gets a list of link devices.
+// Equivalent to: `ip link show`
+func LinkList() ([]Link, error) {
+	// NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need
+	//             to get the message ourselves to parse link type.
+	req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]Link, 0)
+
+	for _, m := range msgs {
+		link, err := linkDeserialize(m)
+		if err != nil {
+			return nil, err
+		}
+		res = append(res, link)
+	}
+
+	return res, nil
+}
+
+func LinkSetHairpin(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE)
+}
+
+func LinkSetGuard(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD)
+}
+
+func LinkSetFastLeave(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE)
+}
+
+func LinkSetLearning(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING)
+}
+
+func LinkSetRootBlock(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT)
+}
+
+func LinkSetFlood(link Link, mode bool) error {
+	return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD)
+}
+
+func setProtinfoAttr(link Link, mode bool, attr int) error {
+	base := link.Attrs()
+	ensureIndex(base)
+	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+	msg.Type = syscall.RTM_SETLINK
+	msg.Flags = syscall.NLM_F_REQUEST
+	msg.Index = int32(base.Index)
+	msg.Change = nl.DEFAULT_CHANGE
+	req.AddData(msg)
+
+	br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil)
+	nl.NewRtAttrChild(br, attr, boolToByte(mode))
+	req.AddData(br)
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	vlan := link.(*Vlan)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_VLAN_ID:
+			vlan.VlanId = int(native.Uint16(datum.Value[0:2]))
+		}
+	}
+}
+
+func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	vxlan := link.(*Vxlan)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_VXLAN_ID:
+			vxlan.VxlanId = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_LINK:
+			vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_LOCAL:
+			vxlan.SrcAddr = net.IP(datum.Value[0:4])
+		case nl.IFLA_VXLAN_LOCAL6:
+			vxlan.SrcAddr = net.IP(datum.Value[0:16])
+		case nl.IFLA_VXLAN_GROUP:
+			vxlan.Group = net.IP(datum.Value[0:4])
+		case nl.IFLA_VXLAN_GROUP6:
+			vxlan.Group = net.IP(datum.Value[0:16])
+		case nl.IFLA_VXLAN_TTL:
+			vxlan.TTL = int(datum.Value[0])
+		case nl.IFLA_VXLAN_TOS:
+			vxlan.TOS = int(datum.Value[0])
+		case nl.IFLA_VXLAN_LEARNING:
+			vxlan.Learning = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_PROXY:
+			vxlan.Proxy = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_RSC:
+			vxlan.RSC = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_L2MISS:
+			vxlan.L2miss = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_L3MISS:
+			vxlan.L3miss = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_AGEING:
+			vxlan.Age = int(native.Uint32(datum.Value[0:4]))
+			vxlan.NoAge = vxlan.Age == 0
+		case nl.IFLA_VXLAN_LIMIT:
+			vxlan.Limit = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_PORT:
+			vxlan.Port = int(native.Uint16(datum.Value[0:2]))
+		case nl.IFLA_VXLAN_PORT_RANGE:
+			buf := bytes.NewBuffer(datum.Value[0:4])
+			var pr vxlanPortRange
+			if binary.Read(buf, binary.BigEndian, &pr) != nil {
+				vxlan.PortLow = int(pr.Lo)
+				vxlan.PortHigh = int(pr.Hi)
+			}
+		}
+	}
+}
+
+func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	ipv := link.(*IPVlan)
+	for _, datum := range data {
+		if datum.Attr.Type == nl.IFLA_IPVLAN_MODE {
+			ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4]))
+			return
+		}
+	}
+}
+
+// copied from pkg/net_linux.go
+func linkFlags(rawFlags uint32) net.Flags {
+	var f net.Flags
+	if rawFlags&syscall.IFF_UP != 0 {
+		f |= net.FlagUp
+	}
+	if rawFlags&syscall.IFF_BROADCAST != 0 {
+		f |= net.FlagBroadcast
+	}
+	if rawFlags&syscall.IFF_LOOPBACK != 0 {
+		f |= net.FlagLoopback
+	}
+	if rawFlags&syscall.IFF_POINTOPOINT != 0 {
+		f |= net.FlagPointToPoint
+	}
+	if rawFlags&syscall.IFF_MULTICAST != 0 {
+		f |= net.FlagMulticast
+	}
+	return f
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/link_test.go b/vendor/src/github.com/vishvananda/netlink/link_test.go
new file mode 100644
index 0000000..05b8e95
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/link_test.go
@@ -0,0 +1,531 @@
+package netlink
+
+import (
+	"bytes"
+	"net"
+	"testing"
+
+	"github.com/vishvananda/netns"
+)
+
+const testTxQLen uint32 = 100
+
+func testLinkAddDel(t *testing.T, link Link) {
+	links, err := LinkList()
+	if err != nil {
+		t.Fatal(err)
+	}
+	num := len(links)
+
+	if err := LinkAdd(link); err != nil {
+		t.Fatal(err)
+	}
+
+	base := link.Attrs()
+
+	result, err := LinkByName(base.Name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	rBase := result.Attrs()
+
+	if vlan, ok := link.(*Vlan); ok {
+		other, ok := result.(*Vlan)
+		if !ok {
+			t.Fatal("Result of create is not a vlan")
+		}
+		if vlan.VlanId != other.VlanId {
+			t.Fatal("Link.VlanId id doesn't match")
+		}
+	}
+
+	if rBase.ParentIndex == 0 && base.ParentIndex != 0 {
+		t.Fatal("Created link doesn't have a Parent but it should")
+	} else if rBase.ParentIndex != 0 && base.ParentIndex == 0 {
+		t.Fatal("Created link has a Parent but it shouldn't")
+	} else if rBase.ParentIndex != 0 && base.ParentIndex != 0 {
+		if rBase.ParentIndex != base.ParentIndex {
+			t.Fatal("Link.ParentIndex doesn't match")
+		}
+	}
+
+	if veth, ok := link.(*Veth); ok {
+		if veth.TxQLen != testTxQLen {
+			t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, testTxQLen)
+		}
+		if rBase.MTU != base.MTU {
+			t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU)
+		}
+
+		if veth.PeerName != "" {
+			var peer *Veth
+			other, err := LinkByName(veth.PeerName)
+			if err != nil {
+				t.Fatalf("Peer %s not created", veth.PeerName)
+			}
+			if peer, ok = other.(*Veth); !ok {
+				t.Fatalf("Peer %s is incorrect type", veth.PeerName)
+			}
+			if peer.TxQLen != testTxQLen {
+				t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen)
+			}
+		}
+	}
+
+	if vxlan, ok := link.(*Vxlan); ok {
+		other, ok := result.(*Vxlan)
+		if !ok {
+			t.Fatal("Result of create is not a vxlan")
+		}
+		compareVxlan(t, vxlan, other)
+	}
+
+	if ipv, ok := link.(*IPVlan); ok {
+		other, ok := result.(*IPVlan)
+		if !ok {
+			t.Fatal("Result of create is not a ipvlan")
+		}
+		if ipv.Mode != other.Mode {
+			t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode)
+		}
+	}
+
+	if err = LinkDel(link); err != nil {
+		t.Fatal(err)
+	}
+
+	links, err = LinkList()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(links) != num {
+		t.Fatal("Link not removed properly")
+	}
+}
+
+func compareVxlan(t *testing.T, expected, actual *Vxlan) {
+
+	if actual.VxlanId != expected.VxlanId {
+		t.Fatal("Vxlan.VxlanId doesn't match")
+	}
+	if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) {
+		t.Fatal("Vxlan.SrcAddr doesn't match")
+	}
+	if expected.Group != nil && !actual.Group.Equal(expected.Group) {
+		t.Fatal("Vxlan.Group doesn't match")
+	}
+	if expected.TTL != -1 && actual.TTL != expected.TTL {
+		t.Fatal("Vxlan.TTL doesn't match")
+	}
+	if expected.TOS != -1 && actual.TOS != expected.TOS {
+		t.Fatal("Vxlan.TOS doesn't match")
+	}
+	if actual.Learning != expected.Learning {
+		t.Fatal("Vxlan.Learning doesn't match")
+	}
+	if actual.Proxy != expected.Proxy {
+		t.Fatal("Vxlan.Proxy doesn't match")
+	}
+	if actual.RSC != expected.RSC {
+		t.Fatal("Vxlan.RSC doesn't match")
+	}
+	if actual.L2miss != expected.L2miss {
+		t.Fatal("Vxlan.L2miss doesn't match")
+	}
+	if actual.L3miss != expected.L3miss {
+		t.Fatal("Vxlan.L3miss doesn't match")
+	}
+	if expected.NoAge {
+		if !actual.NoAge {
+			t.Fatal("Vxlan.NoAge doesn't match")
+		}
+	} else if expected.Age > 0 && actual.Age != expected.Age {
+		t.Fatal("Vxlan.Age doesn't match")
+	}
+	if expected.Limit > 0 && actual.Limit != expected.Limit {
+		t.Fatal("Vxlan.Limit doesn't match")
+	}
+	if expected.Port > 0 && actual.Port != expected.Port {
+		t.Fatal("Vxlan.Port doesn't match")
+	}
+	if expected.PortLow > 0 || expected.PortHigh > 0 {
+		if actual.PortLow != expected.PortLow {
+			t.Fatal("Vxlan.PortLow doesn't match")
+		}
+		if actual.PortHigh != expected.PortHigh {
+			t.Fatal("Vxlan.PortHigh doesn't match")
+		}
+	}
+}
+
+func TestLinkAddDelDummy(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo"}})
+}
+
+func TestLinkAddDelBridge(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	testLinkAddDel(t, &Bridge{LinkAttrs{Name: "foo", MTU: 1400}})
+}
+
+func TestLinkAddDelVlan(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	testLinkAddDel(t, &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900})
+
+	if err := LinkDel(parent); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestLinkAddDelMacvlan(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	testLinkAddDel(t, &Macvlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}})
+
+	if err := LinkDel(parent); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestLinkAddDelVeth(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	testLinkAddDel(t, &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"})
+}
+
+func TestLinkAddDelBridgeMaster(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	master := &Bridge{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(master); err != nil {
+		t.Fatal(err)
+	}
+	testLinkAddDel(t, &Dummy{LinkAttrs{Name: "bar", MasterIndex: master.Attrs().Index}})
+
+	if err := LinkDel(master); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestLinkSetUnsetResetMaster(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	master := &Bridge{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(master); err != nil {
+		t.Fatal(err)
+	}
+
+	newmaster := &Bridge{LinkAttrs{Name: "bar"}}
+	if err := LinkAdd(newmaster); err != nil {
+		t.Fatal(err)
+	}
+
+	slave := &Dummy{LinkAttrs{Name: "baz"}}
+	if err := LinkAdd(slave); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkSetMaster(slave, master); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err := LinkByName("baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MasterIndex != master.Attrs().Index {
+		t.Fatal("Master not set properly")
+	}
+
+	if err := LinkSetMaster(slave, newmaster); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err = LinkByName("baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MasterIndex != newmaster.Attrs().Index {
+		t.Fatal("Master not reset properly")
+	}
+
+	if err := LinkSetMaster(slave, nil); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err = LinkByName("baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MasterIndex != 0 {
+		t.Fatal("Master not unset properly")
+	}
+	if err := LinkDel(slave); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkDel(newmaster); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkDel(master); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestLinkSetNs(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	basens, err := netns.Get()
+	if err != nil {
+		t.Fatal("Failed to get basens")
+	}
+	defer basens.Close()
+
+	newns, err := netns.New()
+	if err != nil {
+		t.Fatal("Failed to create newns")
+	}
+	defer newns.Close()
+
+	link := &Veth{LinkAttrs{Name: "foo"}, "bar"}
+	if err := LinkAdd(link); err != nil {
+		t.Fatal(err)
+	}
+
+	peer, err := LinkByName("bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	LinkSetNsFd(peer, int(basens))
+	if err != nil {
+		t.Fatal("Failed to set newns for link")
+	}
+
+	_, err = LinkByName("bar")
+	if err == nil {
+		t.Fatal("Link bar is still in newns")
+	}
+
+	err = netns.Set(basens)
+	if err != nil {
+		t.Fatal("Failed to set basens")
+	}
+
+	peer, err = LinkByName("bar")
+	if err != nil {
+		t.Fatal("Link is not in basens")
+	}
+
+	if err := LinkDel(peer); err != nil {
+		t.Fatal(err)
+	}
+
+	err = netns.Set(newns)
+	if err != nil {
+		t.Fatal("Failed to set newns")
+	}
+
+	_, err = LinkByName("foo")
+	if err == nil {
+		t.Fatal("Other half of veth pair not deleted")
+	}
+
+}
+
+func TestLinkAddDelVxlan(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	parent := &Dummy{
+		LinkAttrs{Name: "foo"},
+	}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	vxlan := Vxlan{
+		LinkAttrs: LinkAttrs{
+			Name: "bar",
+		},
+		VxlanId:      10,
+		VtepDevIndex: parent.Index,
+		Learning:     true,
+		L2miss:       true,
+		L3miss:       true,
+	}
+
+	testLinkAddDel(t, &vxlan)
+	if err := LinkDel(parent); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestLinkAddDelIPVlanL2(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	ipv := IPVlan{
+		LinkAttrs: LinkAttrs{
+			Name:        "bar",
+			ParentIndex: parent.Index,
+		},
+		Mode: IPVLAN_MODE_L2,
+	}
+
+	testLinkAddDel(t, &ipv)
+}
+
+func TestLinkAddDelIPVlanL3(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+	parent := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(parent); err != nil {
+		t.Fatal(err)
+	}
+
+	ipv := IPVlan{
+		LinkAttrs: LinkAttrs{
+			Name:        "bar",
+			ParentIndex: parent.Index,
+		},
+		Mode: IPVLAN_MODE_L3,
+	}
+
+	testLinkAddDel(t, &ipv)
+}
+
+func TestLinkAddDelIPVlanNoParent(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	ipv := IPVlan{
+		LinkAttrs: LinkAttrs{
+			Name: "bar",
+		},
+		Mode: IPVLAN_MODE_L3,
+	}
+	err := LinkAdd(&ipv)
+	if err == nil {
+		t.Fatal("Add should fail if ipvlan creating without ParentIndex")
+	}
+	if err.Error() != "Can't create ipvlan link without ParentIndex" {
+		t.Fatalf("Error should be about missing ParentIndex, got %q", err)
+	}
+}
+
+func TestLinkByIndex(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	dummy := &Dummy{LinkAttrs{Name: "dummy"}}
+	if err := LinkAdd(dummy); err != nil {
+		t.Fatal(err)
+	}
+
+	found, err := LinkByIndex(dummy.Index)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if found.Attrs().Index != dummy.Attrs().Index {
+		t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index)
+	}
+
+	LinkDel(dummy)
+
+	// test not found
+	_, err = LinkByIndex(dummy.Attrs().Index)
+	if err == nil {
+		t.Fatalf("LinkByIndex(%v) found deleted link", err)
+	}
+}
+
+func TestLinkSet(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	iface := &Dummy{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(iface); err != nil {
+		t.Fatal(err)
+	}
+
+	link, err := LinkByName("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = LinkSetName(link, "bar")
+	if err != nil {
+		t.Fatalf("Could not change interface name: %v", err)
+	}
+
+	link, err = LinkByName("bar")
+	if err != nil {
+		t.Fatalf("Interface name not changed: %v", err)
+	}
+
+	err = LinkSetMTU(link, 1400)
+	if err != nil {
+		t.Fatalf("Could not set MTU: %v", err)
+	}
+
+	link, err = LinkByName("bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if link.Attrs().MTU != 1400 {
+		t.Fatal("MTU not changed!")
+	}
+
+	addr, err := net.ParseMAC("00:12:34:56:78:AB")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = LinkSetHardwareAddr(link, addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	link, err = LinkByName("bar")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(link.Attrs().HardwareAddr, addr) {
+		t.Fatalf("hardware address not changed!")
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/neigh.go b/vendor/src/github.com/vishvananda/netlink/neigh.go
new file mode 100644
index 0000000..0e5eb90
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/neigh.go
@@ -0,0 +1,22 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+)
+
+// Neigh represents a link layer neighbor from netlink.
+type Neigh struct {
+	LinkIndex    int
+	Family       int
+	State        int
+	Type         int
+	Flags        int
+	IP           net.IP
+	HardwareAddr net.HardwareAddr
+}
+
+// String returns $ip/$hwaddr $label
+func (neigh *Neigh) String() string {
+	return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/neigh_linux.go b/vendor/src/github.com/vishvananda/netlink/neigh_linux.go
new file mode 100644
index 0000000..1fdaa3a
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/neigh_linux.go
@@ -0,0 +1,189 @@
+package netlink
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+const (
+	NDA_UNSPEC = iota
+	NDA_DST
+	NDA_LLADDR
+	NDA_CACHEINFO
+	NDA_PROBES
+	NDA_VLAN
+	NDA_PORT
+	NDA_VNI
+	NDA_IFINDEX
+	NDA_MAX = NDA_IFINDEX
+)
+
+// Neighbor Cache Entry States.
+const (
+	NUD_NONE       = 0x00
+	NUD_INCOMPLETE = 0x01
+	NUD_REACHABLE  = 0x02
+	NUD_STALE      = 0x04
+	NUD_DELAY      = 0x08
+	NUD_PROBE      = 0x10
+	NUD_FAILED     = 0x20
+	NUD_NOARP      = 0x40
+	NUD_PERMANENT  = 0x80
+)
+
+// Neighbor Flags
+const (
+	NTF_USE    = 0x01
+	NTF_SELF   = 0x02
+	NTF_MASTER = 0x04
+	NTF_PROXY  = 0x08
+	NTF_ROUTER = 0x80
+)
+
+type Ndmsg struct {
+	Family uint8
+	Index  uint32
+	State  uint16
+	Flags  uint8
+	Type   uint8
+}
+
+func deserializeNdmsg(b []byte) *Ndmsg {
+	var dummy Ndmsg
+	return (*Ndmsg)(unsafe.Pointer(&b[0:unsafe.Sizeof(dummy)][0]))
+}
+
+func (msg *Ndmsg) Serialize() []byte {
+	return (*(*[unsafe.Sizeof(*msg)]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *Ndmsg) Len() int {
+	return int(unsafe.Sizeof(*msg))
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func NeighAdd(neigh *Neigh) error {
+	return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
+}
+
+// NeighAdd will add or replace an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh replace....`
+func NeighSet(neigh *Neigh) error {
+	return neighAdd(neigh, syscall.NLM_F_CREATE)
+}
+
+// NeighAppend will append an entry to FDB
+// Equivalent to: `bridge fdb append...`
+func NeighAppend(neigh *Neigh) error {
+	return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
+}
+
+func neighAdd(neigh *Neigh, mode int) error {
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
+	return neighHandle(neigh, req)
+}
+
+// NeighDel will delete an IP address from a link device.
+// Equivalent to: `ip addr del $addr dev $link`
+func NeighDel(neigh *Neigh) error {
+	req := nl.NewNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
+	return neighHandle(neigh, req)
+}
+
+func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
+	var family int
+	if neigh.Family > 0 {
+		family = neigh.Family
+	} else {
+		family = nl.GetIPFamily(neigh.IP)
+	}
+
+	msg := Ndmsg{
+		Family: uint8(family),
+		Index:  uint32(neigh.LinkIndex),
+		State:  uint16(neigh.State),
+		Type:   uint8(neigh.Type),
+		Flags:  uint8(neigh.Flags),
+	}
+	req.AddData(&msg)
+
+	ipData := neigh.IP.To4()
+	if ipData == nil {
+		ipData = neigh.IP.To16()
+	}
+
+	dstData := nl.NewRtAttr(NDA_DST, ipData)
+	req.AddData(dstData)
+
+	hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr))
+	req.AddData(hwData)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+	req := nl.NewNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
+	msg := Ndmsg{
+		Family: uint8(family),
+	}
+	req.AddData(&msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]Neigh, 0)
+	for _, m := range msgs {
+		ndm := deserializeNdmsg(m)
+		if linkIndex != 0 && int(ndm.Index) != linkIndex {
+			// Ignore messages from other interfaces
+			continue
+		}
+
+		neigh, err := NeighDeserialize(m)
+		if err != nil {
+			continue
+		}
+
+		res = append(res, *neigh)
+	}
+
+	return res, nil
+}
+
+func NeighDeserialize(m []byte) (*Neigh, error) {
+	msg := deserializeNdmsg(m)
+
+	neigh := Neigh{
+		LinkIndex: int(msg.Index),
+		Family:    int(msg.Family),
+		State:     int(msg.State),
+		Type:      int(msg.Type),
+		Flags:     int(msg.Flags),
+	}
+
+	attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+	if err != nil {
+		return nil, err
+	}
+
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case NDA_DST:
+			neigh.IP = net.IP(attr.Value)
+		case NDA_LLADDR:
+			neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+		}
+	}
+
+	return &neigh, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/neigh_test.go b/vendor/src/github.com/vishvananda/netlink/neigh_test.go
new file mode 100644
index 0000000..50da59c
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/neigh_test.go
@@ -0,0 +1,104 @@
+package netlink
+
+import (
+	"net"
+	"testing"
+)
+
+type arpEntry struct {
+	ip  net.IP
+	mac net.HardwareAddr
+}
+
+func parseMAC(s string) net.HardwareAddr {
+	m, err := net.ParseMAC(s)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+func dumpContains(dump []Neigh, e arpEntry) bool {
+	for _, n := range dump {
+		if n.IP.Equal(e.ip) && (n.State&NUD_INCOMPLETE) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+func TestNeighAddDel(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	dummy := Dummy{LinkAttrs{Name: "neigh0"}}
+	if err := LinkAdd(&dummy); err != nil {
+		t.Fatal(err)
+	}
+
+	ensureIndex(dummy.Attrs())
+
+	arpTable := []arpEntry{
+		{net.ParseIP("10.99.0.1"), parseMAC("aa:bb:cc:dd:00:01")},
+		{net.ParseIP("10.99.0.2"), parseMAC("aa:bb:cc:dd:00:02")},
+		{net.ParseIP("10.99.0.3"), parseMAC("aa:bb:cc:dd:00:03")},
+		{net.ParseIP("10.99.0.4"), parseMAC("aa:bb:cc:dd:00:04")},
+		{net.ParseIP("10.99.0.5"), parseMAC("aa:bb:cc:dd:00:05")},
+	}
+
+	// Add the arpTable
+	for _, entry := range arpTable {
+		err := NeighAdd(&Neigh{
+			LinkIndex:    dummy.Index,
+			State:        NUD_REACHABLE,
+			IP:           entry.ip,
+			HardwareAddr: entry.mac,
+		})
+
+		if err != nil {
+			t.Errorf("Failed to NeighAdd: %v", err)
+		}
+	}
+
+	// Dump and see that all added entries are there
+	dump, err := NeighList(dummy.Index, 0)
+	if err != nil {
+		t.Errorf("Failed to NeighList: %v", err)
+	}
+
+	for _, entry := range arpTable {
+		if !dumpContains(dump, entry) {
+			t.Errorf("Dump does not contain: %v", entry)
+		}
+	}
+
+	// Delete the arpTable
+	for _, entry := range arpTable {
+		err := NeighDel(&Neigh{
+			LinkIndex:    dummy.Index,
+			IP:           entry.ip,
+			HardwareAddr: entry.mac,
+		})
+
+		if err != nil {
+			t.Errorf("Failed to NeighDel: %v", err)
+		}
+	}
+
+	// TODO: seems not working because of cache
+	//// Dump and see that none of deleted entries are there
+	//dump, err = NeighList(dummy.Index, 0)
+	//if err != nil {
+	//t.Errorf("Failed to NeighList: %v", err)
+	//}
+
+	//for _, entry := range arpTable {
+	//if dumpContains(dump, entry) {
+	//t.Errorf("Dump contains: %v", entry)
+	//}
+	//}
+
+	if err := LinkDel(&dummy); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/netlink.go b/vendor/src/github.com/vishvananda/netlink/netlink.go
new file mode 100644
index 0000000..41ebdb1
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/netlink.go
@@ -0,0 +1,39 @@
+// Package netlink provides a simple library for netlink. Netlink is
+// the interface a user-space program in linux uses to communicate with
+// the kernel. It can be used to add and remove interfaces, set up ip
+// addresses and routes, and confiugre ipsec. Netlink communication
+// requires elevated privileges, so in most cases this code needs to
+// be run as root. The low level primitives for netlink are contained
+// in the nl subpackage. This package attempts to provide a high-level
+// interface that is loosly modeled on the iproute2 cli.
+package netlink
+
+import (
+	"net"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+const (
+	// Family type definitions
+	FAMILY_ALL = nl.FAMILY_ALL
+	FAMILY_V4  = nl.FAMILY_V4
+	FAMILY_V6  = nl.FAMILY_V6
+)
+
+// ParseIPNet parses a string in ip/net format and returns a net.IPNet.
+// This is valuable because addresses in netlink are often IPNets and
+// ParseCIDR returns an IPNet with the IP part set to the base IP of the
+// range.
+func ParseIPNet(s string) (*net.IPNet, error) {
+	ip, ipNet, err := net.ParseCIDR(s)
+	if err != nil {
+		return nil, err
+	}
+	return &net.IPNet{IP: ip, Mask: ipNet.Mask}, nil
+}
+
+// NewIPNet generates an IPNet from an ip address using a netmask of 32.
+func NewIPNet(ip net.IP) *net.IPNet {
+	return &net.IPNet{IP: ip, Mask: net.CIDRMask(32, 32)}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/netlink_test.go b/vendor/src/github.com/vishvananda/netlink/netlink_test.go
new file mode 100644
index 0000000..3292b75
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/netlink_test.go
@@ -0,0 +1,34 @@
+package netlink
+
+import (
+	"log"
+	"os"
+	"runtime"
+	"testing"
+
+	"github.com/vishvananda/netns"
+)
+
+type tearDownNetlinkTest func()
+
+func setUpNetlinkTest(t *testing.T) tearDownNetlinkTest {
+	if os.Getuid() != 0 {
+		msg := "Skipped test because it requires root privileges."
+		log.Printf(msg)
+		t.Skip(msg)
+	}
+
+	// new temporary namespace so we don't pollute the host
+	// lock thread since the namespace is thread local
+	runtime.LockOSThread()
+	var err error
+	ns, err := netns.New()
+	if err != nil {
+		t.Fatal("Failed to create newns", ns)
+	}
+
+	return func() {
+		ns.Close()
+		runtime.UnlockOSThread()
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go
new file mode 100644
index 0000000..10c49c1
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -0,0 +1,143 @@
+// +build !linux
+
+package netlink
+
+import (
+	"errors"
+)
+
+var (
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+func LinkSetUp(link *Link) error {
+	return ErrNotImplemented
+}
+
+func LinkSetDown(link *Link) error {
+	return ErrNotImplemented
+}
+
+func LinkSetMTU(link *Link, mtu int) error {
+	return ErrNotImplemented
+}
+
+func LinkSetMaster(link *Link, master *Link) error {
+	return ErrNotImplemented
+}
+
+func LinkSetNsPid(link *Link, nspid int) error {
+	return ErrNotImplemented
+}
+
+func LinkSetNsFd(link *Link, fd int) error {
+	return ErrNotImplemented
+}
+
+func LinkAdd(link *Link) error {
+	return ErrNotImplemented
+}
+
+func LinkDel(link *Link) error {
+	return ErrNotImplemented
+}
+
+func SetHairpin(link Link, mode bool) error {
+	return ErrNotImplemented
+}
+
+func SetGuard(link Link, mode bool) error {
+	return ErrNotImplemented
+}
+
+func SetFastLeave(link Link, mode bool) error {
+	return ErrNotImplemented
+}
+
+func SetLearning(link Link, mode bool) error {
+	return ErrNotImplemented
+}
+
+func SetRootBlock(link Link, mode bool) error {
+	return ErrNotImplemented
+}
+
+func SetFlood(link Link, mode bool) error {
+	return ErrNotImplemented
+}
+
+func LinkList() ([]Link, error) {
+	return nil, ErrNotImplemented
+}
+
+func AddrAdd(link *Link, addr *Addr) error {
+	return ErrNotImplemented
+}
+
+func AddrDel(link *Link, addr *Addr) error {
+	return ErrNotImplemented
+}
+
+func AddrList(link *Link, family int) ([]Addr, error) {
+	return nil, ErrNotImplemented
+}
+
+func RouteAdd(route *Route) error {
+	return ErrNotImplemented
+}
+
+func RouteDel(route *Route) error {
+	return ErrNotImplemented
+}
+
+func RouteList(link *Link, family int) ([]Route, error) {
+	return nil, ErrNotImplemented
+}
+
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+	return ErrNotImplemented
+}
+
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+	return ErrNotImplemented
+}
+
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+	return nil, ErrNotImplemented
+}
+
+func XfrmStateAdd(policy *XfrmState) error {
+	return ErrNotImplemented
+}
+
+func XfrmStateDel(policy *XfrmState) error {
+	return ErrNotImplemented
+}
+
+func XfrmStateList(family int) ([]XfrmState, error) {
+	return nil, ErrNotImplemented
+}
+
+func NeighAdd(neigh *Neigh) error {
+	return ErrNotImplemented
+}
+
+func NeighSet(neigh *Neigh) error {
+	return ErrNotImplemented
+}
+
+func NeighAppend(neigh *Neigh) error {
+	return ErrNotImplemented
+}
+
+func NeighDel(neigh *Neigh) error {
+	return ErrNotImplemented
+}
+
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+	return nil, ErrNotImplemented
+}
+
+func NeighDeserialize(m []byte) (*Ndmsg, *Neigh, error) {
+	return nil, nil, ErrNotImplemented
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go
new file mode 100644
index 0000000..17088fa
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go
@@ -0,0 +1,47 @@
+package nl
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type IfAddrmsg struct {
+	syscall.IfAddrmsg
+}
+
+func NewIfAddrmsg(family int) *IfAddrmsg {
+	return &IfAddrmsg{
+		IfAddrmsg: syscall.IfAddrmsg{
+			Family: uint8(family),
+		},
+	}
+}
+
+// struct ifaddrmsg {
+//   __u8    ifa_family;
+//   __u8    ifa_prefixlen;  /* The prefix length    */
+//   __u8    ifa_flags;  /* Flags      */
+//   __u8    ifa_scope;  /* Address scope    */
+//   __u32   ifa_index;  /* Link index     */
+// };
+
+// type IfAddrmsg struct {
+// 	Family    uint8
+// 	Prefixlen uint8
+// 	Flags     uint8
+// 	Scope     uint8
+// 	Index     uint32
+// }
+// SizeofIfAddrmsg     = 0x8
+
+func DeserializeIfAddrmsg(b []byte) *IfAddrmsg {
+	return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0]))
+}
+
+func (msg *IfAddrmsg) Serialize() []byte {
+	return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfAddrmsg) Len() int {
+	return syscall.SizeofIfAddrmsg
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/addr_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux_test.go
new file mode 100644
index 0000000..98c3b21
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/addr_linux_test.go
@@ -0,0 +1,39 @@
+package nl
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"syscall"
+	"testing"
+)
+
+func (msg *IfAddrmsg) write(b []byte) {
+	native := NativeEndian()
+	b[0] = msg.Family
+	b[1] = msg.Prefixlen
+	b[2] = msg.Flags
+	b[3] = msg.Scope
+	native.PutUint32(b[4:8], msg.Index)
+}
+
+func (msg *IfAddrmsg) serializeSafe() []byte {
+	len := syscall.SizeofIfAddrmsg
+	b := make([]byte, len)
+	msg.write(b)
+	return b
+}
+
+func deserializeIfAddrmsgSafe(b []byte) *IfAddrmsg {
+	var msg = IfAddrmsg{}
+	binary.Read(bytes.NewReader(b[0:syscall.SizeofIfAddrmsg]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestIfAddrmsgDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, syscall.SizeofIfAddrmsg)
+	rand.Read(orig)
+	safemsg := deserializeIfAddrmsgSafe(orig)
+	msg := DeserializeIfAddrmsg(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/link_linux.go
new file mode 100644
index 0000000..ab0dede
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/link_linux.go
@@ -0,0 +1,81 @@
+package nl
+
+const (
+	DEFAULT_CHANGE = 0xFFFFFFFF
+)
+
+const (
+	IFLA_INFO_UNSPEC = iota
+	IFLA_INFO_KIND
+	IFLA_INFO_DATA
+	IFLA_INFO_XSTATS
+	IFLA_INFO_MAX = IFLA_INFO_XSTATS
+)
+
+const (
+	IFLA_VLAN_UNSPEC = iota
+	IFLA_VLAN_ID
+	IFLA_VLAN_FLAGS
+	IFLA_VLAN_EGRESS_QOS
+	IFLA_VLAN_INGRESS_QOS
+	IFLA_VLAN_PROTOCOL
+	IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL
+)
+
+const (
+	VETH_INFO_UNSPEC = iota
+	VETH_INFO_PEER
+	VETH_INFO_MAX = VETH_INFO_PEER
+)
+
+const (
+	IFLA_VXLAN_UNSPEC = iota
+	IFLA_VXLAN_ID
+	IFLA_VXLAN_GROUP
+	IFLA_VXLAN_LINK
+	IFLA_VXLAN_LOCAL
+	IFLA_VXLAN_TTL
+	IFLA_VXLAN_TOS
+	IFLA_VXLAN_LEARNING
+	IFLA_VXLAN_AGEING
+	IFLA_VXLAN_LIMIT
+	IFLA_VXLAN_PORT_RANGE
+	IFLA_VXLAN_PROXY
+	IFLA_VXLAN_RSC
+	IFLA_VXLAN_L2MISS
+	IFLA_VXLAN_L3MISS
+	IFLA_VXLAN_PORT
+	IFLA_VXLAN_GROUP6
+	IFLA_VXLAN_LOCAL6
+	IFLA_VXLAN_MAX = IFLA_VXLAN_LOCAL6
+)
+
+const (
+	BRIDGE_MODE_UNSPEC = iota
+	BRIDGE_MODE_HAIRPIN
+)
+
+const (
+	IFLA_BRPORT_UNSPEC = iota
+	IFLA_BRPORT_STATE
+	IFLA_BRPORT_PRIORITY
+	IFLA_BRPORT_COST
+	IFLA_BRPORT_MODE
+	IFLA_BRPORT_GUARD
+	IFLA_BRPORT_PROTECT
+	IFLA_BRPORT_FAST_LEAVE
+	IFLA_BRPORT_LEARNING
+	IFLA_BRPORT_UNICAST_FLOOD
+	IFLA_BRPORT_MAX = IFLA_BRPORT_UNICAST_FLOOD
+)
+
+const (
+	IFLA_IPVLAN_UNSPEC = iota
+	IFLA_IPVLAN_MODE
+	IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE
+)
+
+const (
+	// not defined in syscall
+	IFLA_NET_NS_FD = 28
+)
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
new file mode 100644
index 0000000..72f2813
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -0,0 +1,417 @@
+// Package nl has low level primitives for making Netlink calls.
+package nl
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+const (
+	// Family type definitions
+	FAMILY_ALL = syscall.AF_UNSPEC
+	FAMILY_V4  = syscall.AF_INET
+	FAMILY_V6  = syscall.AF_INET6
+)
+
+var nextSeqNr uint32
+
+// GetIPFamily returns the family type of a net.IP.
+func GetIPFamily(ip net.IP) int {
+	if len(ip) <= net.IPv4len {
+		return FAMILY_V4
+	}
+	if ip.To4() != nil {
+		return FAMILY_V4
+	}
+	return FAMILY_V6
+}
+
+var nativeEndian binary.ByteOrder
+
+// Get native endianness for the system
+func NativeEndian() binary.ByteOrder {
+	if nativeEndian == nil {
+		var x uint32 = 0x01020304
+		if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+			nativeEndian = binary.BigEndian
+		}
+		nativeEndian = binary.LittleEndian
+	}
+	return nativeEndian
+}
+
+// Byte swap a 16 bit value if we aren't big endian
+func Swap16(i uint16) uint16 {
+	if NativeEndian() == binary.BigEndian {
+		return i
+	}
+	return (i&0xff00)>>8 | (i&0xff)<<8
+}
+
+// Byte swap a 32 bit value if aren't big endian
+func Swap32(i uint32) uint32 {
+	if NativeEndian() == binary.BigEndian {
+		return i
+	}
+	return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24
+}
+
+type NetlinkRequestData interface {
+	Len() int
+	Serialize() []byte
+}
+
+// IfInfomsg is related to links, but it is used for list requests as well
+type IfInfomsg struct {
+	syscall.IfInfomsg
+}
+
+// Create an IfInfomsg with family specified
+func NewIfInfomsg(family int) *IfInfomsg {
+	return &IfInfomsg{
+		IfInfomsg: syscall.IfInfomsg{
+			Family: uint8(family),
+		},
+	}
+}
+
+func DeserializeIfInfomsg(b []byte) *IfInfomsg {
+	return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0]))
+}
+
+func (msg *IfInfomsg) Serialize() []byte {
+	return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *IfInfomsg) Len() int {
+	return syscall.SizeofIfInfomsg
+}
+
+func rtaAlignOf(attrlen int) int {
+	return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1)
+}
+
+func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg {
+	msg := NewIfInfomsg(family)
+	parent.children = append(parent.children, msg)
+	return msg
+}
+
+// Extend RtAttr to handle data and children
+type RtAttr struct {
+	syscall.RtAttr
+	Data     []byte
+	children []NetlinkRequestData
+}
+
+// Create a new Extended RtAttr object
+func NewRtAttr(attrType int, data []byte) *RtAttr {
+	return &RtAttr{
+		RtAttr: syscall.RtAttr{
+			Type: uint16(attrType),
+		},
+		children: []NetlinkRequestData{},
+		Data:     data,
+	}
+}
+
+// Create a new RtAttr obj anc add it as a child of an existing object
+func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
+	attr := NewRtAttr(attrType, data)
+	parent.children = append(parent.children, attr)
+	return attr
+}
+
+func (a *RtAttr) Len() int {
+	if len(a.children) == 0 {
+		return (syscall.SizeofRtAttr + len(a.Data))
+	}
+
+	l := 0
+	for _, child := range a.children {
+		l += rtaAlignOf(child.Len())
+	}
+	l += syscall.SizeofRtAttr
+	return rtaAlignOf(l + len(a.Data))
+}
+
+// Serialize the RtAttr into a byte array
+// This can't ust unsafe.cast because it must iterate through children.
+func (a *RtAttr) Serialize() []byte {
+	native := NativeEndian()
+
+	length := a.Len()
+	buf := make([]byte, rtaAlignOf(length))
+
+	if a.Data != nil {
+		copy(buf[4:], a.Data)
+	} else {
+		next := 4
+		for _, child := range a.children {
+			childBuf := child.Serialize()
+			copy(buf[next:], childBuf)
+			next += rtaAlignOf(len(childBuf))
+		}
+	}
+
+	if l := uint16(length); l != 0 {
+		native.PutUint16(buf[0:2], l)
+	}
+	native.PutUint16(buf[2:4], a.Type)
+	return buf
+}
+
+type NetlinkRequest struct {
+	syscall.NlMsghdr
+	Data []NetlinkRequestData
+}
+
+// Serialize the Netlink Request into a byte array
+func (msg *NetlinkRequest) Serialize() []byte {
+	length := syscall.SizeofNlMsghdr
+	dataBytes := make([][]byte, len(msg.Data))
+	for i, data := range msg.Data {
+		dataBytes[i] = data.Serialize()
+		length = length + len(dataBytes[i])
+	}
+	msg.Len = uint32(length)
+	b := make([]byte, length)
+	hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(msg)))[:]
+	next := syscall.SizeofNlMsghdr
+	copy(b[0:next], hdr)
+	for _, data := range dataBytes {
+		for _, dataByte := range data {
+			b[next] = dataByte
+			next = next + 1
+		}
+	}
+	return b
+}
+
+func (msg *NetlinkRequest) AddData(data NetlinkRequestData) {
+	if data != nil {
+		msg.Data = append(msg.Data, data)
+	}
+}
+
+// Execute the request against a the given sockType.
+// Returns a list of netlink messages in seriaized format, optionally filtered
+// by resType.
+func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
+	s, err := getNetlinkSocket(sockType)
+	if err != nil {
+		return nil, err
+	}
+	defer s.Close()
+
+	if err := s.Send(req); err != nil {
+		return nil, err
+	}
+
+	pid, err := s.GetPid()
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([][]byte, 0)
+
+done:
+	for {
+		msgs, err := s.Recieve()
+		if err != nil {
+			return nil, err
+		}
+		for _, m := range msgs {
+			if m.Header.Seq != req.Seq {
+				return nil, fmt.Errorf("Wrong Seq nr %d, expected 1", m.Header.Seq)
+			}
+			if m.Header.Pid != pid {
+				return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
+			}
+			if m.Header.Type == syscall.NLMSG_DONE {
+				break done
+			}
+			if m.Header.Type == syscall.NLMSG_ERROR {
+				native := NativeEndian()
+				error := int32(native.Uint32(m.Data[0:4]))
+				if error == 0 {
+					break done
+				}
+				return nil, syscall.Errno(-error)
+			}
+			if resType != 0 && m.Header.Type != resType {
+				continue
+			}
+			res = append(res, m.Data)
+			if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
+				break done
+			}
+		}
+	}
+	return res, nil
+}
+
+// Create a new netlink request from proto and flags
+// Note the Len value will be inaccurate once data is added until
+// the message is serialized
+func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
+	return &NetlinkRequest{
+		NlMsghdr: syscall.NlMsghdr{
+			Len:   uint32(syscall.SizeofNlMsghdr),
+			Type:  uint16(proto),
+			Flags: syscall.NLM_F_REQUEST | uint16(flags),
+			Seq:   atomic.AddUint32(&nextSeqNr, 1),
+		},
+	}
+}
+
+type NetlinkSocket struct {
+	fd  int
+	lsa syscall.SockaddrNetlink
+}
+
+func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
+	fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+	if err != nil {
+		return nil, err
+	}
+	s := &NetlinkSocket{
+		fd: fd,
+	}
+	s.lsa.Family = syscall.AF_NETLINK
+	if err := syscall.Bind(fd, &s.lsa); err != nil {
+		syscall.Close(fd)
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE)
+// and subscribe it to multicast groups passed in variable argument list.
+// Returns the netlink socket on whic hReceive() method can be called
+// to retrieve the messages from the kernel.
+func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
+	fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+	if err != nil {
+		return nil, err
+	}
+	s := &NetlinkSocket{
+		fd: fd,
+	}
+	s.lsa.Family = syscall.AF_NETLINK
+
+	for _, g := range groups {
+		s.lsa.Groups |= (1 << (g - 1))
+	}
+
+	if err := syscall.Bind(fd, &s.lsa); err != nil {
+		syscall.Close(fd)
+		return nil, err
+	}
+
+	return s, nil
+}
+
+func (s *NetlinkSocket) Close() {
+	syscall.Close(s.fd)
+}
+
+func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
+	if err := syscall.Sendto(s.fd, request.Serialize(), 0, &s.lsa); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *NetlinkSocket) Recieve() ([]syscall.NetlinkMessage, error) {
+	rb := make([]byte, syscall.Getpagesize())
+	nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
+	if err != nil {
+		return nil, err
+	}
+	if nr < syscall.NLMSG_HDRLEN {
+		return nil, fmt.Errorf("Got short response from netlink")
+	}
+	rb = rb[:nr]
+	return syscall.ParseNetlinkMessage(rb)
+}
+
+func (s *NetlinkSocket) GetPid() (uint32, error) {
+	lsa, err := syscall.Getsockname(s.fd)
+	if err != nil {
+		return 0, err
+	}
+	switch v := lsa.(type) {
+	case *syscall.SockaddrNetlink:
+		return v.Pid, nil
+	}
+	return 0, fmt.Errorf("Wrong socket type")
+}
+
+func ZeroTerminated(s string) []byte {
+	bytes := make([]byte, len(s)+1)
+	for i := 0; i < len(s); i++ {
+		bytes[i] = s[i]
+	}
+	bytes[len(s)] = 0
+	return bytes
+}
+
+func NonZeroTerminated(s string) []byte {
+	bytes := make([]byte, len(s))
+	for i := 0; i < len(s); i++ {
+		bytes[i] = s[i]
+	}
+	return bytes
+}
+
+func BytesToString(b []byte) string {
+	n := bytes.Index(b, []byte{0})
+	return string(b[:n])
+}
+
+func Uint8Attr(v uint8) []byte {
+	return []byte{byte(v)}
+}
+
+func Uint16Attr(v uint16) []byte {
+	native := NativeEndian()
+	bytes := make([]byte, 2)
+	native.PutUint16(bytes, v)
+	return bytes
+}
+
+func Uint32Attr(v uint32) []byte {
+	native := NativeEndian()
+	bytes := make([]byte, 4)
+	native.PutUint32(bytes, v)
+	return bytes
+}
+
+func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) {
+	var attrs []syscall.NetlinkRouteAttr
+	for len(b) >= syscall.SizeofRtAttr {
+		a, vbuf, alen, err := netlinkRouteAttrAndValue(b)
+		if err != nil {
+			return nil, err
+		}
+		ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]}
+		attrs = append(attrs, ra)
+		b = b[alen:]
+	}
+	return attrs, nil
+}
+
+func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) {
+	a := (*syscall.RtAttr)(unsafe.Pointer(&b[0]))
+	if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) {
+		return nil, nil, 0, syscall.EINVAL
+	}
+	return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/nl_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux_test.go
new file mode 100644
index 0000000..4672684
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/nl_linux_test.go
@@ -0,0 +1,60 @@
+package nl
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"reflect"
+	"syscall"
+	"testing"
+)
+
+type testSerializer interface {
+	serializeSafe() []byte
+	Serialize() []byte
+}
+
+func testDeserializeSerialize(t *testing.T, orig []byte, safemsg testSerializer, msg testSerializer) {
+	if !reflect.DeepEqual(safemsg, msg) {
+		t.Fatal("Deserialization failed.\n", safemsg, "\n", msg)
+	}
+	safe := msg.serializeSafe()
+	if !bytes.Equal(safe, orig) {
+		t.Fatal("Safe serialization failed.\n", safe, "\n", orig)
+	}
+	b := msg.Serialize()
+	if !bytes.Equal(b, safe) {
+		t.Fatal("Serialization failed.\n", b, "\n", safe)
+	}
+}
+
+func (msg *IfInfomsg) write(b []byte) {
+	native := NativeEndian()
+	b[0] = msg.Family
+	b[1] = msg.X__ifi_pad
+	native.PutUint16(b[2:4], msg.Type)
+	native.PutUint32(b[4:8], uint32(msg.Index))
+	native.PutUint32(b[8:12], msg.Flags)
+	native.PutUint32(b[12:16], msg.Change)
+}
+
+func (msg *IfInfomsg) serializeSafe() []byte {
+	length := syscall.SizeofIfInfomsg
+	b := make([]byte, length)
+	msg.write(b)
+	return b
+}
+
+func deserializeIfInfomsgSafe(b []byte) *IfInfomsg {
+	var msg = IfInfomsg{}
+	binary.Read(bytes.NewReader(b[0:syscall.SizeofIfInfomsg]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestIfInfomsgDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, syscall.SizeofIfInfomsg)
+	rand.Read(orig)
+	safemsg := deserializeIfInfomsgSafe(orig)
+	msg := DeserializeIfInfomsg(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/route_linux.go
new file mode 100644
index 0000000..5dde998
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/route_linux.go
@@ -0,0 +1,33 @@
+package nl
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type RtMsg struct {
+	syscall.RtMsg
+}
+
+func NewRtMsg() *RtMsg {
+	return &RtMsg{
+		RtMsg: syscall.RtMsg{
+			Table:    syscall.RT_TABLE_MAIN,
+			Scope:    syscall.RT_SCOPE_UNIVERSE,
+			Protocol: syscall.RTPROT_BOOT,
+			Type:     syscall.RTN_UNICAST,
+		},
+	}
+}
+
+func (msg *RtMsg) Len() int {
+	return syscall.SizeofRtMsg
+}
+
+func DeserializeRtMsg(b []byte) *RtMsg {
+	return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0]))
+}
+
+func (msg *RtMsg) Serialize() []byte {
+	return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/route_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/route_linux_test.go
new file mode 100644
index 0000000..ba9c410
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/route_linux_test.go
@@ -0,0 +1,43 @@
+package nl
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"syscall"
+	"testing"
+)
+
+func (msg *RtMsg) write(b []byte) {
+	native := NativeEndian()
+	b[0] = msg.Family
+	b[1] = msg.Dst_len
+	b[2] = msg.Src_len
+	b[3] = msg.Tos
+	b[4] = msg.Table
+	b[5] = msg.Protocol
+	b[6] = msg.Scope
+	b[7] = msg.Type
+	native.PutUint32(b[8:12], msg.Flags)
+}
+
+func (msg *RtMsg) serializeSafe() []byte {
+	len := syscall.SizeofRtMsg
+	b := make([]byte, len)
+	msg.write(b)
+	return b
+}
+
+func deserializeRtMsgSafe(b []byte) *RtMsg {
+	var msg = RtMsg{}
+	binary.Read(bytes.NewReader(b[0:syscall.SizeofRtMsg]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestRtMsgDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, syscall.SizeofRtMsg)
+	rand.Read(orig)
+	safemsg := deserializeRtMsgSafe(orig)
+	msg := DeserializeRtMsg(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
new file mode 100644
index 0000000..d953130
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
@@ -0,0 +1,259 @@
+package nl
+
+import (
+	"bytes"
+	"net"
+	"unsafe"
+)
+
+// Infinity for packet and byte counts
+const (
+	XFRM_INF = ^uint64(0)
+)
+
+// Message Types
+const (
+	XFRM_MSG_BASE        = 0x10
+	XFRM_MSG_NEWSA       = 0x10
+	XFRM_MSG_DELSA       = 0x11
+	XFRM_MSG_GETSA       = 0x12
+	XFRM_MSG_NEWPOLICY   = 0x13
+	XFRM_MSG_DELPOLICY   = 0x14
+	XFRM_MSG_GETPOLICY   = 0x15
+	XFRM_MSG_ALLOCSPI    = 0x16
+	XFRM_MSG_ACQUIRE     = 0x17
+	XFRM_MSG_EXPIRE      = 0x18
+	XFRM_MSG_UPDPOLICY   = 0x19
+	XFRM_MSG_UPDSA       = 0x1a
+	XFRM_MSG_POLEXPIRE   = 0x1b
+	XFRM_MSG_FLUSHSA     = 0x1c
+	XFRM_MSG_FLUSHPOLICY = 0x1d
+	XFRM_MSG_NEWAE       = 0x1e
+	XFRM_MSG_GETAE       = 0x1f
+	XFRM_MSG_REPORT      = 0x20
+	XFRM_MSG_MIGRATE     = 0x21
+	XFRM_MSG_NEWSADINFO  = 0x22
+	XFRM_MSG_GETSADINFO  = 0x23
+	XFRM_MSG_NEWSPDINFO  = 0x24
+	XFRM_MSG_GETSPDINFO  = 0x25
+	XFRM_MSG_MAPPING     = 0x26
+	XFRM_MSG_MAX         = 0x26
+	XFRM_NR_MSGTYPES     = 0x17
+)
+
+// Attribute types
+const (
+	/* Netlink message attributes.  */
+	XFRMA_UNSPEC         = 0x00
+	XFRMA_ALG_AUTH       = 0x01 /* struct xfrm_algo */
+	XFRMA_ALG_CRYPT      = 0x02 /* struct xfrm_algo */
+	XFRMA_ALG_COMP       = 0x03 /* struct xfrm_algo */
+	XFRMA_ENCAP          = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */
+	XFRMA_TMPL           = 0x05 /* 1 or more struct xfrm_user_tmpl */
+	XFRMA_SA             = 0x06 /* struct xfrm_usersa_info  */
+	XFRMA_POLICY         = 0x07 /* struct xfrm_userpolicy_info */
+	XFRMA_SEC_CTX        = 0x08 /* struct xfrm_sec_ctx */
+	XFRMA_LTIME_VAL      = 0x09
+	XFRMA_REPLAY_VAL     = 0x0a
+	XFRMA_REPLAY_THRESH  = 0x0b
+	XFRMA_ETIMER_THRESH  = 0x0c
+	XFRMA_SRCADDR        = 0x0d /* xfrm_address_t */
+	XFRMA_COADDR         = 0x0e /* xfrm_address_t */
+	XFRMA_LASTUSED       = 0x0f /* unsigned long  */
+	XFRMA_POLICY_TYPE    = 0x10 /* struct xfrm_userpolicy_type */
+	XFRMA_MIGRATE        = 0x11
+	XFRMA_ALG_AEAD       = 0x12 /* struct xfrm_algo_aead */
+	XFRMA_KMADDRESS      = 0x13 /* struct xfrm_user_kmaddress */
+	XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */
+	XFRMA_MARK           = 0x15 /* struct xfrm_mark */
+	XFRMA_TFCPAD         = 0x16 /* __u32 */
+	XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */
+	XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */
+	XFRMA_MAX            = 0x18
+)
+
+const (
+	SizeofXfrmAddress     = 0x10
+	SizeofXfrmSelector    = 0x38
+	SizeofXfrmLifetimeCfg = 0x40
+	SizeofXfrmLifetimeCur = 0x20
+	SizeofXfrmId          = 0x18
+)
+
+// typedef union {
+//   __be32    a4;
+//   __be32    a6[4];
+// } xfrm_address_t;
+
+type XfrmAddress [SizeofXfrmAddress]byte
+
+func (x *XfrmAddress) ToIP() net.IP {
+	var empty = [12]byte{}
+	ip := make(net.IP, net.IPv6len)
+	if bytes.Equal(x[4:16], empty[:]) {
+		ip[10] = 0xff
+		ip[11] = 0xff
+		copy(ip[12:16], x[0:4])
+	} else {
+		copy(ip[:], x[:])
+	}
+	return ip
+}
+
+func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet {
+	ip := x.ToIP()
+	if GetIPFamily(ip) == FAMILY_V4 {
+		return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)}
+	} else {
+		return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 128)}
+	}
+}
+
+func (x *XfrmAddress) FromIP(ip net.IP) {
+	var empty = [16]byte{}
+	if len(ip) < net.IPv4len {
+		copy(x[4:16], empty[:])
+	} else if GetIPFamily(ip) == FAMILY_V4 {
+		copy(x[0:4], ip.To4()[0:4])
+		copy(x[4:16], empty[:12])
+	} else {
+		copy(x[0:16], ip.To16()[0:16])
+	}
+}
+
+func DeserializeXfrmAddress(b []byte) *XfrmAddress {
+	return (*XfrmAddress)(unsafe.Pointer(&b[0:SizeofXfrmAddress][0]))
+}
+
+func (msg *XfrmAddress) Serialize() []byte {
+	return (*(*[SizeofXfrmAddress]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_selector {
+//   xfrm_address_t  daddr;
+//   xfrm_address_t  saddr;
+//   __be16  dport;
+//   __be16  dport_mask;
+//   __be16  sport;
+//   __be16  sport_mask;
+//   __u16 family;
+//   __u8  prefixlen_d;
+//   __u8  prefixlen_s;
+//   __u8  proto;
+//   int ifindex;
+//   __kernel_uid32_t  user;
+// };
+
+type XfrmSelector struct {
+	Daddr      XfrmAddress
+	Saddr      XfrmAddress
+	Dport      uint16 // big endian
+	DportMask  uint16 // big endian
+	Sport      uint16 // big endian
+	SportMask  uint16 // big endian
+	Family     uint16
+	PrefixlenD uint8
+	PrefixlenS uint8
+	Proto      uint8
+	Pad        [3]byte
+	Ifindex    int32
+	User       uint32
+}
+
+func (msg *XfrmSelector) Len() int {
+	return SizeofXfrmSelector
+}
+
+func DeserializeXfrmSelector(b []byte) *XfrmSelector {
+	return (*XfrmSelector)(unsafe.Pointer(&b[0:SizeofXfrmSelector][0]))
+}
+
+func (msg *XfrmSelector) Serialize() []byte {
+	return (*(*[SizeofXfrmSelector]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cfg {
+//   __u64 soft_byte_limit;
+//   __u64 hard_byte_limit;
+//   __u64 soft_packet_limit;
+//   __u64 hard_packet_limit;
+//   __u64 soft_add_expires_seconds;
+//   __u64 hard_add_expires_seconds;
+//   __u64 soft_use_expires_seconds;
+//   __u64 hard_use_expires_seconds;
+// };
+//
+
+type XfrmLifetimeCfg struct {
+	SoftByteLimit         uint64
+	HardByteLimit         uint64
+	SoftPacketLimit       uint64
+	HardPacketLimit       uint64
+	SoftAddExpiresSeconds uint64
+	HardAddExpiresSeconds uint64
+	SoftUseExpiresSeconds uint64
+	HardUseExpiresSeconds uint64
+}
+
+func (msg *XfrmLifetimeCfg) Len() int {
+	return SizeofXfrmLifetimeCfg
+}
+
+func DeserializeXfrmLifetimeCfg(b []byte) *XfrmLifetimeCfg {
+	return (*XfrmLifetimeCfg)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCfg][0]))
+}
+
+func (msg *XfrmLifetimeCfg) Serialize() []byte {
+	return (*(*[SizeofXfrmLifetimeCfg]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_lifetime_cur {
+//   __u64 bytes;
+//   __u64 packets;
+//   __u64 add_time;
+//   __u64 use_time;
+// };
+
+type XfrmLifetimeCur struct {
+	Bytes   uint64
+	Packets uint64
+	AddTime uint64
+	UseTime uint64
+}
+
+func (msg *XfrmLifetimeCur) Len() int {
+	return SizeofXfrmLifetimeCur
+}
+
+func DeserializeXfrmLifetimeCur(b []byte) *XfrmLifetimeCur {
+	return (*XfrmLifetimeCur)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCur][0]))
+}
+
+func (msg *XfrmLifetimeCur) Serialize() []byte {
+	return (*(*[SizeofXfrmLifetimeCur]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_id {
+//   xfrm_address_t  daddr;
+//   __be32    spi;
+//   __u8    proto;
+// };
+
+type XfrmId struct {
+	Daddr XfrmAddress
+	Spi   uint32 // big endian
+	Proto uint8
+	Pad   [3]byte
+}
+
+func (msg *XfrmId) Len() int {
+	return SizeofXfrmId
+}
+
+func DeserializeXfrmId(b []byte) *XfrmId {
+	return (*XfrmId)(unsafe.Pointer(&b[0:SizeofXfrmId][0]))
+}
+
+func (msg *XfrmId) Serialize() []byte {
+	return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux_test.go
new file mode 100644
index 0000000..04404d7
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux_test.go
@@ -0,0 +1,161 @@
+package nl
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"testing"
+)
+
+func (msg *XfrmAddress) write(b []byte) {
+	copy(b[0:SizeofXfrmAddress], msg[:])
+}
+
+func (msg *XfrmAddress) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmAddress)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmAddressSafe(b []byte) *XfrmAddress {
+	var msg = XfrmAddress{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmAddress]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmAddressDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmAddress)
+	rand.Read(orig)
+	safemsg := deserializeXfrmAddressSafe(orig)
+	msg := DeserializeXfrmAddress(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmSelector) write(b []byte) {
+	const AddrEnd = SizeofXfrmAddress * 2
+	native := NativeEndian()
+	msg.Daddr.write(b[0:SizeofXfrmAddress])
+	msg.Saddr.write(b[SizeofXfrmAddress:AddrEnd])
+	native.PutUint16(b[AddrEnd:AddrEnd+2], msg.Dport)
+	native.PutUint16(b[AddrEnd+2:AddrEnd+4], msg.DportMask)
+	native.PutUint16(b[AddrEnd+4:AddrEnd+6], msg.Sport)
+	native.PutUint16(b[AddrEnd+6:AddrEnd+8], msg.SportMask)
+	native.PutUint16(b[AddrEnd+8:AddrEnd+10], msg.Family)
+	b[AddrEnd+10] = msg.PrefixlenD
+	b[AddrEnd+11] = msg.PrefixlenS
+	b[AddrEnd+12] = msg.Proto
+	copy(b[AddrEnd+13:AddrEnd+16], msg.Pad[:])
+	native.PutUint32(b[AddrEnd+16:AddrEnd+20], uint32(msg.Ifindex))
+	native.PutUint32(b[AddrEnd+20:AddrEnd+24], msg.User)
+}
+
+func (msg *XfrmSelector) serializeSafe() []byte {
+	length := SizeofXfrmSelector
+	b := make([]byte, length)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmSelectorSafe(b []byte) *XfrmSelector {
+	var msg = XfrmSelector{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmSelector]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmSelectorDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmSelector)
+	rand.Read(orig)
+	safemsg := deserializeXfrmSelectorSafe(orig)
+	msg := DeserializeXfrmSelector(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmLifetimeCfg) write(b []byte) {
+	native := NativeEndian()
+	native.PutUint64(b[0:8], msg.SoftByteLimit)
+	native.PutUint64(b[8:16], msg.HardByteLimit)
+	native.PutUint64(b[16:24], msg.SoftPacketLimit)
+	native.PutUint64(b[24:32], msg.HardPacketLimit)
+	native.PutUint64(b[32:40], msg.SoftAddExpiresSeconds)
+	native.PutUint64(b[40:48], msg.HardAddExpiresSeconds)
+	native.PutUint64(b[48:56], msg.SoftUseExpiresSeconds)
+	native.PutUint64(b[56:64], msg.HardUseExpiresSeconds)
+}
+
+func (msg *XfrmLifetimeCfg) serializeSafe() []byte {
+	length := SizeofXfrmLifetimeCfg
+	b := make([]byte, length)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmLifetimeCfgSafe(b []byte) *XfrmLifetimeCfg {
+	var msg = XfrmLifetimeCfg{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCfg]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmLifetimeCfgDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmLifetimeCfg)
+	rand.Read(orig)
+	safemsg := deserializeXfrmLifetimeCfgSafe(orig)
+	msg := DeserializeXfrmLifetimeCfg(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmLifetimeCur) write(b []byte) {
+	native := NativeEndian()
+	native.PutUint64(b[0:8], msg.Bytes)
+	native.PutUint64(b[8:16], msg.Packets)
+	native.PutUint64(b[16:24], msg.AddTime)
+	native.PutUint64(b[24:32], msg.UseTime)
+}
+
+func (msg *XfrmLifetimeCur) serializeSafe() []byte {
+	length := SizeofXfrmLifetimeCur
+	b := make([]byte, length)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmLifetimeCurSafe(b []byte) *XfrmLifetimeCur {
+	var msg = XfrmLifetimeCur{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCur]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmLifetimeCurDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmLifetimeCur)
+	rand.Read(orig)
+	safemsg := deserializeXfrmLifetimeCurSafe(orig)
+	msg := DeserializeXfrmLifetimeCur(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmId) write(b []byte) {
+	native := NativeEndian()
+	msg.Daddr.write(b[0:SizeofXfrmAddress])
+	native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi)
+	b[SizeofXfrmAddress+4] = msg.Proto
+	copy(b[SizeofXfrmAddress+5:SizeofXfrmAddress+8], msg.Pad[:])
+}
+
+func (msg *XfrmId) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmId)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmIdSafe(b []byte) *XfrmId {
+	var msg = XfrmId{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmId]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmIdDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmId)
+	rand.Read(orig)
+	safemsg := deserializeXfrmIdSafe(orig)
+	msg := DeserializeXfrmId(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
new file mode 100644
index 0000000..66f7e03
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
@@ -0,0 +1,119 @@
+package nl
+
+import (
+	"unsafe"
+)
+
+const (
+	SizeofXfrmUserpolicyId   = 0x40
+	SizeofXfrmUserpolicyInfo = 0xa8
+	SizeofXfrmUserTmpl       = 0x40
+)
+
+// struct xfrm_userpolicy_id {
+//   struct xfrm_selector    sel;
+//   __u32       index;
+//   __u8        dir;
+// };
+//
+
+type XfrmUserpolicyId struct {
+	Sel   XfrmSelector
+	Index uint32
+	Dir   uint8
+	Pad   [3]byte
+}
+
+func (msg *XfrmUserpolicyId) Len() int {
+	return SizeofXfrmUserpolicyId
+}
+
+func DeserializeXfrmUserpolicyId(b []byte) *XfrmUserpolicyId {
+	return (*XfrmUserpolicyId)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyId][0]))
+}
+
+func (msg *XfrmUserpolicyId) Serialize() []byte {
+	return (*(*[SizeofXfrmUserpolicyId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_userpolicy_info {
+//   struct xfrm_selector    sel;
+//   struct xfrm_lifetime_cfg  lft;
+//   struct xfrm_lifetime_cur  curlft;
+//   __u32       priority;
+//   __u32       index;
+//   __u8        dir;
+//   __u8        action;
+// #define XFRM_POLICY_ALLOW 0
+// #define XFRM_POLICY_BLOCK 1
+//   __u8        flags;
+// #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */
+//   /* Automatically expand selector to include matching ICMP payloads. */
+// #define XFRM_POLICY_ICMP  2
+//   __u8        share;
+// };
+
+type XfrmUserpolicyInfo struct {
+	Sel      XfrmSelector
+	Lft      XfrmLifetimeCfg
+	Curlft   XfrmLifetimeCur
+	Priority uint32
+	Index    uint32
+	Dir      uint8
+	Action   uint8
+	Flags    uint8
+	Share    uint8
+	Pad      [4]byte
+}
+
+func (msg *XfrmUserpolicyInfo) Len() int {
+	return SizeofXfrmUserpolicyInfo
+}
+
+func DeserializeXfrmUserpolicyInfo(b []byte) *XfrmUserpolicyInfo {
+	return (*XfrmUserpolicyInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyInfo][0]))
+}
+
+func (msg *XfrmUserpolicyInfo) Serialize() []byte {
+	return (*(*[SizeofXfrmUserpolicyInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_user_tmpl {
+//   struct xfrm_id    id;
+//   __u16     family;
+//   xfrm_address_t    saddr;
+//   __u32     reqid;
+//   __u8      mode;
+//   __u8      share;
+//   __u8      optional;
+//   __u32     aalgos;
+//   __u32     ealgos;
+//   __u32     calgos;
+// }
+
+type XfrmUserTmpl struct {
+	XfrmId   XfrmId
+	Family   uint16
+	Pad1     [2]byte
+	Saddr    XfrmAddress
+	Reqid    uint32
+	Mode     uint8
+	Share    uint8
+	Optional uint8
+	Pad2     byte
+	Aalgos   uint32
+	Ealgos   uint32
+	Calgos   uint32
+}
+
+func (msg *XfrmUserTmpl) Len() int {
+	return SizeofXfrmUserTmpl
+}
+
+func DeserializeXfrmUserTmpl(b []byte) *XfrmUserTmpl {
+	return (*XfrmUserTmpl)(unsafe.Pointer(&b[0:SizeofXfrmUserTmpl][0]))
+}
+
+func (msg *XfrmUserTmpl) Serialize() []byte {
+	return (*(*[SizeofXfrmUserTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go
new file mode 100644
index 0000000..08a604b
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go
@@ -0,0 +1,109 @@
+package nl
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"testing"
+)
+
+func (msg *XfrmUserpolicyId) write(b []byte) {
+	native := NativeEndian()
+	msg.Sel.write(b[0:SizeofXfrmSelector])
+	native.PutUint32(b[SizeofXfrmSelector:SizeofXfrmSelector+4], msg.Index)
+	b[SizeofXfrmSelector+4] = msg.Dir
+	copy(b[SizeofXfrmSelector+5:SizeofXfrmSelector+8], msg.Pad[:])
+}
+
+func (msg *XfrmUserpolicyId) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmUserpolicyId)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmUserpolicyIdSafe(b []byte) *XfrmUserpolicyId {
+	var msg = XfrmUserpolicyId{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyId]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmUserpolicyIdDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmUserpolicyId)
+	rand.Read(orig)
+	safemsg := deserializeXfrmUserpolicyIdSafe(orig)
+	msg := DeserializeXfrmUserpolicyId(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmUserpolicyInfo) write(b []byte) {
+	const CfgEnd = SizeofXfrmSelector + SizeofXfrmLifetimeCfg
+	const CurEnd = CfgEnd + SizeofXfrmLifetimeCur
+	native := NativeEndian()
+	msg.Sel.write(b[0:SizeofXfrmSelector])
+	msg.Lft.write(b[SizeofXfrmSelector:CfgEnd])
+	msg.Curlft.write(b[CfgEnd:CurEnd])
+	native.PutUint32(b[CurEnd:CurEnd+4], msg.Priority)
+	native.PutUint32(b[CurEnd+4:CurEnd+8], msg.Index)
+	b[CurEnd+8] = msg.Dir
+	b[CurEnd+9] = msg.Action
+	b[CurEnd+10] = msg.Flags
+	b[CurEnd+11] = msg.Share
+	copy(b[CurEnd+12:CurEnd+16], msg.Pad[:])
+}
+
+func (msg *XfrmUserpolicyInfo) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmUserpolicyInfo)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmUserpolicyInfoSafe(b []byte) *XfrmUserpolicyInfo {
+	var msg = XfrmUserpolicyInfo{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyInfo]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmUserpolicyInfoDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmUserpolicyInfo)
+	rand.Read(orig)
+	safemsg := deserializeXfrmUserpolicyInfoSafe(orig)
+	msg := DeserializeXfrmUserpolicyInfo(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmUserTmpl) write(b []byte) {
+	const AddrEnd = SizeofXfrmId + 4 + SizeofXfrmAddress
+	native := NativeEndian()
+	msg.XfrmId.write(b[0:SizeofXfrmId])
+	native.PutUint16(b[SizeofXfrmId:SizeofXfrmId+2], msg.Family)
+	copy(b[SizeofXfrmId+2:SizeofXfrmId+4], msg.Pad1[:])
+	msg.Saddr.write(b[SizeofXfrmId+4 : AddrEnd])
+	native.PutUint32(b[AddrEnd:AddrEnd+4], msg.Reqid)
+	b[AddrEnd+4] = msg.Mode
+	b[AddrEnd+5] = msg.Share
+	b[AddrEnd+6] = msg.Optional
+	b[AddrEnd+7] = msg.Pad2
+	native.PutUint32(b[AddrEnd+8:AddrEnd+12], msg.Aalgos)
+	native.PutUint32(b[AddrEnd+12:AddrEnd+16], msg.Ealgos)
+	native.PutUint32(b[AddrEnd+16:AddrEnd+20], msg.Calgos)
+}
+
+func (msg *XfrmUserTmpl) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmUserTmpl)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmUserTmplSafe(b []byte) *XfrmUserTmpl {
+	var msg = XfrmUserTmpl{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmUserTmpl]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmUserTmplDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmUserTmpl)
+	rand.Read(orig)
+	safemsg := deserializeXfrmUserTmplSafe(orig)
+	msg := DeserializeXfrmUserTmpl(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
new file mode 100644
index 0000000..4876ce4
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
@@ -0,0 +1,221 @@
+package nl
+
+import (
+	"unsafe"
+)
+
+const (
+	SizeofXfrmUsersaId   = 0x18
+	SizeofXfrmStats      = 0x0c
+	SizeofXfrmUsersaInfo = 0xe0
+	SizeofXfrmAlgo       = 0x44
+	SizeofXfrmAlgoAuth   = 0x48
+	SizeofXfrmEncapTmpl  = 0x18
+)
+
+// struct xfrm_usersa_id {
+//   xfrm_address_t      daddr;
+//   __be32        spi;
+//   __u16       family;
+//   __u8        proto;
+// };
+
+type XfrmUsersaId struct {
+	Daddr  XfrmAddress
+	Spi    uint32 // big endian
+	Family uint16
+	Proto  uint8
+	Pad    byte
+}
+
+func (msg *XfrmUsersaId) Len() int {
+	return SizeofXfrmUsersaId
+}
+
+func DeserializeXfrmUsersaId(b []byte) *XfrmUsersaId {
+	return (*XfrmUsersaId)(unsafe.Pointer(&b[0:SizeofXfrmUsersaId][0]))
+}
+
+func (msg *XfrmUsersaId) Serialize() []byte {
+	return (*(*[SizeofXfrmUsersaId]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_stats {
+//   __u32 replay_window;
+//   __u32 replay;
+//   __u32 integrity_failed;
+// };
+
+type XfrmStats struct {
+	ReplayWindow    uint32
+	Replay          uint32
+	IntegrityFailed uint32
+}
+
+func (msg *XfrmStats) Len() int {
+	return SizeofXfrmStats
+}
+
+func DeserializeXfrmStats(b []byte) *XfrmStats {
+	return (*XfrmStats)(unsafe.Pointer(&b[0:SizeofXfrmStats][0]))
+}
+
+func (msg *XfrmStats) Serialize() []byte {
+	return (*(*[SizeofXfrmStats]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_usersa_info {
+//   struct xfrm_selector    sel;
+//   struct xfrm_id      id;
+//   xfrm_address_t      saddr;
+//   struct xfrm_lifetime_cfg  lft;
+//   struct xfrm_lifetime_cur  curlft;
+//   struct xfrm_stats   stats;
+//   __u32       seq;
+//   __u32       reqid;
+//   __u16       family;
+//   __u8        mode;   /* XFRM_MODE_xxx */
+//   __u8        replay_window;
+//   __u8        flags;
+// #define XFRM_STATE_NOECN  1
+// #define XFRM_STATE_DECAP_DSCP 2
+// #define XFRM_STATE_NOPMTUDISC 4
+// #define XFRM_STATE_WILDRECV 8
+// #define XFRM_STATE_ICMP   16
+// #define XFRM_STATE_AF_UNSPEC  32
+// #define XFRM_STATE_ALIGN4 64
+// #define XFRM_STATE_ESN    128
+// };
+//
+// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1
+//
+
+type XfrmUsersaInfo struct {
+	Sel          XfrmSelector
+	Id           XfrmId
+	Saddr        XfrmAddress
+	Lft          XfrmLifetimeCfg
+	Curlft       XfrmLifetimeCur
+	Stats        XfrmStats
+	Seq          uint32
+	Reqid        uint32
+	Family       uint16
+	Mode         uint8
+	ReplayWindow uint8
+	Flags        uint8
+	Pad          [7]byte
+}
+
+func (msg *XfrmUsersaInfo) Len() int {
+	return SizeofXfrmUsersaInfo
+}
+
+func DeserializeXfrmUsersaInfo(b []byte) *XfrmUsersaInfo {
+	return (*XfrmUsersaInfo)(unsafe.Pointer(&b[0:SizeofXfrmUsersaInfo][0]))
+}
+
+func (msg *XfrmUsersaInfo) Serialize() []byte {
+	return (*(*[SizeofXfrmUsersaInfo]byte)(unsafe.Pointer(msg)))[:]
+}
+
+// struct xfrm_algo {
+//   char    alg_name[64];
+//   unsigned int  alg_key_len;    /* in bits */
+//   char    alg_key[0];
+// };
+
+type XfrmAlgo struct {
+	AlgName   [64]byte
+	AlgKeyLen uint32
+	AlgKey    []byte
+}
+
+func (msg *XfrmAlgo) Len() int {
+	return SizeofXfrmAlgo + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgo(b []byte) *XfrmAlgo {
+	ret := XfrmAlgo{}
+	copy(ret.AlgName[:], b[0:64])
+	ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+	ret.AlgKey = b[68:ret.Len()]
+	return &ret
+}
+
+func (msg *XfrmAlgo) Serialize() []byte {
+	b := make([]byte, msg.Len())
+	copy(b[0:64], msg.AlgName[:])
+	copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+	copy(b[68:msg.Len()], msg.AlgKey[:])
+	return b
+}
+
+// struct xfrm_algo_auth {
+//   char    alg_name[64];
+//   unsigned int  alg_key_len;    /* in bits */
+//   unsigned int  alg_trunc_len;  /* in bits */
+//   char    alg_key[0];
+// };
+
+type XfrmAlgoAuth struct {
+	AlgName     [64]byte
+	AlgKeyLen   uint32
+	AlgTruncLen uint32
+	AlgKey      []byte
+}
+
+func (msg *XfrmAlgoAuth) Len() int {
+	return SizeofXfrmAlgoAuth + int(msg.AlgKeyLen/8)
+}
+
+func DeserializeXfrmAlgoAuth(b []byte) *XfrmAlgoAuth {
+	ret := XfrmAlgoAuth{}
+	copy(ret.AlgName[:], b[0:64])
+	ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
+	ret.AlgTruncLen = *(*uint32)(unsafe.Pointer(&b[68]))
+	ret.AlgKey = b[72:ret.Len()]
+	return &ret
+}
+
+func (msg *XfrmAlgoAuth) Serialize() []byte {
+	b := make([]byte, msg.Len())
+	copy(b[0:64], msg.AlgName[:])
+	copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
+	copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgTruncLen)))[:])
+	copy(b[72:msg.Len()], msg.AlgKey[:])
+	return b
+}
+
+// struct xfrm_algo_aead {
+//   char    alg_name[64];
+//   unsigned int  alg_key_len;  /* in bits */
+//   unsigned int  alg_icv_len;  /* in bits */
+//   char    alg_key[0];
+// }
+
+// struct xfrm_encap_tmpl {
+//   __u16   encap_type;
+//   __be16    encap_sport;
+//   __be16    encap_dport;
+//   xfrm_address_t  encap_oa;
+// };
+
+type XfrmEncapTmpl struct {
+	EncapType  uint16
+	EncapSport uint16 // big endian
+	EncapDport uint16 // big endian
+	Pad        [2]byte
+	EncapOa    XfrmAddress
+}
+
+func (msg *XfrmEncapTmpl) Len() int {
+	return SizeofXfrmEncapTmpl
+}
+
+func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl {
+	return (*XfrmEncapTmpl)(unsafe.Pointer(&b[0:SizeofXfrmEncapTmpl][0]))
+}
+
+func (msg *XfrmEncapTmpl) Serialize() []byte {
+	return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:]
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go
new file mode 100644
index 0000000..d5281e9
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go
@@ -0,0 +1,207 @@
+package nl
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"testing"
+)
+
+func (msg *XfrmUsersaId) write(b []byte) {
+	native := NativeEndian()
+	msg.Daddr.write(b[0:SizeofXfrmAddress])
+	native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi)
+	native.PutUint16(b[SizeofXfrmAddress+4:SizeofXfrmAddress+6], msg.Family)
+	b[SizeofXfrmAddress+6] = msg.Proto
+	b[SizeofXfrmAddress+7] = msg.Pad
+}
+
+func (msg *XfrmUsersaId) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmUsersaId)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmUsersaIdSafe(b []byte) *XfrmUsersaId {
+	var msg = XfrmUsersaId{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaId]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmUsersaIdDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmUsersaId)
+	rand.Read(orig)
+	safemsg := deserializeXfrmUsersaIdSafe(orig)
+	msg := DeserializeXfrmUsersaId(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmStats) write(b []byte) {
+	native := NativeEndian()
+	native.PutUint32(b[0:4], msg.ReplayWindow)
+	native.PutUint32(b[4:8], msg.Replay)
+	native.PutUint32(b[8:12], msg.IntegrityFailed)
+}
+
+func (msg *XfrmStats) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmStats)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmStatsSafe(b []byte) *XfrmStats {
+	var msg = XfrmStats{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmStats]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmStatsDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmStats)
+	rand.Read(orig)
+	safemsg := deserializeXfrmStatsSafe(orig)
+	msg := DeserializeXfrmStats(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmUsersaInfo) write(b []byte) {
+	const IdEnd = SizeofXfrmSelector + SizeofXfrmId
+	const AddressEnd = IdEnd + SizeofXfrmAddress
+	const CfgEnd = AddressEnd + SizeofXfrmLifetimeCfg
+	const CurEnd = CfgEnd + SizeofXfrmLifetimeCur
+	const StatsEnd = CurEnd + SizeofXfrmStats
+	native := NativeEndian()
+	msg.Sel.write(b[0:SizeofXfrmSelector])
+	msg.Id.write(b[SizeofXfrmSelector:IdEnd])
+	msg.Saddr.write(b[IdEnd:AddressEnd])
+	msg.Lft.write(b[AddressEnd:CfgEnd])
+	msg.Curlft.write(b[CfgEnd:CurEnd])
+	msg.Stats.write(b[CurEnd:StatsEnd])
+	native.PutUint32(b[StatsEnd:StatsEnd+4], msg.Seq)
+	native.PutUint32(b[StatsEnd+4:StatsEnd+8], msg.Reqid)
+	native.PutUint16(b[StatsEnd+8:StatsEnd+10], msg.Family)
+	b[StatsEnd+10] = msg.Mode
+	b[StatsEnd+11] = msg.ReplayWindow
+	b[StatsEnd+12] = msg.Flags
+	copy(b[StatsEnd+13:StatsEnd+20], msg.Pad[:])
+}
+
+func (msg *XfrmUsersaInfo) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmUsersaInfo)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmUsersaInfoSafe(b []byte) *XfrmUsersaInfo {
+	var msg = XfrmUsersaInfo{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaInfo]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmUsersaInfoDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmUsersaInfo)
+	rand.Read(orig)
+	safemsg := deserializeXfrmUsersaInfoSafe(orig)
+	msg := DeserializeXfrmUsersaInfo(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmAlgo) write(b []byte) {
+	native := NativeEndian()
+	copy(b[0:64], msg.AlgName[:])
+	native.PutUint32(b[64:68], msg.AlgKeyLen)
+	copy(b[68:msg.Len()], msg.AlgKey[:])
+}
+
+func (msg *XfrmAlgo) serializeSafe() []byte {
+	b := make([]byte, msg.Len())
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmAlgoSafe(b []byte) *XfrmAlgo {
+	var msg = XfrmAlgo{}
+	copy(msg.AlgName[:], b[0:64])
+	binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen)
+	msg.AlgKey = b[68:msg.Len()]
+	return &msg
+}
+
+func TestXfrmAlgoDeserializeSerialize(t *testing.T) {
+	// use a 32 byte key len
+	var orig = make([]byte, SizeofXfrmAlgo+32)
+	rand.Read(orig)
+	// set the key len to 256 bits
+	orig[64] = 0
+	orig[65] = 1
+	orig[66] = 0
+	orig[67] = 0
+	safemsg := deserializeXfrmAlgoSafe(orig)
+	msg := DeserializeXfrmAlgo(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmAlgoAuth) write(b []byte) {
+	native := NativeEndian()
+	copy(b[0:64], msg.AlgName[:])
+	native.PutUint32(b[64:68], msg.AlgKeyLen)
+	native.PutUint32(b[68:72], msg.AlgTruncLen)
+	copy(b[72:msg.Len()], msg.AlgKey[:])
+}
+
+func (msg *XfrmAlgoAuth) serializeSafe() []byte {
+	b := make([]byte, msg.Len())
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmAlgoAuthSafe(b []byte) *XfrmAlgoAuth {
+	var msg = XfrmAlgoAuth{}
+	copy(msg.AlgName[:], b[0:64])
+	binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen)
+	binary.Read(bytes.NewReader(b[68:72]), NativeEndian(), &msg.AlgTruncLen)
+	msg.AlgKey = b[72:msg.Len()]
+	return &msg
+}
+
+func TestXfrmAlgoAuthDeserializeSerialize(t *testing.T) {
+	// use a 32 byte key len
+	var orig = make([]byte, SizeofXfrmAlgoAuth+32)
+	rand.Read(orig)
+	// set the key len to 256 bits
+	orig[64] = 0
+	orig[65] = 1
+	orig[66] = 0
+	orig[67] = 0
+	safemsg := deserializeXfrmAlgoAuthSafe(orig)
+	msg := DeserializeXfrmAlgoAuth(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
+
+func (msg *XfrmEncapTmpl) write(b []byte) {
+	native := NativeEndian()
+	native.PutUint16(b[0:2], msg.EncapType)
+	native.PutUint16(b[2:4], msg.EncapSport)
+	native.PutUint16(b[4:6], msg.EncapDport)
+	copy(b[6:8], msg.Pad[:])
+	msg.EncapOa.write(b[8:SizeofXfrmAddress])
+}
+
+func (msg *XfrmEncapTmpl) serializeSafe() []byte {
+	b := make([]byte, SizeofXfrmEncapTmpl)
+	msg.write(b)
+	return b
+}
+
+func deserializeXfrmEncapTmplSafe(b []byte) *XfrmEncapTmpl {
+	var msg = XfrmEncapTmpl{}
+	binary.Read(bytes.NewReader(b[0:SizeofXfrmEncapTmpl]), NativeEndian(), &msg)
+	return &msg
+}
+
+func TestXfrmEncapTmplDeserializeSerialize(t *testing.T) {
+	var orig = make([]byte, SizeofXfrmEncapTmpl)
+	rand.Read(orig)
+	safemsg := deserializeXfrmEncapTmplSafe(orig)
+	msg := DeserializeXfrmEncapTmpl(orig)
+	testDeserializeSerialize(t, orig, safemsg, msg)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/protinfo.go b/vendor/src/github.com/vishvananda/netlink/protinfo.go
new file mode 100644
index 0000000..79396da
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/protinfo.go
@@ -0,0 +1,53 @@
+package netlink
+
+import (
+	"strings"
+)
+
+// Protinfo represents bridge flags from netlink.
+type Protinfo struct {
+	Hairpin   bool
+	Guard     bool
+	FastLeave bool
+	RootBlock bool
+	Learning  bool
+	Flood     bool
+}
+
+// String returns a list of enabled flags
+func (prot *Protinfo) String() string {
+	boolStrings := make([]string, 0)
+	if prot.Hairpin {
+		boolStrings = append(boolStrings, "Hairpin")
+	}
+	if prot.Guard {
+		boolStrings = append(boolStrings, "Guard")
+	}
+	if prot.FastLeave {
+		boolStrings = append(boolStrings, "FastLeave")
+	}
+	if prot.RootBlock {
+		boolStrings = append(boolStrings, "RootBlock")
+	}
+	if prot.Learning {
+		boolStrings = append(boolStrings, "Learning")
+	}
+	if prot.Flood {
+		boolStrings = append(boolStrings, "Flood")
+	}
+	return strings.Join(boolStrings, " ")
+}
+
+func boolToByte(x bool) []byte {
+	if x {
+		return []byte{1}
+	}
+	return []byte{0}
+}
+
+func byteToBool(x byte) bool {
+	if uint8(x) != 0 {
+		return true
+	}
+	return false
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
new file mode 100644
index 0000000..7181eba
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
@@ -0,0 +1,60 @@
+package netlink
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+func LinkGetProtinfo(link Link) (Protinfo, error) {
+	base := link.Attrs()
+	ensureIndex(base)
+	var pi Protinfo
+	req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+	msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+	req.AddData(msg)
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		return pi, err
+	}
+
+	for _, m := range msgs {
+		ans := nl.DeserializeIfInfomsg(m)
+		if int(ans.Index) != base.Index {
+			continue
+		}
+		attrs, err := nl.ParseRouteAttr(m[ans.Len():])
+		if err != nil {
+			return pi, err
+		}
+		for _, attr := range attrs {
+			if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED {
+				continue
+			}
+			infos, err := nl.ParseRouteAttr(attr.Value)
+			if err != nil {
+				return pi, err
+			}
+			var pi Protinfo
+			for _, info := range infos {
+				switch info.Attr.Type {
+				case nl.IFLA_BRPORT_MODE:
+					pi.Hairpin = byteToBool(info.Value[0])
+				case nl.IFLA_BRPORT_GUARD:
+					pi.Guard = byteToBool(info.Value[0])
+				case nl.IFLA_BRPORT_FAST_LEAVE:
+					pi.FastLeave = byteToBool(info.Value[0])
+				case nl.IFLA_BRPORT_PROTECT:
+					pi.RootBlock = byteToBool(info.Value[0])
+				case nl.IFLA_BRPORT_LEARNING:
+					pi.Learning = byteToBool(info.Value[0])
+				case nl.IFLA_BRPORT_UNICAST_FLOOD:
+					pi.Flood = byteToBool(info.Value[0])
+				}
+			}
+			return pi, nil
+		}
+	}
+	return pi, fmt.Errorf("Device with index %d not found", base.Index)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/protinfo_test.go b/vendor/src/github.com/vishvananda/netlink/protinfo_test.go
new file mode 100644
index 0000000..f94c42b
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/protinfo_test.go
@@ -0,0 +1,98 @@
+package netlink
+
+import "testing"
+
+func TestProtinfo(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+	master := &Bridge{LinkAttrs{Name: "foo"}}
+	if err := LinkAdd(master); err != nil {
+		t.Fatal(err)
+	}
+	iface1 := &Dummy{LinkAttrs{Name: "bar1", MasterIndex: master.Index}}
+	iface2 := &Dummy{LinkAttrs{Name: "bar2", MasterIndex: master.Index}}
+	iface3 := &Dummy{LinkAttrs{Name: "bar3"}}
+
+	if err := LinkAdd(iface1); err != nil {
+		t.Fatal(err)
+	}
+	if err := LinkAdd(iface2); err != nil {
+		t.Fatal(err)
+	}
+	if err := LinkAdd(iface3); err != nil {
+		t.Fatal(err)
+	}
+
+	oldpi1, err := LinkGetProtinfo(iface1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	oldpi2, err := LinkGetProtinfo(iface2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkSetHairpin(iface1, true); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := LinkSetRootBlock(iface1, true); err != nil {
+		t.Fatal(err)
+	}
+
+	pi1, err := LinkGetProtinfo(iface1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !pi1.Hairpin {
+		t.Fatalf("Hairpin mode is not enabled for %s, but should", iface1.Name)
+	}
+	if !pi1.RootBlock {
+		t.Fatalf("RootBlock is not enabled for %s, but should", iface1.Name)
+	}
+	if pi1.Guard != oldpi1.Guard {
+		t.Fatalf("Guard field was changed for %s but shouldn't", iface1.Name)
+	}
+	if pi1.FastLeave != oldpi1.FastLeave {
+		t.Fatalf("FastLeave field was changed for %s but shouldn't", iface1.Name)
+	}
+	if pi1.Learning != oldpi1.Learning {
+		t.Fatalf("Learning field was changed for %s but shouldn't", iface1.Name)
+	}
+	if pi1.Flood != oldpi1.Flood {
+		t.Fatalf("Flood field was changed for %s but shouldn't", iface1.Name)
+	}
+
+	if err := LinkSetGuard(iface2, true); err != nil {
+		t.Fatal(err)
+	}
+	if err := LinkSetLearning(iface2, false); err != nil {
+		t.Fatal(err)
+	}
+	pi2, err := LinkGetProtinfo(iface2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pi2.Hairpin {
+		t.Fatalf("Hairpin mode is enabled for %s, but shouldn't", iface2.Name)
+	}
+	if !pi2.Guard {
+		t.Fatalf("Guard is not enabled for %s, but should", iface2.Name)
+	}
+	if pi2.Learning {
+		t.Fatalf("Learning is enabled for %s, but shouldn't", iface2.Name)
+	}
+	if pi2.RootBlock != oldpi2.RootBlock {
+		t.Fatalf("RootBlock field was changed for %s but shouldn't", iface2.Name)
+	}
+	if pi2.FastLeave != oldpi2.FastLeave {
+		t.Fatalf("FastLeave field was changed for %s but shouldn't", iface2.Name)
+	}
+	if pi2.Flood != oldpi2.Flood {
+		t.Fatalf("Flood field was changed for %s but shouldn't", iface2.Name)
+	}
+
+	if err := LinkSetHairpin(iface3, true); err == nil || err.Error() != "operation not supported" {
+		t.Fatalf("Set protinfo attrs for link without master is not supported, but err: %s", err)
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/route.go b/vendor/src/github.com/vishvananda/netlink/route.go
new file mode 100644
index 0000000..6218546
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/route.go
@@ -0,0 +1,35 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+)
+
+// Scope is an enum representing a route scope.
+type Scope uint8
+
+const (
+	SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE
+	SCOPE_SITE     Scope = syscall.RT_SCOPE_SITE
+	SCOPE_LINK     Scope = syscall.RT_SCOPE_LINK
+	SCOPE_HOST     Scope = syscall.RT_SCOPE_HOST
+	SCOPE_NOWHERE  Scope = syscall.RT_SCOPE_NOWHERE
+)
+
+// Route represents a netlink route. A route is associated with a link,
+// has a destination network, an optional source ip, and optional
+// gateway. Advanced route parameters and non-main routing tables are
+// currently not supported.
+type Route struct {
+	LinkIndex int
+	Scope     Scope
+	Dst       *net.IPNet
+	Src       net.IP
+	Gw        net.IP
+}
+
+func (r Route) String() string {
+	return fmt.Sprintf("{Ifindex: %d Dst: %s Src: %s Gw: %s}", r.LinkIndex, r.Dst,
+		r.Src, r.Gw)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/route_linux.go b/vendor/src/github.com/vishvananda/netlink/route_linux.go
new file mode 100644
index 0000000..43872aa
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/route_linux.go
@@ -0,0 +1,225 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+// RtAttr is shared so it is in netlink_linux.go
+
+// RouteAdd will add a route to the system.
+// Equivalent to: `ip route add $route`
+func RouteAdd(route *Route) error {
+	req := nl.NewNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+	return routeHandle(route, req)
+}
+
+// RouteAdd will delete a route from the system.
+// Equivalent to: `ip route del $route`
+func RouteDel(route *Route) error {
+	req := nl.NewNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK)
+	return routeHandle(route, req)
+}
+
+func routeHandle(route *Route, req *nl.NetlinkRequest) error {
+	if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil {
+		return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil")
+	}
+
+	msg := nl.NewRtMsg()
+	msg.Scope = uint8(route.Scope)
+	family := -1
+	var rtAttrs []*nl.RtAttr
+
+	if route.Dst != nil && route.Dst.IP != nil {
+		dstLen, _ := route.Dst.Mask.Size()
+		msg.Dst_len = uint8(dstLen)
+		dstFamily := nl.GetIPFamily(route.Dst.IP)
+		family = dstFamily
+		var dstData []byte
+		if dstFamily == FAMILY_V4 {
+			dstData = route.Dst.IP.To4()
+		} else {
+			dstData = route.Dst.IP.To16()
+		}
+		rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData))
+	}
+
+	if route.Src != nil {
+		srcFamily := nl.GetIPFamily(route.Src)
+		if family != -1 && family != srcFamily {
+			return fmt.Errorf("source and destination ip are not the same IP family")
+		}
+		family = srcFamily
+		var srcData []byte
+		if srcFamily == FAMILY_V4 {
+			srcData = route.Src.To4()
+		} else {
+			srcData = route.Src.To16()
+		}
+		// The commonly used src ip for routes is actually PREFSRC
+		rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData))
+	}
+
+	if route.Gw != nil {
+		gwFamily := nl.GetIPFamily(route.Gw)
+		if family != -1 && family != gwFamily {
+			return fmt.Errorf("gateway, source, and destination ip are not the same IP family")
+		}
+		family = gwFamily
+		var gwData []byte
+		if gwFamily == FAMILY_V4 {
+			gwData = route.Gw.To4()
+		} else {
+			gwData = route.Gw.To16()
+		}
+		rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData))
+	}
+
+	msg.Family = uint8(family)
+
+	req.AddData(msg)
+	for _, attr := range rtAttrs {
+		req.AddData(attr)
+	}
+
+	var (
+		b      = make([]byte, 4)
+		native = nl.NativeEndian()
+	)
+	native.PutUint32(b, uint32(route.LinkIndex))
+
+	req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b))
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// RouteList gets a list of routes in the system.
+// Equivalent to: `ip route show`.
+// The list can be filtered by link and ip family.
+func RouteList(link Link, family int) ([]Route, error) {
+	req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP)
+	msg := nl.NewIfInfomsg(family)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+	if err != nil {
+		return nil, err
+	}
+
+	index := 0
+	if link != nil {
+		base := link.Attrs()
+		ensureIndex(base)
+		index = base.Index
+	}
+
+	native := nl.NativeEndian()
+	res := make([]Route, 0)
+	for _, m := range msgs {
+		msg := nl.DeserializeRtMsg(m)
+
+		if msg.Flags&syscall.RTM_F_CLONED != 0 {
+			// Ignore cloned routes
+			continue
+		}
+
+		if msg.Table != syscall.RT_TABLE_MAIN {
+			// Ignore non-main tables
+			continue
+		}
+
+		attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+
+		route := Route{Scope: Scope(msg.Scope)}
+		for _, attr := range attrs {
+			switch attr.Attr.Type {
+			case syscall.RTA_GATEWAY:
+				route.Gw = net.IP(attr.Value)
+			case syscall.RTA_PREFSRC:
+				route.Src = net.IP(attr.Value)
+			case syscall.RTA_DST:
+				route.Dst = &net.IPNet{
+					IP:   attr.Value,
+					Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)),
+				}
+			case syscall.RTA_OIF:
+				routeIndex := int(native.Uint32(attr.Value[0:4]))
+				if link != nil && routeIndex != index {
+					// Ignore routes from other interfaces
+					continue
+				}
+				route.LinkIndex = routeIndex
+			}
+		}
+		res = append(res, route)
+	}
+
+	return res, nil
+}
+
+// RouteGet gets a route to a specific destination from the host system.
+// Equivalent to: 'ip route get'.
+func RouteGet(destination net.IP) ([]Route, error) {
+	req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST)
+	family := nl.GetIPFamily(destination)
+	var destinationData []byte
+	var bitlen uint8
+	if family == FAMILY_V4 {
+		destinationData = destination.To4()
+		bitlen = 32
+	} else {
+		destinationData = destination.To16()
+		bitlen = 128
+	}
+	msg := &nl.RtMsg{}
+	msg.Family = uint8(family)
+	msg.Dst_len = bitlen
+	req.AddData(msg)
+
+	rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData)
+	req.AddData(rtaDst)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE)
+	if err != nil {
+		return nil, err
+	}
+
+	native := nl.NativeEndian()
+	res := make([]Route, 0)
+	for _, m := range msgs {
+		msg := nl.DeserializeRtMsg(m)
+		attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+
+		route := Route{}
+		for _, attr := range attrs {
+			switch attr.Attr.Type {
+			case syscall.RTA_GATEWAY:
+				route.Gw = net.IP(attr.Value)
+			case syscall.RTA_PREFSRC:
+				route.Src = net.IP(attr.Value)
+			case syscall.RTA_DST:
+				route.Dst = &net.IPNet{
+					IP:   attr.Value,
+					Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)),
+				}
+			case syscall.RTA_OIF:
+				routeIndex := int(native.Uint32(attr.Value[0:4]))
+				route.LinkIndex = routeIndex
+			}
+		}
+		res = append(res, route)
+	}
+	return res, nil
+
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/route_test.go b/vendor/src/github.com/vishvananda/netlink/route_test.go
new file mode 100644
index 0000000..f02bef8
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/route_test.go
@@ -0,0 +1,84 @@
+package netlink
+
+import (
+	"net"
+	"testing"
+)
+
+func TestRouteAddDel(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	// get loopback interface
+	link, err := LinkByName("lo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// bring the interface up
+	if err = LinkSetUp(link); err != nil {
+		t.Fatal(err)
+	}
+
+	// add a gateway route
+	_, dst, err := net.ParseCIDR("192.168.0.0/24")
+
+	ip := net.ParseIP("127.1.1.1")
+	route := Route{LinkIndex: link.Attrs().Index, Dst: dst, Src: ip}
+	err = RouteAdd(&route)
+	if err != nil {
+		t.Fatal(err)
+	}
+	routes, err := RouteList(link, FAMILY_V4)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(routes) != 1 {
+		t.Fatal("Link not added properly")
+	}
+
+	dstIP := net.ParseIP("192.168.0.42")
+	routeToDstIP, err := RouteGet(dstIP)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(routeToDstIP) == 0 {
+		t.Fatal("Default route not present")
+	}
+
+	err = RouteDel(&route)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	routes, err = RouteList(link, FAMILY_V4)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(routes) != 0 {
+		t.Fatal("Route not removed properly")
+	}
+
+}
+
+func TestRouteAddIncomplete(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	// get loopback interface
+	link, err := LinkByName("lo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// bring the interface up
+	if err = LinkSetUp(link); err != nil {
+		t.Fatal(err)
+	}
+
+	route := Route{LinkIndex: link.Attrs().Index}
+	if err := RouteAdd(&route); err == nil {
+		t.Fatal("Adding incomplete route should fail")
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm.go b/vendor/src/github.com/vishvananda/netlink/xfrm.go
new file mode 100644
index 0000000..621ffb6
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm.go
@@ -0,0 +1,64 @@
+package netlink
+
+import (
+	"fmt"
+	"syscall"
+)
+
+// Proto is an enum representing an ipsec protocol.
+type Proto uint8
+
+const (
+	XFRM_PROTO_ROUTE2    Proto = syscall.IPPROTO_ROUTING
+	XFRM_PROTO_ESP       Proto = syscall.IPPROTO_ESP
+	XFRM_PROTO_AH        Proto = syscall.IPPROTO_AH
+	XFRM_PROTO_HAO       Proto = syscall.IPPROTO_DSTOPTS
+	XFRM_PROTO_COMP      Proto = syscall.IPPROTO_COMP
+	XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW
+)
+
+func (p Proto) String() string {
+	switch p {
+	case XFRM_PROTO_ROUTE2:
+		return "route2"
+	case XFRM_PROTO_ESP:
+		return "esp"
+	case XFRM_PROTO_AH:
+		return "ah"
+	case XFRM_PROTO_HAO:
+		return "hao"
+	case XFRM_PROTO_COMP:
+		return "comp"
+	case XFRM_PROTO_IPSEC_ANY:
+		return "ipsec-any"
+	}
+	return fmt.Sprintf("%d", p)
+}
+
+// Mode is an enum representing an ipsec transport.
+type Mode uint8
+
+const (
+	XFRM_MODE_TRANSPORT Mode = iota
+	XFRM_MODE_TUNNEL
+	XFRM_MODE_ROUTEOPTIMIZATION
+	XFRM_MODE_IN_TRIGGER
+	XFRM_MODE_BEET
+	XFRM_MODE_MAX
+)
+
+func (m Mode) String() string {
+	switch m {
+	case XFRM_MODE_TRANSPORT:
+		return "transport"
+	case XFRM_MODE_TUNNEL:
+		return "tunnel"
+	case XFRM_MODE_ROUTEOPTIMIZATION:
+		return "ro"
+	case XFRM_MODE_IN_TRIGGER:
+		return "in_trigger"
+	case XFRM_MODE_BEET:
+		return "beet"
+	}
+	return fmt.Sprintf("%d", m)
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
new file mode 100644
index 0000000..d85c65d
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
@@ -0,0 +1,59 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+)
+
+// Dir is an enum representing an ipsec template direction.
+type Dir uint8
+
+const (
+	XFRM_DIR_IN Dir = iota
+	XFRM_DIR_OUT
+	XFRM_DIR_FWD
+	XFRM_SOCKET_IN
+	XFRM_SOCKET_OUT
+	XFRM_SOCKET_FWD
+)
+
+func (d Dir) String() string {
+	switch d {
+	case XFRM_DIR_IN:
+		return "dir in"
+	case XFRM_DIR_OUT:
+		return "dir out"
+	case XFRM_DIR_FWD:
+		return "dir fwd"
+	case XFRM_SOCKET_IN:
+		return "socket in"
+	case XFRM_SOCKET_OUT:
+		return "socket out"
+	case XFRM_SOCKET_FWD:
+		return "socket fwd"
+	}
+	return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
+}
+
+// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
+// policy. These rules are matched with XfrmState to determine encryption
+// and authentication algorithms.
+type XfrmPolicyTmpl struct {
+	Dst   net.IP
+	Src   net.IP
+	Proto Proto
+	Mode  Mode
+	Reqid int
+}
+
+// XfrmPolicy represents an ipsec policy. It represents the overlay network
+// and has a list of XfrmPolicyTmpls representing the base addresses of
+// the policy.
+type XfrmPolicy struct {
+	Dst      *net.IPNet
+	Src      *net.IPNet
+	Dir      Dir
+	Priority int
+	Index    int
+	Tmpls    []XfrmPolicyTmpl
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
new file mode 100644
index 0000000..6fe1b63
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -0,0 +1,127 @@
+package netlink
+
+import (
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
+	sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP))
+	sel.Daddr.FromIP(policy.Dst.IP)
+	sel.Saddr.FromIP(policy.Src.IP)
+	prefixlenD, _ := policy.Dst.Mask.Size()
+	sel.PrefixlenD = uint8(prefixlenD)
+	prefixlenS, _ := policy.Src.Mask.Size()
+	sel.PrefixlenS = uint8(prefixlenS)
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+	req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWPOLICY, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+	msg := &nl.XfrmUserpolicyInfo{}
+	selFromPolicy(&msg.Sel, policy)
+	msg.Priority = uint32(policy.Priority)
+	msg.Index = uint32(policy.Index)
+	msg.Dir = uint8(policy.Dir)
+	msg.Lft.SoftByteLimit = nl.XFRM_INF
+	msg.Lft.HardByteLimit = nl.XFRM_INF
+	msg.Lft.SoftPacketLimit = nl.XFRM_INF
+	msg.Lft.HardPacketLimit = nl.XFRM_INF
+	req.AddData(msg)
+
+	tmplData := make([]byte, nl.SizeofXfrmUserTmpl*len(policy.Tmpls))
+	for i, tmpl := range policy.Tmpls {
+		start := i * nl.SizeofXfrmUserTmpl
+		userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl])
+		userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst)
+		userTmpl.Saddr.FromIP(tmpl.Src)
+		userTmpl.XfrmId.Proto = uint8(tmpl.Proto)
+		userTmpl.Mode = uint8(tmpl.Mode)
+		userTmpl.Reqid = uint32(tmpl.Reqid)
+		userTmpl.Aalgos = ^uint32(0)
+		userTmpl.Ealgos = ^uint32(0)
+		userTmpl.Calgos = ^uint32(0)
+	}
+	if len(tmplData) > 0 {
+		tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData)
+		req.AddData(tmpls)
+	}
+
+	_, err := req.Execute(syscall.NETLINK_XFRM, 0)
+	return err
+}
+
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+	req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELPOLICY, syscall.NLM_F_ACK)
+
+	msg := &nl.XfrmUserpolicyId{}
+	selFromPolicy(&msg.Sel, policy)
+	msg.Index = uint32(policy.Index)
+	msg.Dir = uint8(policy.Dir)
+	req.AddData(msg)
+
+	_, err := req.Execute(syscall.NETLINK_XFRM, 0)
+	return err
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+	req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP)
+
+	msg := nl.NewIfInfomsg(family)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]XfrmPolicy, 0)
+	for _, m := range msgs {
+		msg := nl.DeserializeXfrmUserpolicyInfo(m)
+
+		if family != FAMILY_ALL && family != int(msg.Sel.Family) {
+			continue
+		}
+
+		var policy XfrmPolicy
+
+		policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD)
+		policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS)
+		policy.Priority = int(msg.Priority)
+		policy.Index = int(msg.Index)
+		policy.Dir = Dir(msg.Dir)
+
+		attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+
+		for _, attr := range attrs {
+			switch attr.Attr.Type {
+			case nl.XFRMA_TMPL:
+				max := len(attr.Value)
+				for i := 0; i < max; i += nl.SizeofXfrmUserTmpl {
+					var resTmpl XfrmPolicyTmpl
+					tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl])
+					resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP()
+					resTmpl.Src = tmpl.Saddr.ToIP()
+					resTmpl.Proto = Proto(tmpl.XfrmId.Proto)
+					resTmpl.Mode = Mode(tmpl.Mode)
+					resTmpl.Reqid = int(tmpl.Reqid)
+					policy.Tmpls = append(policy.Tmpls, resTmpl)
+				}
+			}
+		}
+		res = append(res, policy)
+	}
+	return res, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_policy_test.go b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_test.go
new file mode 100644
index 0000000..06d178d
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_policy_test.go
@@ -0,0 +1,49 @@
+package netlink
+
+import (
+	"net"
+	"testing"
+)
+
+func TestXfrmPolicyAddDel(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	src, _ := ParseIPNet("127.1.1.1/32")
+	dst, _ := ParseIPNet("127.1.1.2/32")
+	policy := XfrmPolicy{
+		Src: src,
+		Dst: dst,
+		Dir: XFRM_DIR_OUT,
+	}
+	tmpl := XfrmPolicyTmpl{
+		Src:   net.ParseIP("127.0.0.1"),
+		Dst:   net.ParseIP("127.0.0.2"),
+		Proto: XFRM_PROTO_ESP,
+		Mode:  XFRM_MODE_TUNNEL,
+	}
+	policy.Tmpls = append(policy.Tmpls, tmpl)
+	if err := XfrmPolicyAdd(&policy); err != nil {
+		t.Fatal(err)
+	}
+	policies, err := XfrmPolicyList(FAMILY_ALL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(policies) != 1 {
+		t.Fatal("Policy not added properly")
+	}
+
+	if err = XfrmPolicyDel(&policy); err != nil {
+		t.Fatal(err)
+	}
+
+	policies, err = XfrmPolicyList(FAMILY_ALL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(policies) != 0 {
+		t.Fatal("Policy not removed properly")
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state.go
new file mode 100644
index 0000000..5b8f2df
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state.go
@@ -0,0 +1,53 @@
+package netlink
+
+import (
+	"net"
+)
+
+// XfrmStateAlgo represents the algorithm to use for the ipsec encryption.
+type XfrmStateAlgo struct {
+	Name        string
+	Key         []byte
+	TruncateLen int // Auth only
+}
+
+// EncapType is an enum representing an ipsec template direction.
+type EncapType uint8
+
+const (
+	XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1
+	XFRM_ENCAP_ESPINUDP
+)
+
+func (e EncapType) String() string {
+	switch e {
+	case XFRM_ENCAP_ESPINUDP_NONIKE:
+		return "espinudp-nonike"
+	case XFRM_ENCAP_ESPINUDP:
+		return "espinudp"
+	}
+	return "unknown"
+}
+
+// XfrmEncap represents the encapsulation to use for the ipsec encryption.
+type XfrmStateEncap struct {
+	Type            EncapType
+	SrcPort         int
+	DstPort         int
+	OriginalAddress net.IP
+}
+
+// XfrmState represents the state of an ipsec policy. It optionally
+// contains an XfrmStateAlgo for encryption and one for authentication.
+type XfrmState struct {
+	Dst          net.IP
+	Src          net.IP
+	Proto        Proto
+	Mode         Mode
+	Spi          int
+	Reqid        int
+	ReplayWindow int
+	Auth         *XfrmStateAlgo
+	Crypt        *XfrmStateAlgo
+	Encap        *XfrmStateEncap
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
new file mode 100644
index 0000000..0f1fbd0
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
@@ -0,0 +1,181 @@
+package netlink
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+func writeStateAlgo(a *XfrmStateAlgo) []byte {
+	algo := nl.XfrmAlgo{
+		AlgKeyLen: uint32(len(a.Key) * 8),
+		AlgKey:    a.Key,
+	}
+	end := len(a.Name)
+	if end > 64 {
+		end = 64
+	}
+	copy(algo.AlgName[:end], a.Name)
+	return algo.Serialize()
+}
+
+func writeStateAlgoAuth(a *XfrmStateAlgo) []byte {
+	algo := nl.XfrmAlgoAuth{
+		AlgKeyLen:   uint32(len(a.Key) * 8),
+		AlgTruncLen: uint32(a.TruncateLen),
+		AlgKey:      a.Key,
+	}
+	end := len(a.Name)
+	if end > 64 {
+		end = 64
+	}
+	copy(algo.AlgName[:end], a.Name)
+	return algo.Serialize()
+}
+
+// XfrmStateAdd will add an xfrm state to the system.
+// Equivalent to: `ip xfrm state add $state`
+func XfrmStateAdd(state *XfrmState) error {
+	// A state with spi 0 can't be deleted so don't allow it to be set
+	if state.Spi == 0 {
+		return fmt.Errorf("Spi must be set when adding xfrm state.")
+	}
+	req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWSA, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+	msg := &nl.XfrmUsersaInfo{}
+	msg.Family = uint16(nl.GetIPFamily(state.Dst))
+	msg.Id.Daddr.FromIP(state.Dst)
+	msg.Saddr.FromIP(state.Src)
+	msg.Id.Proto = uint8(state.Proto)
+	msg.Mode = uint8(state.Mode)
+	msg.Id.Spi = nl.Swap32(uint32(state.Spi))
+	msg.Reqid = uint32(state.Reqid)
+	msg.ReplayWindow = uint8(state.ReplayWindow)
+	msg.Lft.SoftByteLimit = nl.XFRM_INF
+	msg.Lft.HardByteLimit = nl.XFRM_INF
+	msg.Lft.SoftPacketLimit = nl.XFRM_INF
+	msg.Lft.HardPacketLimit = nl.XFRM_INF
+	req.AddData(msg)
+
+	if state.Auth != nil {
+		out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth))
+		req.AddData(out)
+	}
+	if state.Crypt != nil {
+		out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt))
+		req.AddData(out)
+	}
+	if state.Encap != nil {
+		encapData := make([]byte, nl.SizeofXfrmEncapTmpl)
+		encap := nl.DeserializeXfrmEncapTmpl(encapData)
+		encap.EncapType = uint16(state.Encap.Type)
+		encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort))
+		encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort))
+		encap.EncapOa.FromIP(state.Encap.OriginalAddress)
+		out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData)
+		req.AddData(out)
+	}
+
+	_, err := req.Execute(syscall.NETLINK_XFRM, 0)
+	return err
+}
+
+// XfrmStateDel will delete an xfrm state from the system. Note that
+// the Algos are ignored when matching the state to delete.
+// Equivalent to: `ip xfrm state del $state`
+func XfrmStateDel(state *XfrmState) error {
+	req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELSA, syscall.NLM_F_ACK)
+
+	msg := &nl.XfrmUsersaId{}
+	msg.Daddr.FromIP(state.Dst)
+	msg.Family = uint16(nl.GetIPFamily(state.Dst))
+	msg.Proto = uint8(state.Proto)
+	msg.Spi = nl.Swap32(uint32(state.Spi))
+	req.AddData(msg)
+
+	saddr := nl.XfrmAddress{}
+	saddr.FromIP(state.Src)
+	srcdata := nl.NewRtAttr(nl.XFRMA_SRCADDR, saddr.Serialize())
+
+	req.AddData(srcdata)
+
+	_, err := req.Execute(syscall.NETLINK_XFRM, 0)
+	return err
+}
+
+// XfrmStateList gets a list of xfrm states in the system.
+// Equivalent to: `ip xfrm state show`.
+// The list can be filtered by ip family.
+func XfrmStateList(family int) ([]XfrmState, error) {
+	req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
+
+	msg := nl.NewIfInfomsg(family)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
+	if err != nil {
+		return nil, err
+	}
+
+	res := make([]XfrmState, 0)
+	for _, m := range msgs {
+		msg := nl.DeserializeXfrmUsersaInfo(m)
+
+		if family != FAMILY_ALL && family != int(msg.Family) {
+			continue
+		}
+
+		var state XfrmState
+
+		state.Dst = msg.Id.Daddr.ToIP()
+		state.Src = msg.Saddr.ToIP()
+		state.Proto = Proto(msg.Id.Proto)
+		state.Mode = Mode(msg.Mode)
+		state.Spi = int(nl.Swap32(msg.Id.Spi))
+		state.Reqid = int(msg.Reqid)
+		state.ReplayWindow = int(msg.ReplayWindow)
+
+		attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+
+		for _, attr := range attrs {
+			switch attr.Attr.Type {
+			case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
+				var resAlgo *XfrmStateAlgo
+				if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
+					if state.Auth == nil {
+						state.Auth = new(XfrmStateAlgo)
+					}
+					resAlgo = state.Auth
+				} else {
+					state.Crypt = new(XfrmStateAlgo)
+					resAlgo = state.Crypt
+				}
+				algo := nl.DeserializeXfrmAlgo(attr.Value[:])
+				(*resAlgo).Name = nl.BytesToString(algo.AlgName[:])
+				(*resAlgo).Key = algo.AlgKey
+			case nl.XFRMA_ALG_AUTH_TRUNC:
+				if state.Auth == nil {
+					state.Auth = new(XfrmStateAlgo)
+				}
+				algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:])
+				state.Auth.Name = nl.BytesToString(algo.AlgName[:])
+				state.Auth.Key = algo.AlgKey
+				state.Auth.TruncateLen = int(algo.AlgTruncLen)
+			case nl.XFRMA_ENCAP:
+				encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
+				state.Encap = new(XfrmStateEncap)
+				state.Encap.Type = EncapType(encap.EncapType)
+				state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport))
+				state.Encap.DstPort = int(nl.Swap16(encap.EncapDport))
+				state.Encap.OriginalAddress = encap.EncapOa.ToIP()
+			}
+
+		}
+		res = append(res, state)
+	}
+	return res, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netlink/xfrm_state_test.go b/vendor/src/github.com/vishvananda/netlink/xfrm_state_test.go
new file mode 100644
index 0000000..df57ef8
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netlink/xfrm_state_test.go
@@ -0,0 +1,50 @@
+package netlink
+
+import (
+	"net"
+	"testing"
+)
+
+func TestXfrmStateAddDel(t *testing.T) {
+	tearDown := setUpNetlinkTest(t)
+	defer tearDown()
+
+	state := XfrmState{
+		Src:   net.ParseIP("127.0.0.1"),
+		Dst:   net.ParseIP("127.0.0.2"),
+		Proto: XFRM_PROTO_ESP,
+		Mode:  XFRM_MODE_TUNNEL,
+		Spi:   1,
+		Auth: &XfrmStateAlgo{
+			Name: "hmac(sha256)",
+			Key:  []byte("abcdefghijklmnopqrstuvwzyzABCDEF"),
+		},
+		Crypt: &XfrmStateAlgo{
+			Name: "cbc(aes)",
+			Key:  []byte("abcdefghijklmnopqrstuvwzyzABCDEF"),
+		},
+	}
+	if err := XfrmStateAdd(&state); err != nil {
+		t.Fatal(err)
+	}
+	policies, err := XfrmStateList(FAMILY_ALL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(policies) != 1 {
+		t.Fatal("State not added properly")
+	}
+
+	if err = XfrmStateDel(&state); err != nil {
+		t.Fatal(err)
+	}
+
+	policies, err = XfrmStateList(FAMILY_ALL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(policies) != 0 {
+		t.Fatal("State not removed properly")
+	}
+}
diff --git a/vendor/src/github.com/vishvananda/netns/LICENSE b/vendor/src/github.com/vishvananda/netns/LICENSE
new file mode 100644
index 0000000..9f64db8
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/LICENSE
@@ -0,0 +1,192 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014 Vishvananda Ishaya.
+   Copyright 2014 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/src/github.com/vishvananda/netns/README.md b/vendor/src/github.com/vishvananda/netns/README.md
new file mode 100644
index 0000000..24a4003
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/README.md
@@ -0,0 +1,49 @@
+# netns - network namespaces in go #
+
+The netns package provides an ultra-simple interface for handling
+network namespaces in go. Changing namespaces requires elevated
+privileges, so in most cases this code needs to be run as root.
+
+## Local Build and Test ##
+
+You can use go get command:
+
+    go get github.com/vishvananda/netns
+
+Testing (requires root):
+
+    sudo -E go test github.com/vishvananda/netns
+
+## Example ##
+
+```go
+package main
+
+import (
+    "net"
+    "runtime"
+    "github.com/vishvananada/netns"
+)
+
+func main() {
+    // Lock the OS Thread so we don't accidentally switch namespaces
+    runtime.LockOSThread()
+    defer runtime.UnlockOSThread()
+
+    // Save the current network namespace
+    origns, _ := netns.Get()
+    defer origns.Close()
+
+    // Create a new network namespace
+    newns, _ := netns.New()
+    defer newns.Close()
+
+    // Do something with tne network namespace
+    ifaces, _ := net.Interfaces()
+    fmt.Printf("Interfaces: %v\n", ifaces)
+
+    // Switch back to the original namespace
+    netns.Set(origns)
+}
+
+```
diff --git a/vendor/src/github.com/vishvananda/netns/netns.go b/vendor/src/github.com/vishvananda/netns/netns.go
new file mode 100644
index 0000000..3878da3
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns.go
@@ -0,0 +1,66 @@
+// Package netns allows ultra-simple network namespace handling. NsHandles
+// can be retrieved and set. Note that the current namespace is thread
+// local so actions that set and reset namespaces should use LockOSThread
+// to make sure the namespace doesn't change due to a goroutine switch.
+// It is best to close NsHandles when you are done with them. This can be
+// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
+// requires elevated privileges, so in most cases this code needs to be run
+// as root.
+package netns
+
+import (
+	"fmt"
+	"syscall"
+)
+// NsHandle is a handle to a network namespace. It can be cast directly
+// to an int and used as a file descriptor.
+type NsHandle int
+
+// Equal determines if two network handles refer to the same network
+// namespace. This is done by comparing the device and inode that the
+// file descripors point to.
+func (ns NsHandle) Equal(other NsHandle) bool {
+	if ns == other {
+		return true
+	}
+	var s1, s2 syscall.Stat_t
+	if err := syscall.Fstat(int(ns), &s1); err != nil {
+		return false
+	}
+	if err := syscall.Fstat(int(other), &s2); err != nil {
+		return false
+	}
+	return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino)
+}
+
+// String shows the file descriptor number and its dev and inode.
+func (ns NsHandle) String() string {
+	var s syscall.Stat_t
+	if ns == -1 {
+		return "NS(None)"
+	}
+	if err := syscall.Fstat(int(ns), &s); err != nil {
+		return fmt.Sprintf("NS(%d: unknown)", ns)
+	}
+	return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino)
+}
+
+// IsOpen returns true if Close() has not been called.
+func (ns NsHandle) IsOpen() bool {
+	return ns != -1
+}
+
+// Close closes the NsHandle and resets its file descriptor to -1.
+// It is not safe to use an NsHandle after Close() is called.
+func (ns *NsHandle) Close() error {
+	if err := syscall.Close(int(*ns)); err != nil {
+		return err
+	}
+	(*ns) = -1
+	return nil
+}
+
+// Get an empty (closed) NsHandle
+func None() NsHandle {
+	return NsHandle(-1)
+}
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux.go b/vendor/src/github.com/vishvananda/netns/netns_linux.go
new file mode 100644
index 0000000..1cf5e13
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux.go
@@ -0,0 +1,206 @@
+package netns
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+)
+
+const (
+	// These constants belong in the syscall library but have not been
+	// added yet.
+	CLONE_NEWUTS  = 0x04000000 /* New utsname group? */
+	CLONE_NEWIPC  = 0x08000000 /* New ipcs */
+	CLONE_NEWUSER = 0x10000000 /* New user namespace */
+	CLONE_NEWPID  = 0x20000000 /* New pid namespace */
+	CLONE_NEWNET  = 0x40000000 /* New network namespace */
+	CLONE_IO      = 0x80000000 /* Get io context */
+)
+
+// Setns sets namespace using syscall. Note that this should be a method
+// in syscall but it has not been added.
+func Setns(ns NsHandle, nstype int) (err error) {
+	_, _, e1 := syscall.Syscall(SYS_SETNS, uintptr(ns), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+// Set sets the current network namespace to the namespace represented
+// by NsHandle.
+func Set(ns NsHandle) (err error) {
+	return Setns(ns, CLONE_NEWNET)
+}
+
+// New creates a new network namespace and returns a handle to it.
+func New() (ns NsHandle, err error) {
+	if err := syscall.Unshare(CLONE_NEWNET); err != nil {
+		return -1, err
+	}
+	return Get()
+}
+
+// Get gets a handle to the current threads network namespace.
+func Get() (NsHandle, error) {
+	return GetFromThread(os.Getpid(), syscall.Gettid())
+}
+
+// GetFromName gets a handle to a named network namespace such as one
+// created by `ip netns add`.
+func GetFromName(name string) (NsHandle, error) {
+	fd, err := syscall.Open(fmt.Sprintf("/var/run/netns/%s", name), syscall.O_RDONLY, 0)
+	if err != nil {
+		return -1, err
+	}
+	return NsHandle(fd), nil
+}
+
+// GetFromPid gets a handle to the network namespace of a given pid.
+func GetFromPid(pid int) (NsHandle, error) {
+	fd, err := syscall.Open(fmt.Sprintf("/proc/%d/ns/net", pid), syscall.O_RDONLY, 0)
+	if err != nil {
+		return -1, err
+	}
+	return NsHandle(fd), nil
+}
+
+// GetFromThread gets a handle to the network namespace of a given pid and tid.
+func GetFromThread(pid, tid int) (NsHandle, error) {
+	name := fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)
+	fd, err := syscall.Open(name, syscall.O_RDONLY, 0)
+	if err != nil {
+		return -1, err
+	}
+	return NsHandle(fd), nil
+}
+
+// GetFromDocker gets a handle to the network namespace of a docker container.
+// Id is prefixed matched against the running docker containers, so a short
+// identifier can be used as long as it isn't ambiguous.
+func GetFromDocker(id string) (NsHandle, error) {
+	pid, err := getPidForContainer(id)
+	if err != nil {
+		return -1, err
+	}
+	return GetFromPid(pid)
+}
+
+// borrowed from docker/utils/utils.go
+func findCgroupMountpoint(cgroupType string) (string, error) {
+	output, err := ioutil.ReadFile("/proc/mounts")
+	if err != nil {
+		return "", err
+	}
+
+	// /proc/mounts has 6 fields per line, one mount per line, e.g.
+	// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
+	for _, line := range strings.Split(string(output), "\n") {
+		parts := strings.Split(line, " ")
+		if len(parts) == 6 && parts[2] == "cgroup" {
+			for _, opt := range strings.Split(parts[3], ",") {
+				if opt == cgroupType {
+					return parts[1], nil
+				}
+			}
+		}
+	}
+
+	return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
+}
+
+// Returns the relative path to the cgroup docker is running in.
+// borrowed from docker/utils/utils.go
+// modified to get the docker pid instead of using /proc/self
+func getThisCgroup(cgroupType string) (string, error) {
+	dockerpid, err := ioutil.ReadFile("/var/run/docker.pid")
+	if err != nil {
+		return "", err
+	}
+	result := strings.Split(string(dockerpid), "\n")
+	if len(result) == 0 || len(result[0]) == 0 {
+		return "", fmt.Errorf("docker pid not found in /var/run/docker.pid")
+	}
+	pid, err := strconv.Atoi(result[0])
+
+	output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid))
+	if err != nil {
+		return "", err
+	}
+	for _, line := range strings.Split(string(output), "\n") {
+		parts := strings.Split(line, ":")
+		// any type used by docker should work
+		if parts[1] == cgroupType {
+			return parts[2], nil
+		}
+	}
+	return "", fmt.Errorf("cgroup '%s' not found in /proc/%d/cgroup", cgroupType, pid)
+}
+
+// Returns the first pid in a container.
+// borrowed from docker/utils/utils.go
+// modified to only return the first pid
+// modified to glob with id
+// modified to search for newer docker containers
+func getPidForContainer(id string) (int, error) {
+	pid := 0
+
+	// memory is chosen randomly, any cgroup used by docker works
+	cgroupType := "memory"
+
+	cgroupRoot, err := findCgroupMountpoint(cgroupType)
+	if err != nil {
+		return pid, err
+	}
+
+	cgroupThis, err := getThisCgroup(cgroupType)
+	if err != nil {
+		return pid, err
+	}
+
+	id += "*"
+
+	attempts := []string{
+		filepath.Join(cgroupRoot, cgroupThis, id, "tasks"),
+		// With more recent lxc versions use, cgroup will be in lxc/
+		filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"),
+		// With more recent dockee, cgroup will be in docker/
+		filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"),
+	}
+
+	var filename string
+	for _, attempt := range attempts {
+		filenames, _ := filepath.Glob(attempt)
+		if len(filenames) > 1 {
+			return pid, fmt.Errorf("Ambiguous id supplied: %v", filenames)
+		} else if len(filenames) == 1 {
+			filename = filenames[0]
+			break
+		}
+	}
+
+	if filename == "" {
+		return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1])
+	}
+
+	output, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return pid, err
+	}
+
+	result := strings.Split(string(output), "\n")
+	if len(result) == 0 || len(result[0]) == 0 {
+		return pid, fmt.Errorf("No pid found for container")
+	}
+
+	pid, err = strconv.Atoi(result[0])
+	if err != nil {
+		return pid, fmt.Errorf("Invalid pid '%s': %s", result[0], err)
+	}
+
+	return pid, nil
+}
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux_386.go b/vendor/src/github.com/vishvananda/netns/netns_linux_386.go
new file mode 100644
index 0000000..0a6fe49
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux_386.go
@@ -0,0 +1,5 @@
+package netns
+
+const (
+	SYS_SETNS = 346
+)
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux_amd.go b/vendor/src/github.com/vishvananda/netns/netns_linux_amd.go
new file mode 100644
index 0000000..bbf3f4d
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux_amd.go
@@ -0,0 +1,5 @@
+package netns
+
+const (
+	SYS_SETNS = 308
+)
diff --git a/vendor/src/github.com/vishvananda/netns/netns_linux_arm.go b/vendor/src/github.com/vishvananda/netns/netns_linux_arm.go
new file mode 100644
index 0000000..e35cb07
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_linux_arm.go
@@ -0,0 +1,5 @@
+package netns
+
+const (
+	SYS_SETNS = 374
+)
diff --git a/vendor/src/github.com/vishvananda/netns/netns_test.go b/vendor/src/github.com/vishvananda/netns/netns_test.go
new file mode 100644
index 0000000..e51981c
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_test.go
@@ -0,0 +1,66 @@
+package netns
+
+import (
+	"runtime"
+	"sync"
+	"testing"
+)
+
+func TestGetNewSetDelete(t *testing.T) {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	origns, err := Get()
+	if err != nil {
+		t.Fatal(err)
+	}
+	newns, err := New()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if origns.Equal(newns) {
+		t.Fatal("New ns failed")
+	}
+	if err := Set(origns); err != nil {
+		t.Fatal(err)
+	}
+	newns.Close()
+	if newns.IsOpen() {
+		t.Fatal("newns still open after close", newns)
+	}
+	ns, err := Get()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !ns.Equal(origns) {
+		t.Fatal("Reset ns failed", origns, newns, ns)
+	}
+}
+
+func TestNone(t *testing.T) {
+	ns := None()
+	if ns.IsOpen() {
+		t.Fatal("None ns is open", ns)
+	}
+}
+
+func TestThreaded(t *testing.T) {
+	ncpu := runtime.GOMAXPROCS(-1)
+	if ncpu < 2 {
+		t.Skip("-cpu=2 or larger required")
+	}
+
+	// Lock this thread simply to ensure other threads get used.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	wg := &sync.WaitGroup{}
+	for i := 0; i < ncpu; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			TestGetNewSetDelete(t)
+		}()
+	}
+	wg.Wait()
+}
diff --git a/vendor/src/github.com/vishvananda/netns/netns_unspecified.go b/vendor/src/github.com/vishvananda/netns/netns_unspecified.go
new file mode 100644
index 0000000..42a804f
--- /dev/null
+++ b/vendor/src/github.com/vishvananda/netns/netns_unspecified.go
@@ -0,0 +1,35 @@
+// +build !linux
+
+package netns
+
+import (
+	"errors"
+)
+
+var (
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+func Set(ns Namespace) (err error) {
+	return ErrNotImplemented
+}
+
+func New() (ns Namespace, err error) {
+	return -1, ErrNotImplemented
+}
+
+func Get() (Namespace, error) {
+	return -1, ErrNotImplemented
+}
+
+func GetFromName(name string) (Namespace, error) {
+	return -1, ErrNotImplemented
+}
+
+func GetFromPid(pid int) (Namespace, error) {
+	return -1, ErrNotImplemented
+}
+
+func GetFromDocker(id string) (Namespace, error) {
+	return -1, ErrNotImplemented
+}
diff --git a/volume/drivers/adapter.go b/volume/drivers/adapter.go
new file mode 100644
index 0000000..6846a3a
--- /dev/null
+++ b/volume/drivers/adapter.go
@@ -0,0 +1,60 @@
+package volumedrivers
+
+import "github.com/docker/docker/volume"
+
+type volumeDriverAdapter struct {
+	name  string
+	proxy *volumeDriverProxy
+}
+
+func (a *volumeDriverAdapter) Name() string {
+	return a.name
+}
+
+func (a *volumeDriverAdapter) Create(name string) (volume.Volume, error) {
+	err := a.proxy.Create(name)
+	if err != nil {
+		return nil, err
+	}
+	return &volumeAdapter{
+		proxy:      a.proxy,
+		name:       name,
+		driverName: a.name}, nil
+}
+
+func (a *volumeDriverAdapter) Remove(v volume.Volume) error {
+	return a.proxy.Remove(v.Name())
+}
+
+type volumeAdapter struct {
+	proxy      *volumeDriverProxy
+	name       string
+	driverName string
+	eMount     string // ephemeral host volume path
+}
+
+func (a *volumeAdapter) Name() string {
+	return a.name
+}
+
+func (a *volumeAdapter) DriverName() string {
+	return a.driverName
+}
+
+func (a *volumeAdapter) Path() string {
+	if len(a.eMount) > 0 {
+		return a.eMount
+	}
+	m, _ := a.proxy.Path(a.name)
+	return m
+}
+
+func (a *volumeAdapter) Mount() (string, error) {
+	var err error
+	a.eMount, err = a.proxy.Mount(a.name)
+	return a.eMount, err
+}
+
+func (a *volumeAdapter) Unmount() error {
+	return a.proxy.Unmount(a.name)
+}
diff --git a/volume/drivers/api.go b/volume/drivers/api.go
new file mode 100644
index 0000000..1b98fa7
--- /dev/null
+++ b/volume/drivers/api.go
@@ -0,0 +1,20 @@
+package volumedrivers
+
+import "github.com/docker/docker/volume"
+
+type client interface {
+	Call(string, interface{}, interface{}) error
+}
+
+func NewVolumeDriver(name string, c client) volume.Driver {
+	proxy := &volumeDriverProxy{c}
+	return &volumeDriverAdapter{name, proxy}
+}
+
+type VolumeDriver interface {
+	Create(name string) (err error)
+	Remove(name string) (err error)
+	Path(name string) (mountpoint string, err error)
+	Mount(name string) (mountpoint string, err error)
+	Unmount(name string) (err error)
+}
diff --git a/volume/drivers/extpoint.go b/volume/drivers/extpoint.go
new file mode 100644
index 0000000..b002a0f
--- /dev/null
+++ b/volume/drivers/extpoint.go
@@ -0,0 +1,61 @@
+package volumedrivers
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/docker/volume"
+)
+
+// currently created by hand. generation tool would generate this like:
+// $ extpoint-gen Driver > volume/extpoint.go
+
+var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver)}
+
+type driverExtpoint struct {
+	extensions map[string]volume.Driver
+	sync.Mutex
+}
+
+func Register(extension volume.Driver, name string) bool {
+	drivers.Lock()
+	defer drivers.Unlock()
+	if name == "" {
+		return false
+	}
+	_, exists := drivers.extensions[name]
+	if exists {
+		return false
+	}
+	drivers.extensions[name] = extension
+	return true
+}
+
+func Unregister(name string) bool {
+	drivers.Lock()
+	defer drivers.Unlock()
+	_, exists := drivers.extensions[name]
+	if !exists {
+		return false
+	}
+	delete(drivers.extensions, name)
+	return true
+}
+
+func Lookup(name string) (volume.Driver, error) {
+	drivers.Lock()
+	defer drivers.Unlock()
+	ext, ok := drivers.extensions[name]
+	if ok {
+		return ext, nil
+	}
+	pl, err := plugins.Get(name, "VolumeDriver")
+	if err != nil {
+		return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
+	}
+
+	d := NewVolumeDriver(name, pl.Client)
+	drivers.extensions[name] = d
+	return d, nil
+}
diff --git a/volume/drivers/proxy.go b/volume/drivers/proxy.go
new file mode 100644
index 0000000..545e490
--- /dev/null
+++ b/volume/drivers/proxy.go
@@ -0,0 +1,74 @@
+package volumedrivers
+
+import "fmt"
+
+// currently created by hand. generation tool would generate this like:
+// $ rpc-gen volume/drivers/api.go VolumeDriver > volume/drivers/proxy.go
+
+type volumeDriverRequest struct {
+	Name string
+}
+
+type volumeDriverResponse struct {
+	Mountpoint string `json:",ommitempty"`
+	Err        error  `json:",ommitempty"`
+}
+
+type volumeDriverProxy struct {
+	c client
+}
+
+func (pp *volumeDriverProxy) Create(name string) error {
+	args := volumeDriverRequest{name}
+	var ret volumeDriverResponse
+	err := pp.c.Call("VolumeDriver.Create", args, &ret)
+	if err != nil {
+		return pp.fmtError(name, err)
+	}
+	return pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Remove(name string) error {
+	args := volumeDriverRequest{name}
+	var ret volumeDriverResponse
+	err := pp.c.Call("VolumeDriver.Remove", args, &ret)
+	if err != nil {
+		return pp.fmtError(name, err)
+	}
+	return pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Path(name string) (string, error) {
+	args := volumeDriverRequest{name}
+	var ret volumeDriverResponse
+	if err := pp.c.Call("VolumeDriver.Path", args, &ret); err != nil {
+		return "", pp.fmtError(name, err)
+	}
+	return ret.Mountpoint, pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Mount(name string) (string, error) {
+	args := volumeDriverRequest{name}
+	var ret volumeDriverResponse
+	if err := pp.c.Call("VolumeDriver.Mount", args, &ret); err != nil {
+		return "", pp.fmtError(name, err)
+	}
+	return ret.Mountpoint, pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) Unmount(name string) error {
+	args := volumeDriverRequest{name}
+	var ret volumeDriverResponse
+	err := pp.c.Call("VolumeDriver.Unmount", args, &ret)
+	if err != nil {
+		return pp.fmtError(name, err)
+	}
+	return pp.fmtError(name, ret.Err)
+}
+
+func (pp *volumeDriverProxy) fmtError(name string, err error) error {
+	if err == nil {
+		return nil
+	}
+	return fmt.Errorf("External volume driver request failed for %s: %v", name, err)
+}
diff --git a/volume/local/local.go b/volume/local/local.go
new file mode 100644
index 0000000..3082e72
--- /dev/null
+++ b/volume/local/local.go
@@ -0,0 +1,126 @@
+package local
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/docker/volume"
+)
+
+func New(rootDirectory string) (*Root, error) {
+	if err := os.MkdirAll(rootDirectory, 0700); err != nil {
+		return nil, err
+	}
+	r := &Root{
+		path:    rootDirectory,
+		volumes: make(map[string]*Volume),
+	}
+	dirs, err := ioutil.ReadDir(rootDirectory)
+	if err != nil {
+		return nil, err
+	}
+	for _, d := range dirs {
+		name := filepath.Base(d.Name())
+		r.volumes[name] = &Volume{
+			driverName: r.Name(),
+			name:       name,
+			path:       filepath.Join(rootDirectory, name),
+		}
+	}
+	return r, nil
+}
+
+type Root struct {
+	m       sync.Mutex
+	path    string
+	volumes map[string]*Volume
+}
+
+func (r *Root) Name() string {
+	return "local"
+}
+
+func (r *Root) Create(name string) (volume.Volume, error) {
+	r.m.Lock()
+	defer r.m.Unlock()
+	v, exists := r.volumes[name]
+	if !exists {
+		path := filepath.Join(r.path, name)
+		if err := os.Mkdir(path, 0755); err != nil {
+			if os.IsExist(err) {
+				return nil, fmt.Errorf("volume already exists under %s", path)
+			}
+			return nil, err
+		}
+		v = &Volume{
+			driverName: r.Name(),
+			name:       name,
+			path:       path,
+		}
+		r.volumes[name] = v
+	}
+	v.use()
+	return v, nil
+}
+
+func (r *Root) Remove(v volume.Volume) error {
+	r.m.Lock()
+	defer r.m.Unlock()
+	lv, ok := v.(*Volume)
+	if !ok {
+		return errors.New("unknown volume type")
+	}
+	lv.release()
+	if lv.usedCount == 0 {
+		delete(r.volumes, lv.name)
+		return os.RemoveAll(lv.path)
+	}
+	return nil
+}
+
+type Volume struct {
+	m         sync.Mutex
+	usedCount int
+	// unique name of the volume
+	name string
+	// path is the path on the host where the data lives
+	path string
+	// driverName is the name of the driver that created the volume.
+	driverName string
+}
+
+func (v *Volume) Name() string {
+	return v.name
+}
+
+func (v *Volume) DriverName() string {
+	return v.driverName
+}
+
+func (v *Volume) Path() string {
+	return v.path
+}
+
+func (v *Volume) Mount() (string, error) {
+	return v.path, nil
+}
+
+func (v *Volume) Unmount() error {
+	return nil
+}
+
+func (v *Volume) use() {
+	v.m.Lock()
+	v.usedCount++
+	v.m.Unlock()
+}
+
+func (v *Volume) release() {
+	v.m.Lock()
+	v.usedCount--
+	v.m.Unlock()
+}
diff --git a/volume/volume.go b/volume/volume.go
new file mode 100644
index 0000000..6edcae3
--- /dev/null
+++ b/volume/volume.go
@@ -0,0 +1,26 @@
+package volume
+
+const DefaultDriverName = "local"
+
+type Driver interface {
+	// Name returns the name of the volume driver.
+	Name() string
+	// Create makes a new volume with the given id.
+	Create(string) (Volume, error)
+	// Remove deletes the volume.
+	Remove(Volume) error
+}
+
+type Volume interface {
+	// Name returns the name of the volume
+	Name() string
+	// DriverName returns the name of the driver which owns this volume.
+	DriverName() string
+	// Path returns the absolute path to the volume.
+	Path() string
+	// Mount mounts the volume and returns the absolute path to
+	// where it can be consumed.
+	Mount() (string, error)
+	// Unmount unmounts the volume when it is no longer in use.
+	Unmount() error
+}
diff --git a/volumes/repository.go b/volumes/repository.go
deleted file mode 100644
index dbd7a5f..0000000
--- a/volumes/repository.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package volumes
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/common"
-)
-
-type Repository struct {
-	configPath string
-	driver     graphdriver.Driver
-	volumes    map[string]*Volume
-	lock       sync.Mutex
-}
-
-func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) {
-	abspath, err := filepath.Abs(configPath)
-	if err != nil {
-		return nil, err
-	}
-
-	// Create the config path
-	if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-
-	repo := &Repository{
-		driver:     driver,
-		configPath: abspath,
-		volumes:    make(map[string]*Volume),
-	}
-
-	return repo, repo.restore()
-}
-
-func (r *Repository) newVolume(path string, writable bool) (*Volume, error) {
-	var (
-		isBindMount bool
-		err         error
-		id          = common.GenerateRandomID()
-	)
-	if path != "" {
-		isBindMount = true
-	}
-
-	if path == "" {
-		path, err = r.createNewVolumePath(id)
-		if err != nil {
-			return nil, err
-		}
-	}
-	path = filepath.Clean(path)
-
-	// Ignore the error here since the path may not exist
-	// Really just want to make sure the path we are using is real(or non-existant)
-	if cleanPath, err := filepath.EvalSymlinks(path); err == nil {
-		path = cleanPath
-	}
-
-	v := &Volume{
-		ID:          id,
-		Path:        path,
-		repository:  r,
-		Writable:    writable,
-		containers:  make(map[string]struct{}),
-		configPath:  r.configPath + "/" + id,
-		IsBindMount: isBindMount,
-	}
-
-	if err := v.initialize(); err != nil {
-		return nil, err
-	}
-
-	return v, r.add(v)
-}
-
-func (r *Repository) restore() error {
-	dir, err := ioutil.ReadDir(r.configPath)
-	if err != nil {
-		return err
-	}
-
-	for _, v := range dir {
-		id := v.Name()
-		vol := &Volume{
-			ID:         id,
-			configPath: r.configPath + "/" + id,
-			containers: make(map[string]struct{}),
-		}
-		if err := vol.FromDisk(); err != nil {
-			if !os.IsNotExist(err) {
-				log.Debugf("Error restoring volume: %v", err)
-				continue
-			}
-			if err := vol.initialize(); err != nil {
-				log.Debugf("%s", err)
-				continue
-			}
-		}
-		if err := r.add(vol); err != nil {
-			log.Debugf("Error restoring volume: %v", err)
-		}
-	}
-	return nil
-}
-
-func (r *Repository) Get(path string) *Volume {
-	r.lock.Lock()
-	vol := r.get(path)
-	r.lock.Unlock()
-	return vol
-}
-
-func (r *Repository) get(path string) *Volume {
-	path, err := filepath.EvalSymlinks(path)
-	if err != nil {
-		return nil
-	}
-	return r.volumes[filepath.Clean(path)]
-}
-
-func (r *Repository) add(volume *Volume) error {
-	if vol := r.get(volume.Path); vol != nil {
-		return fmt.Errorf("Volume exists: %s", volume.ID)
-	}
-	r.volumes[volume.Path] = volume
-	return nil
-}
-
-func (r *Repository) Delete(path string) error {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-	path, err := filepath.EvalSymlinks(path)
-	if err != nil {
-		return err
-	}
-	volume := r.get(filepath.Clean(path))
-	if volume == nil {
-		return fmt.Errorf("Volume %s does not exist", path)
-	}
-
-	containers := volume.Containers()
-	if len(containers) > 0 {
-		return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers)
-	}
-
-	if err := os.RemoveAll(volume.configPath); err != nil {
-		return err
-	}
-
-	if !volume.IsBindMount {
-		if err := r.driver.Remove(volume.ID); err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
-		}
-	}
-
-	delete(r.volumes, volume.Path)
-	return nil
-}
-
-func (r *Repository) createNewVolumePath(id string) (string, error) {
-	if err := r.driver.Create(id, ""); err != nil {
-		return "", err
-	}
-
-	path, err := r.driver.Get(id, "")
-	if err != nil {
-		return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err)
-	}
-
-	return path, nil
-}
-
-func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	if path == "" {
-		return r.newVolume(path, writable)
-	}
-
-	if v := r.get(path); v != nil {
-		return v, nil
-	}
-
-	return r.newVolume(path, writable)
-}
diff --git a/volumes/repository_test.go b/volumes/repository_test.go
deleted file mode 100644
index 801c225..0000000
--- a/volumes/repository_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package volumes
-
-import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"testing"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	_ "github.com/docker/docker/daemon/graphdriver/vfs"
-)
-
-func TestRepositoryFindOrCreate(t *testing.T) {
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(root)
-	repo, err := newRepo(root)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// no path
-	v, err := repo.FindOrCreateVolume("", true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// FIXME: volumes are heavily dependent on the vfs driver, but this should not be so!
-	expected := filepath.Join(root, "repo-graph", "vfs", "dir", v.ID)
-	if v.Path != expected {
-		t.Fatalf("expected new path to be created in %s, got %s", expected, v.Path)
-	}
-
-	// with a non-existant path
-	dir := filepath.Join(root, "doesntexist")
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if v.Path != dir {
-		t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
-	}
-
-	if _, err := os.Stat(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-	// with a pre-existing path
-	// can just use the same path from above since it now exists
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if v.Path != dir {
-		t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path)
-	}
-
-}
-
-func TestRepositoryGet(t *testing.T) {
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(root)
-	repo, err := newRepo(root)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	v, err := repo.FindOrCreateVolume("", true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	v2 := repo.Get(v.Path)
-	if v2 == nil {
-		t.Fatalf("expected to find volume but didn't")
-	}
-	if v2 != v {
-		t.Fatalf("expected get to return same volume")
-	}
-}
-
-func TestRepositoryDelete(t *testing.T) {
-	root, err := ioutil.TempDir(os.TempDir(), "volumes")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(root)
-	repo, err := newRepo(root)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// with a normal volume
-	v, err := repo.FindOrCreateVolume("", true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := repo.Delete(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-	if v := repo.Get(v.Path); v != nil {
-		t.Fatalf("expected volume to not exist")
-	}
-
-	if _, err := os.Stat(v.Path); err == nil {
-		t.Fatalf("expected volume files to be removed")
-	}
-
-	// with a bind mount
-	dir := filepath.Join(root, "test")
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := repo.Delete(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-	if v := repo.Get(v.Path); v != nil {
-		t.Fatalf("expected volume to not exist")
-	}
-
-	if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
-		t.Fatalf("expected bind volume data to persist after destroying volume")
-	}
-
-	// with container refs
-	dir = filepath.Join(root, "test")
-	v, err = repo.FindOrCreateVolume(dir, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-	v.AddContainer("1234")
-
-	if err := repo.Delete(v.Path); err == nil {
-		t.Fatalf("expected volume delete to fail due to container refs")
-	}
-
-	v.RemoveContainer("1234")
-	if err := repo.Delete(v.Path); err != nil {
-		t.Fatal(err)
-	}
-
-}
-
-func newRepo(root string) (*Repository, error) {
-	configPath := filepath.Join(root, "repo-config")
-	graphDir := filepath.Join(root, "repo-graph")
-
-	driver, err := graphdriver.GetDriver("vfs", graphDir, []string{})
-	if err != nil {
-		return nil, err
-	}
-	return NewRepository(configPath, driver)
-}
diff --git a/volumes/volume.go b/volumes/volume.go
deleted file mode 100644
index 8041160..0000000
--- a/volumes/volume.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package volumes
-
-import (
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-	"sync"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/symlink"
-)
-
-type Volume struct {
-	ID          string
-	Path        string
-	IsBindMount bool
-	Writable    bool
-	containers  map[string]struct{}
-	configPath  string
-	repository  *Repository
-	lock        sync.Mutex
-}
-
-func (v *Volume) Export(resource, name string) (io.ReadCloser, error) {
-	if v.IsBindMount && filepath.Base(resource) == name {
-		name = ""
-	}
-
-	basePath, err := v.getResourcePath(resource)
-	if err != nil {
-		return nil, err
-	}
-	stat, err := os.Stat(basePath)
-	if err != nil {
-		return nil, err
-	}
-	var filter []string
-	if !stat.IsDir() {
-		d, f := path.Split(basePath)
-		basePath = d
-		filter = []string{f}
-	} else {
-		filter = []string{path.Base(basePath)}
-		basePath = path.Dir(basePath)
-	}
-	return archive.TarWithOptions(basePath, &archive.TarOptions{
-		Compression:  archive.Uncompressed,
-		Name:         name,
-		IncludeFiles: filter,
-	})
-}
-
-func (v *Volume) IsDir() (bool, error) {
-	stat, err := os.Stat(v.Path)
-	if err != nil {
-		return false, err
-	}
-
-	return stat.IsDir(), nil
-}
-
-func (v *Volume) Containers() []string {
-	v.lock.Lock()
-
-	var containers []string
-	for c := range v.containers {
-		containers = append(containers, c)
-	}
-
-	v.lock.Unlock()
-	return containers
-}
-
-func (v *Volume) RemoveContainer(containerId string) {
-	v.lock.Lock()
-	delete(v.containers, containerId)
-	v.lock.Unlock()
-}
-
-func (v *Volume) AddContainer(containerId string) {
-	v.lock.Lock()
-	v.containers[containerId] = struct{}{}
-	v.lock.Unlock()
-}
-
-func (v *Volume) initialize() error {
-	v.lock.Lock()
-	defer v.lock.Unlock()
-
-	if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) {
-		if err := os.MkdirAll(v.Path, 0755); err != nil {
-			return err
-		}
-	}
-
-	if err := os.MkdirAll(v.configPath, 0755); err != nil {
-		return err
-	}
-	jsonPath, err := v.jsonPath()
-	if err != nil {
-		return err
-	}
-	f, err := os.Create(jsonPath)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	return v.toDisk()
-}
-
-func (v *Volume) ToDisk() error {
-	v.lock.Lock()
-	defer v.lock.Unlock()
-	return v.toDisk()
-}
-
-func (v *Volume) toDisk() error {
-	data, err := json.Marshal(v)
-	if err != nil {
-		return err
-	}
-
-	pth, err := v.jsonPath()
-	if err != nil {
-		return err
-	}
-
-	return ioutil.WriteFile(pth, data, 0666)
-}
-
-func (v *Volume) FromDisk() error {
-	v.lock.Lock()
-	defer v.lock.Unlock()
-	pth, err := v.jsonPath()
-	if err != nil {
-		return err
-	}
-
-	jsonSource, err := os.Open(pth)
-	if err != nil {
-		return err
-	}
-	defer jsonSource.Close()
-
-	dec := json.NewDecoder(jsonSource)
-
-	return dec.Decode(v)
-}
-
-func (v *Volume) jsonPath() (string, error) {
-	return v.getRootResourcePath("config.json")
-}
-func (v *Volume) getRootResourcePath(path string) (string, error) {
-	cleanPath := filepath.Join("/", path)
-	return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath)
-}
-
-func (v *Volume) getResourcePath(path string) (string, error) {
-	cleanPath := filepath.Join("/", path)
-	return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path)
-}
diff --git a/volumes/volume_test.go b/volumes/volume_test.go
deleted file mode 100644
index 5f3fdcf..0000000
--- a/volumes/volume_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package volumes
-
-import "testing"
-
-func TestContainers(t *testing.T) {
-	v := &Volume{containers: make(map[string]struct{})}
-	id := "1234"
-
-	v.AddContainer(id)
-
-	if v.Containers()[0] != id {
-		t.Fatalf("adding a container ref failed")
-	}
-
-	v.RemoveContainer(id)
-	if len(v.Containers()) != 0 {
-		t.Fatalf("removing container failed")
-	}
-}